Searched refs:dsb (Results 1 – 23 of 23) sorted by relevance
7 dsb(sy); in flush_tlb_local()
11 dsb(sy); in flush_tlb_local()
18 dsb(sy); in flush_tlb()
22 dsb(sy); in flush_tlb()
29 dsb(sy); in flush_tlb_all_local()
33 dsb(sy); in flush_tlb_all_local()
40 dsb(sy); in flush_tlb_all()
44 dsb(sy); in flush_tlb_all()
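These hits bracket the actual TLB maintenance instruction in each helper: a dsb before it so earlier page-table writes are visible to the table walker, and a dsb (plus isb) after it so the invalidation has completed before execution continues. A minimal AArch64-flavoured sketch of that shape, assuming a local stage-1/2 flush (the function name and the exact TLBI operation are illustrative, not the literal Xen code):

    /* Illustrative sketch of the dsb-bracketed TLB flush pattern above. */
    static inline void flush_tlb_local_sketch(void)
    {
        asm volatile("dsb sy" ::: "memory");          /* complete prior PTE writes          */
        asm volatile("tlbi vmalls12e1" ::: "memory"); /* invalidate local stage-1/2 entries */
        asm volatile("dsb sy" ::: "memory");          /* wait for the invalidation to finish */
        asm volatile("isb" ::: "memory");             /* resync the instruction stream       */
    }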
44 dsb(ish); /* Ensure completion of the flush I-cache */ in invalidate_icache()
16 #define dsb(scope) asm volatile("dsb " #scope : : : "memory") macro
19 #define mb() dsb(sy)
21 #define rmb() dsb(ld)
23 #define rmb() dsb(sy) /* 32-bit has no ld variant. */
25 #define wmb() dsb(st)
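The matches above are Xen's ARM barrier macros: dsb() pastes its scope argument straight into the instruction string, and the generic mb()/rmb()/wmb() barriers are built on top of it. A self-contained sketch of that pattern, for illustration only (the CONFIG_ARM_64 guard is an assumption here, not a copy of the real header):

    /* Sketch of the barrier-macro pattern shown above (not the Xen header itself). */
    #define dsb(scope)  asm volatile("dsb " #scope : : : "memory")
    #define isb()       asm volatile("isb" : : : "memory")

    #define mb()        dsb(sy)   /* full system barrier              */
    #ifdef CONFIG_ARM_64
    #define rmb()       dsb(ld)   /* AArch64 has a load-only variant  */
    #else
    #define rmb()       dsb(sy)   /* 32-bit has no ld variant         */
    #endif
    #define wmb()       dsb(st)   /* store-only barrier               */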
150 dsb(sy); /* So the CPU issues all writes to the range */ in invalidate_dcache_va_range()
167 dsb(sy); /* So we know the flushes happen before continuing */ in invalidate_dcache_va_range()
175 dsb(sy); /* So the CPU issues all writes to the range */ in clean_dcache_va_range()
179 dsb(sy); /* So we know the flushes happen before continuing */ in clean_dcache_va_range()
188 dsb(sy); /* So the CPU issues all writes to the range */ in clean_and_invalidate_dcache_va_range()
192 dsb(sy); /* So we know the flushes happen before continuing */ in clean_and_invalidate_dcache_va_range()
233 dsb(sy); /* Ensure preceding are visible */ in flush_xen_data_tlb_range_va_local()
239 dsb(sy); /* Ensure completion of the TLB flush */ in flush_xen_data_tlb_range_va_local()
252 dsb(sy); /* Ensure preceding are visible */ in flush_xen_data_tlb_range_va()
258 dsb(sy); /* Ensure completion of the TLB flush */ in flush_xen_data_tlb_range_va()
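The comments in these hits describe the usual clean/invalidate-by-VA shape: a barrier so the CPU has issued all writes to the range, a loop of data-cache maintenance operations over each cache line, then another barrier so the flushes are complete before the caller continues. A hedged sketch of that loop (the cache-line size, function name, and the dc opcode are assumptions for illustration):

    #include <stdint.h>

    #define CACHE_LINE_SIZE 64   /* assumed for illustration */

    static void clean_dcache_va_range_sketch(const void *p, unsigned long size)
    {
        uintptr_t va  = (uintptr_t)p & ~(uintptr_t)(CACHE_LINE_SIZE - 1);
        uintptr_t end = (uintptr_t)p + size;

        asm volatile("dsb sy" ::: "memory");   /* CPU issues all writes to the range    */
        for ( ; va < end; va += CACHE_LINE_SIZE )
            asm volatile("dc cvac, %0" :: "r" (va) : "memory"); /* clean line to PoC    */
        asm volatile("dsb sy" ::: "memory");   /* flushes happen before continuing      */
    }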
9 dsb(ishst); \
144 dsb
366 dsb /* Flush PTE writes and finish reads */
376 dsb
418 dsb /* Ensure completion of TLB flush */
434 dsb
436 dsb
441 dsb /* Ensure completion of TLB+BP flush */
504 dsb /* So the CPU issues all writes to the range */
517 dsb /* Ensure the flushes happen before
524 dsb /* Ensure completion of TLB+BP flush */
[all …]
69 dsb sy
286 dsb sy
516 dsb sy /* Flush PTE writes and finish reads */
524 dsb sy
562 dsb sy /* Ensure completion of TLB flush */
575 dsb sy
577 dsb sy
580 dsb sy /* Ensure completion of TLB flush */
639 dsb sy /* So the CPU issues all writes to the range */
652 dsb sy /* Ensure the flushes happen before
658 dsb sy /* Ensure completion of TLB flush */
[all …]
52 dsb sy
338 dsb sy
51 dsb(sy); in vexpress_ctrl_start()
117 dsb(sy); isb(); in vexpress_reset()
120 dsb(sy); isb(); in vexpress_reset()
36 dsb(ish); /* Ensure completion of the flush I-cache */ in invalidate_icache()
327 dsb(sy); in send_SGI_mask()
340 dsb(sy); in send_SGI_self()
348 dsb(sy); in send_SGI_allbutself()
322 dsb(sy); in udelay()
335 dsb(ish); in __setup_irq()
337 dsb(ish); in __setup_irq()
357 dsb(sy); in stop_cpu()
373 dsb(sy); in gicv3_save_state()
412 dsb(sy); in gicv3_restore_state()
473 dsb(sy); in gicv3_read_irq()
1071 dsb(sy); in gicv3_irq_enable()
53 dsb(sy); in do_idle()
125 dsb(ishst); in its_send_command()
518 dsb(sy); in gicv2_irq_enable()
79 dsb(sy); in hdlcd_flush()
1604 That will cause overhead on entries and exits due to dsb/isb. However, not all
1609 dsb/isb.
1619 1. dsb/isb on all EL1 -> EL2 trap entries to categorize SErrors correctly.
1620 2. dsb/isb on EL2 -> EL1 return paths to prevent slipping hypervisor
1622 3. dsb/isb in context switch to isolate SErrors between 2 vCPUs.
1629 idle vCPU. This option will avoid most overhead of the dsb/isb, except the
1630 dsb/isb in context switch which is used to isolate the SErrors between 2
1636 of the dsb/isb pairs.
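For context, the dsb/isb pair weighed in these options is a full data barrier followed by an instruction barrier: the dsb drains outstanding memory accesses so any SError they raise becomes pending, and the isb makes it be taken before the EL1/EL2 boundary is crossed, which is what lets the hypervisor attribute it to the right context. A hedged sketch of the pair (the real Xen entry/exit paths do this in assembly macros):

    /* Hedged sketch of the dsb/isb synchronization point discussed above. */
    static inline void synchronize_serror_sketch(void)
    {
        asm volatile("dsb sy" ::: "memory");  /* let pending aborts from prior accesses surface */
        asm volatile("isb" ::: "memory");     /* take them before crossing the EL boundary      */
    }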
1002 dsb(ishst);
Completed in 44 milliseconds