Searched refs:need_flush (Results 1 – 6 of 6) sorted by relevance
 52 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush) in get_new_mmu_context() argument
 57 *need_flush = true; /* start new asid cycle */ in get_new_mmu_context()
 91 bool need_flush = false; in switch_mm_irqs_off() local
 96 get_new_mmu_context(next, cpu, &need_flush); in switch_mm_irqs_off()
103 if (need_flush) in switch_mm_irqs_off()
151 bool need_flush = false; in drop_mmu_context() local
154 get_new_mmu_context(mm, cpu, &need_flush); in drop_mmu_context()
157 if (need_flush) in drop_mmu_context()
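In this first result, need_flush is an out-parameter: get_new_mmu_context() sets it when a new ASID cycle starts, and the callers (switch_mm_irqs_off(), drop_mmu_context()) flush the TLB only if it was set. Below is a minimal userspace sketch of that out-parameter pattern, under the assumption of an 8-bit ASID space; the asid_cache variable and flush_tlb_all() stand-in are illustrative, not the kernel's definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins, not the kernel's definitions. */
    static unsigned long asid_cache = 0xffu;   /* pretend the ASID space is 8 bits wide */
    #define ASID_MASK 0xffu

    static void flush_tlb_all(void) { puts("TLB flushed"); }

    /* Hand out the next ASID; report via *need_flush when the ASID space wraps. */
    static unsigned long get_new_mmu_context(bool *need_flush)
    {
        unsigned long asid = asid_cache + 1;

        if (!(asid & ASID_MASK)) {
            asid += 1;           /* skip ASID 0 after a wrap */
            *need_flush = true;  /* start new asid cycle */
        }
        return asid_cache = asid;
    }

    int main(void)
    {
        bool need_flush = false;
        unsigned long asid = get_new_mmu_context(&need_flush);

        if (need_flush)          /* caller decides whether to flush, as in switch_mm_irqs_off() */
            flush_tlb_all();

        printf("new ASID: %lu\n", asid);
        return 0;
    }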
368 unsigned int subregions, bool need_flush) in mpu_setup_region() argument
387 if (need_flush) in mpu_setup_region()
441 bool need_flush = region == PMSAv7_RAM_REGION; in pmsav7_setup() local
448 xip[i].subreg, need_flush); in pmsav7_setup()
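The PMSAv7 MPU hits invert the previous pattern: the caller computes need_flush (true only for the RAM region) and passes it down, and mpu_setup_region() flushes before reprogramming that region. A rough sketch of that pass-the-decision-down shape; the region numbering and the flush placeholder are assumptions, not the kernel's values.

    #include <stdbool.h>
    #include <stdio.h>

    #define RAM_REGION 1   /* placeholder index, not the kernel's PMSAv7_RAM_REGION value */

    /* The callee only knows whether to flush; the policy lives in the caller. */
    static void mpu_setup_region(unsigned int region, bool need_flush)
    {
        if (need_flush)
            puts("flush caches before reprogramming the region");
        printf("program MPU region %u\n", region);
    }

    int main(void)
    {
        for (unsigned int region = 0; region < 3; region++) {
            bool need_flush = (region == RAM_REGION);  /* flush only for the RAM region */
            mpu_setup_region(region, need_flush);
        }
        return 0;
    }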
221 unsigned int need_flush : 1; member
231 ns.need_flush = 1; in choose_new_asid()
244 ns.need_flush = 0; in choose_new_asid()
258 ns.need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < next_tlb_gen); in choose_new_asid()
271 ns.need_flush = true; in choose_new_asid()
565 bool need_flush) in load_new_mm_cr3() argument
569 if (need_flush) { in load_new_mm_cr3()
900 ns.need_flush = true; in switch_mm_irqs_off()
926 if (ns.need_flush) { in switch_mm_irqs_off()
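In these x86 hits, need_flush is a one-bit member of the structure returned by choose_new_asid(): it is set unconditionally when a fresh ASID is picked, or derived from a tlb_gen comparison when an ASID is reused, and consumed later before CR3 is loaded. A minimal sketch of that return-a-struct-with-a-flag pattern follows; the struct name, array size, and generation counters are assumptions for illustration, not the kernel's exact definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical per-ASID bookkeeping, loosely modelled on the tlb_gen comparison above. */
    struct new_asid {
        unsigned int asid       : 16;
        unsigned int need_flush : 1;
    };

    static unsigned long ctx_tlb_gen[8];   /* last generation each ASID was flushed up to */

    static struct new_asid choose_new_asid(unsigned int asid, unsigned long next_tlb_gen)
    {
        struct new_asid ns = { .asid = asid };

        /* Reuse the ASID, but flush if it has missed any TLB invalidations. */
        ns.need_flush = (ctx_tlb_gen[asid] < next_tlb_gen);
        return ns;
    }

    int main(void)
    {
        ctx_tlb_gen[3] = 5;
        struct new_asid ns = choose_new_asid(3, 7);

        if (ns.need_flush)   /* as checked before the address-space switch */
            printf("flush TLB, then switch to ASID %u\n", (unsigned int)ns.asid);
        else
            printf("switch to ASID %u without flushing\n", (unsigned int)ns.asid);
        return 0;
    }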
 87 static bool need_flush; /* global flush state. set for each gart wrap */ variable
104 need_flush = true; in alloc_iommu()
113 need_flush = true; in alloc_iommu()
117 need_flush = true; in alloc_iommu()
142 if (need_flush) { in flush_gart()
144 need_flush = false; in flush_gart()
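The GART hits show a third variant: a file-scope need_flush flag is set whenever the allocator wraps, and cleared in flush_gart() after the hardware flush, so several allocations can share a single deferred flush. A rough userspace sketch of that accumulate-then-flush pattern, with a printf standing in for the real GART flush and a made-up slot count:

    #include <stdbool.h>
    #include <stdio.h>

    static bool need_flush;          /* global flush state, set on each allocator wrap */
    static unsigned int next_slot;
    #define NUM_SLOTS 4u

    /* Allocate one slot; mark a flush as needed whenever the allocator wraps. */
    static unsigned int alloc_iommu(void)
    {
        if (next_slot >= NUM_SLOTS) {
            next_slot = 0;
            need_flush = true;
        }
        return next_slot++;
    }

    /* Perform the (stand-in) hardware flush only if something requested one. */
    static void flush_gart(void)
    {
        if (need_flush) {
            puts("GART TLB flushed");
            need_flush = false;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            printf("slot %u\n", alloc_iommu());
        flush_gart();    /* batched: one flush covers all the wrapped allocations */
        return 0;
    }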
 19 static inline bool need_flush(struct iommu_map_table *iommu) in need_flush() function
206 (n < pool->hint || need_flush(iommu))) { in iommu_tbl_range_alloc()
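Here need_flush is not a flag at all but a small predicate on the IOMMU map table, evaluated inside the allocation path of iommu_tbl_range_alloc(). A sketch of that shape; the flags field and IOMMU_NEED_FLUSH bit are assumed here for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical map table; only what the predicate needs. */
    struct iommu_map_table {
        unsigned long flags;
    };
    #define IOMMU_NEED_FLUSH 0x1ul

    /* Predicate form: the "do we need a flush?" decision lives with the table. */
    static inline bool need_flush(struct iommu_map_table *iommu)
    {
        return iommu->flags & IOMMU_NEED_FLUSH;
    }

    int main(void)
    {
        struct iommu_map_table tbl = { .flags = IOMMU_NEED_FLUSH };
        unsigned long n = 10, hint = 20;

        /* Same shape as the allocator's check: flush when allocating below the
         * hint or when the table itself says a flush is pending. */
        if (n < hint || need_flush(&tbl))
            puts("flush before handing out the range");
        return 0;
    }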
1096 bool need_flush = false; in stage2_unmap_walker() local
1112 need_flush = !stage2_has_fwb(pgt); in stage2_unmap_walker()
1122 if (need_flush && mm_ops->dcache_clean_inval_poc) in stage2_unmap_walker()
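In the final result, need_flush is derived from a hardware feature test: when the stage-2 tables lack FWB, the unmap walker must clean and invalidate the data cache to the point of coherency, and it does so through an optional mm_ops callback. A hedged sketch of that feature-gated, callback-driven pattern; all names other than need_flush are placeholders, not the kernel's types.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Placeholder page-table and ops types, not the kernel's. */
    struct pgtable {
        bool has_fwb;   /* hardware forces write-back, so no manual cache maintenance needed */
    };

    struct mm_ops {
        /* Optional: clean+invalidate a buffer to the point of coherency. */
        void (*dcache_clean_inval_poc)(void *addr, size_t size);
    };

    static void demo_cmo(void *addr, size_t size)
    {
        printf("dcache clean+inval %zu bytes at %p\n", size, addr);
    }

    static void unmap_page(struct pgtable *pgt, struct mm_ops *mm_ops,
                           void *addr, size_t size)
    {
        bool need_flush = !pgt->has_fwb;   /* only flush when FWB cannot do it for us */

        /* ... tear down the mapping here ... */

        if (need_flush && mm_ops->dcache_clean_inval_poc)
            mm_ops->dcache_clean_inval_poc(addr, size);
    }

    int main(void)
    {
        struct pgtable pgt = { .has_fwb = false };
        struct mm_ops ops = { .dcache_clean_inval_poc = demo_cmo };
        char page[64];

        unmap_page(&pgt, &ops, page, sizeof(page));
        return 0;
    }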
Completed in 21 milliseconds