| /arch/arm/mm/ |
| flush.c |
    199  void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)  in __flush_dcache_folio() argument
    206  if (!folio_test_highmem(folio)) {  in __flush_dcache_folio()
    208  folio_size(folio));  in __flush_dcache_folio()
    235  flush_pfn_alias(folio_pfn(folio), folio_pos(folio));  in __flush_dcache_folio()
    238  static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)  in __flush_dcache_aliases() argument
    250  pgoff = folio->index;  in __flush_dcache_aliases()
    267  pfn = folio_pfn(folio);  in __flush_dcache_aliases()
    268  nr = folio_nr_pages(folio);  in __flush_dcache_aliases()
    288  struct folio *folio;  in __sync_icache_dcache() local
    299  if (folio_test_reserved(folio))  in __sync_icache_dcache()
    [all …]
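The arm matches above only hint at the body of __flush_dcache_aliases(). As orientation, here is a hedged kernel-context sketch of the alias walk such a helper performs (not the arm implementation itself): every user mapping of the folio is found through the address_space's i_mmap interval tree and its user-space alias is flushed. The flush_user_alias() helper and the order-0 (single-page) assumption are mine; the real code also copes with large folios and extra VMA checks.

    #include <linux/mm.h>
    #include <asm/cacheflush.h>    /* flush_dcache_mmap_lock() on these arches */

    /* Hedged sketch, order-0 folio only; flush_user_alias() is hypothetical. */
    static void flush_dcache_aliases_sketch(struct address_space *mapping,
                                            struct folio *folio)
    {
            pgoff_t pgoff = folio->index;
            struct vm_area_struct *vma;

            flush_dcache_mmap_lock(mapping);
            vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                    /* User virtual address at which this VMA maps the folio. */
                    unsigned long addr = vma->vm_start +
                            ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

                    flush_user_alias(vma, addr, folio_pfn(folio));
            }
            flush_dcache_mmap_unlock(mapping);
    }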
|
| /arch/csky/abiv1/ |
| cacheflush.c |
    18   void flush_dcache_folio(struct folio *folio)  in flush_dcache_folio() argument
    22   if (is_zero_pfn(folio_pfn(folio)))  in flush_dcache_folio()
    25   mapping = folio_flush_mapping(folio);  in flush_dcache_folio()
    27   if (mapping && !folio_mapped(folio))  in flush_dcache_folio()
    28   clear_bit(PG_dcache_clean, &folio->flags);  in flush_dcache_folio()
    33   set_bit(PG_dcache_clean, &folio->flags);  in flush_dcache_folio()
    48   struct folio *folio;  in update_mmu_cache_range() local
    58   folio = page_folio(pfn_to_page(pfn));  in update_mmu_cache_range()
    59   if (!test_and_set_bit(PG_dcache_clean, &folio->flags))  in update_mmu_cache_range()
    62   if (folio_flush_mapping(folio)) {  in update_mmu_cache_range()
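The csky abiv1 fragments follow the common lazy D-cache scheme: if the folio belongs to a mapping but is not currently mapped into user space, only clear PG_dcache_clean and defer the work; otherwise write back now and mark the folio clean. A minimal kernel-context sketch of that shape, with a hypothetical dcache_wb_folio() standing in for the low-level write-back:

    #include <linux/mm.h>
    #include <linux/pagemap.h>     /* folio_flush_mapping() */

    /* Minimal sketch of the lazy-flush shape; dcache_wb_folio() is hypothetical. */
    static void flush_dcache_folio_sketch(struct folio *folio)
    {
            struct address_space *mapping;

            if (is_zero_pfn(folio_pfn(folio)))
                    return;

            mapping = folio_flush_mapping(folio);

            if (mapping && !folio_mapped(folio)) {
                    /* No user mappings yet: defer until the folio is mapped. */
                    clear_bit(PG_dcache_clean, &folio->flags);
            } else {
                    dcache_wb_folio(folio);
                    if (mapping)
                            set_bit(PG_dcache_clean, &folio->flags);
            }
    }

The deferred half runs later from update_mmu_cache_range(), as the second group of matches above shows.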
|
| /arch/s390/kernel/ |
| uv.c |
    136  int uv_destroy_folio(struct folio *folio)  in uv_destroy_folio() argument
    144  folio_get(folio);  in uv_destroy_folio()
    148  folio_put(folio);  in uv_destroy_folio()
    185  int uv_convert_from_secure_folio(struct folio *folio)  in uv_convert_from_secure_folio() argument
    193  folio_get(folio);  in uv_convert_from_secure_folio()
    197  folio_put(folio);  in uv_convert_from_secure_folio()
    250  static int expected_folio_refs(struct folio *folio)  in expected_folio_refs() argument
    386  if (!folio_test_dirty(folio) || folio_test_anon(folio) ||  in s390_wiggle_split_folio()
    418  struct folio *folio;  in make_hva_secure() local
    428  if (!folio) {  in make_hva_secure()
    [all …]
|
| /arch/arm64/include/asm/ |
| mte.h |
    167  static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)  in folio_set_hugetlb_mte_tagged() argument
    169  VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));  in folio_set_hugetlb_mte_tagged()
    176  set_bit(PG_mte_tagged, &folio->flags);  in folio_set_hugetlb_mte_tagged()
    180  static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)  in folio_test_hugetlb_mte_tagged() argument
    182  bool ret = test_bit(PG_mte_tagged, &folio->flags);  in folio_test_hugetlb_mte_tagged()
    184  VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));  in folio_test_hugetlb_mte_tagged()
    195  static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)  in folio_try_hugetlb_mte_tagging() argument
    197  VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));  in folio_try_hugetlb_mte_tagging()
    212  static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)  in folio_set_hugetlb_mte_tagged() argument
    216  static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)  in folio_test_hugetlb_mte_tagged() argument
    [all …]
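These helpers implement a small try/set protocol around hugetlb MTE tagging: folio_try_hugetlb_mte_tagging() returns true only for the caller that wins the right to initialise the tags (other callers wait until PG_mte_tagged is set), and the winner must finish with folio_set_hugetlb_mte_tagged(). A hedged sketch of a hypothetical caller; only the two folio_* helpers and mte_clear_page_tags() are real arm64 interfaces, the surrounding function is mine:

    #include <linux/mm.h>
    #include <asm/mte.h>

    /* Hypothetical caller showing how the try/set pair is meant to be used. */
    static void example_tag_hugetlb_folio(struct folio *folio)
    {
            long i;

            if (!folio_try_hugetlb_mte_tagging(folio))
                    return;         /* tags already initialised by someone else */

            for (i = 0; i < folio_nr_pages(folio); i++)
                    mte_clear_page_tags(page_address(folio_page(folio, i)));

            folio_set_hugetlb_mte_tagged(folio);
    }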
|
| hugetlb.h |
    22   static inline void arch_clear_hugetlb_flags(struct folio *folio)  in arch_clear_hugetlb_flags() argument
    24   clear_bit(PG_dcache_clean, &folio->flags);  in arch_clear_hugetlb_flags()
    28   clear_bit(PG_mte_tagged, &folio->flags);  in arch_clear_hugetlb_flags()
    29   clear_bit(PG_mte_lock, &folio->flags);  in arch_clear_hugetlb_flags()
|
| /arch/xtensa/mm/ |
| cache.c |
    90   struct folio *folio = page_folio(page);  in clear_user_highpage() local
    105  struct folio *folio = page_folio(dst);  in copy_user_highpage() local
    126  void flush_dcache_folio(struct folio *folio)  in flush_dcache_folio() argument
    138  set_bit(PG_arch_1, &folio->flags);  in flush_dcache_folio()
    220  struct folio *folio;  in update_mmu_cache_range() local
    226  folio = page_folio(pfn_to_page(pfn));  in update_mmu_cache_range()
    231  nr = folio_nr_pages(folio);  in update_mmu_cache_range()
    235  if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {  in update_mmu_cache_range()
    250  clear_bit(PG_arch_1, &folio->flags);  in update_mmu_cache_range()
    253  if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)  in update_mmu_cache_range()
    [all …]
|
| /arch/nios2/mm/ |
| cacheflush.c |
    74   static void flush_aliases(struct address_space *mapping, struct folio *folio)  in flush_aliases() argument
    82   pgoff = folio->index;  in flush_aliases()
    163  static void __flush_dcache_folio(struct folio *folio)  in __flush_dcache_folio() argument
    175  void flush_dcache_folio(struct folio *folio)  in flush_dcache_folio() argument
    183  if (is_zero_pfn(folio_pfn(folio)))  in flush_dcache_folio()
    192  __flush_dcache_folio(folio);  in flush_dcache_folio()
    195  flush_aliases(mapping, folio);  in flush_dcache_folio()
    214  struct folio *folio;  in update_mmu_cache_range() local
    231  __flush_dcache_folio(folio);  in update_mmu_cache_range()
    235  flush_aliases(mapping, folio);  in update_mmu_cache_range()
    [all …]
|
| /arch/arm64/mm/ |
| flush.c |
    54   struct folio *folio = page_folio(pte_page(pte));  in __sync_icache_dcache() local
    56   if (!test_bit(PG_dcache_clean, &folio->flags)) {  in __sync_icache_dcache()
    57   sync_icache_aliases((unsigned long)folio_address(folio),  in __sync_icache_dcache()
    58   (unsigned long)folio_address(folio) +  in __sync_icache_dcache()
    59   folio_size(folio));  in __sync_icache_dcache()
    60   set_bit(PG_dcache_clean, &folio->flags);  in __sync_icache_dcache()
    70   void flush_dcache_folio(struct folio *folio)  in flush_dcache_folio() argument
    72   if (test_bit(PG_dcache_clean, &folio->flags))  in flush_dcache_folio()
    73   clear_bit(PG_dcache_clean, &folio->flags);  in flush_dcache_folio()
|
| mteswap.c |
    94   int arch_prepare_to_swap(struct folio *folio)  in arch_prepare_to_swap() argument
    102  nr = folio_nr_pages(folio);  in arch_prepare_to_swap()
    105  err = mte_save_tags(folio_page(folio, i));  in arch_prepare_to_swap()
    113  __mte_invalidate_tags(folio_page(folio, i));  in arch_prepare_to_swap()
    117  void arch_swap_restore(swp_entry_t entry, struct folio *folio)  in arch_swap_restore() argument
    124  nr = folio_nr_pages(folio);  in arch_swap_restore()
    127  mte_restore_tags(entry, folio_page(folio, i));  in arch_swap_restore()
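These two hooks bracket swapping of MTE-tagged memory: arch_prepare_to_swap() saves the tags of every page in the folio before swap-out (dropping any already-saved tags if a later page fails), and arch_swap_restore() puts them back on swap-in. A hedged reconstruction consistent with the fragments above, with checks such as system_supports_mte() omitted:

    #include <linux/mm.h>
    #include <asm/mte.h>

    /* Simplified sketch of the save-with-unwind loop shown above. */
    static int mte_save_folio_tags_sketch(struct folio *folio)
    {
            long i, nr = folio_nr_pages(folio);
            int err;

            for (i = 0; i < nr; i++) {
                    err = mte_save_tags(folio_page(folio, i));
                    if (err)
                            goto unwind;
            }
            return 0;

    unwind:
            /* Drop the tags already saved for the earlier pages. */
            while (i--)
                    __mte_invalidate_tags(folio_page(folio, i));
            return err;
    }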
|
| /arch/sparc/include/asm/ |
| cacheflush_64.h |
    38   void flush_dcache_folio_impl(struct folio *folio);
    40   void smp_flush_dcache_folio_impl(struct folio *folio, int cpu);
    41   void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio);
    43   #define smp_flush_dcache_folio_impl(folio, cpu) flush_dcache_folio_impl(folio)  argument
    44   #define flush_dcache_folio_all(mm, folio) flush_dcache_folio_impl(folio)  argument
    49   void flush_dcache_folio(struct folio *folio);
|
| cacheflush_32.h |
    39   void sparc_flush_folio_to_ram(struct folio *folio);
    42   #define flush_dcache_folio(folio) sparc_flush_folio_to_ram(folio)  argument
|
| /arch/mips/mm/ |
| cache.c |
    104  struct folio *folio = page_folio(page);  in __flush_dcache_pages() local
    110  folio_set_dcache_dirty(folio);  in __flush_dcache_pages()
    130  struct folio *folio = page_folio(page);  in __flush_anon_page() local
    133  if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {  in __flush_anon_page()
    148  struct folio *folio;  in __update_cache() local
    157  folio = page_folio(pfn_to_page(pfn));  in __update_cache()
    159  address -= offset_in_folio(folio, pfn << PAGE_SHIFT);  in __update_cache()
    161  if (folio_test_dcache_dirty(folio)) {  in __update_cache()
    162  for (i = 0; i < folio_nr_pages(folio); i++) {  in __update_cache()
    163  addr = (unsigned long)kmap_local_folio(folio, i);  in __update_cache()
    [all …]
|
| /arch/mips/include/asm/ |
| cacheflush.h |
    39   #define folio_test_dcache_dirty(folio) \  argument
    40   test_bit(PG_dcache_dirty, &(folio)->flags)
    41   #define folio_set_dcache_dirty(folio) \  argument
    42   set_bit(PG_dcache_dirty, &(folio)->flags)
    43   #define folio_clear_dcache_dirty(folio) \  argument
    44   clear_bit(PG_dcache_dirty, &(folio)->flags)
    56   static inline void flush_dcache_folio(struct folio *folio)  in flush_dcache_folio() argument
    59   __flush_dcache_pages(&folio->page, folio_nr_pages(folio));  in flush_dcache_folio()
    61   folio_set_dcache_dirty(folio);  in flush_dcache_folio()
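MIPS uses the opposite convention from the PG_dcache_clean arches: a PG_dcache_dirty bit records that deferred maintenance is still owed, and flush_dcache_folio() sets it when the flush cannot (or need not) happen immediately. The deferred half is consumed from __update_cache(), as the cache.c matches above show. A hedged kernel-context sketch of that consumer; arch_flush_dcache_page() is a hypothetical stand-in for the real MIPS primitive, and the kmap offsets use the byte offsets that kmap_local_folio() expects:

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* Hedged sketch of the deferred (dirty-bit) half, modeled on __update_cache(). */
    static void update_cache_sketch(struct folio *folio)
    {
            unsigned int i;

            if (!folio_test_dcache_dirty(folio))
                    return;

            for (i = 0; i < folio_nr_pages(folio); i++) {
                    void *addr = kmap_local_folio(folio, i * PAGE_SIZE);

                    arch_flush_dcache_page((unsigned long)addr);
                    kunmap_local(addr);
            }
            folio_clear_dcache_dirty(folio);
    }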
|
| /arch/sh/mm/ |
| cache.c |
    64   struct folio *folio = page_folio(page);  in copy_to_user_page() local
    74   clear_bit(PG_dcache_clean, &folio->flags);  in copy_to_user_page()
    85   struct folio *folio = page_folio(page);  in copy_from_user_page() local
    95   clear_bit(PG_dcache_clean, &folio->flags);  in copy_from_user_page()
    102  struct folio *src = page_folio(from);  in copy_user_highpage()
    150  struct folio *folio = page_folio(pfn_to_page(pfn));  in __update_cache() local
    153  __flush_purge_region(folio_address(folio),  in __update_cache()
    154  folio_size(folio));  in __update_cache()
    160  struct folio *folio = page_folio(page);  in __flush_anon_page() local
    174  folio_size(folio));  in __flush_anon_page()
    [all …]
|
| cache-sh7705.c |
    137  struct folio *folio = arg;  in sh7705_flush_dcache_folio() local
    138  struct address_space *mapping = folio_flush_mapping(folio);  in sh7705_flush_dcache_folio()
    141  clear_bit(PG_dcache_clean, &folio->flags);  in sh7705_flush_dcache_folio()
    143  unsigned long pfn = folio_pfn(folio);  in sh7705_flush_dcache_folio()
    144  unsigned int i, nr = folio_nr_pages(folio);  in sh7705_flush_dcache_folio()
    186  struct folio *folio = arg;  in sh7705_flush_icache_folio() local
    187  __flush_purge_region(folio_address(folio), folio_size(folio));  in sh7705_flush_icache_folio()
|
| cache-sh4.c |
    112  struct folio *folio = arg;  in sh4_flush_dcache_folio() local
    114  struct address_space *mapping = folio_flush_mapping(folio);  in sh4_flush_dcache_folio()
    117  clear_bit(PG_dcache_clean, &folio->flags);  in sh4_flush_dcache_folio()
    121  unsigned long pfn = folio_pfn(folio);  in sh4_flush_dcache_folio()
    122  unsigned long addr = (unsigned long)folio_address(folio);  in sh4_flush_dcache_folio()
    123  unsigned int i, nr = folio_nr_pages(folio);  in sh4_flush_dcache_folio()
    244  struct folio *folio = page_folio(page);  in sh4_flush_cache_page() local
    250  test_bit(PG_dcache_clean, folio_flags(folio, 0)) &&  in sh4_flush_cache_page()
|
| /arch/powerpc/mm/ |
| pgtable.c |
    87   struct folio *folio = maybe_pte_to_folio(pte);  in set_pte_filter_hash() local
    88   if (!folio)  in set_pte_filter_hash()
    91   flush_dcache_icache_folio(folio);  in set_pte_filter_hash()
    112  struct folio *folio;  in set_pte_filter() local
    125  folio = maybe_pte_to_folio(pte);  in set_pte_filter()
    126  if (unlikely(!folio))  in set_pte_filter()
    135  flush_dcache_icache_folio(folio);  in set_pte_filter()
    147  struct folio *folio;  in set_access_flags_filter() local
    173  folio = maybe_pte_to_folio(pte);  in set_access_flags_filter()
    174  if (unlikely(!folio))  in set_access_flags_filter()
    [all …]
|
| cacheflush.c |
    151  void flush_dcache_icache_folio(struct folio *folio)  in flush_dcache_icache_folio() argument
    153  unsigned int i, nr = folio_nr_pages(folio);  in flush_dcache_icache_folio()
    158  if (!folio_test_highmem(folio)) {  in flush_dcache_icache_folio()
    159  void *addr = folio_address(folio);  in flush_dcache_icache_folio()
    164  void *start = kmap_local_folio(folio, i * PAGE_SIZE);  in flush_dcache_icache_folio()
    170  unsigned long pfn = folio_pfn(folio);  in flush_dcache_icache_folio()
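The powerpc flush_dcache_icache_folio() matches show the standard highmem-safe folio walk: a lowmem folio is flushed in one go through its linear address, while a highmem folio is mapped one page at a time with kmap_local_folio() (there is also a physical-address path, hinted at by the folio_pfn() match). A hedged kernel-context sketch of that shape, with a hypothetical arch_sync_range() primitive:

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* Hedged sketch of a highmem-safe folio walk; arch_sync_range() is hypothetical. */
    static void sync_folio_sketch(struct folio *folio)
    {
            unsigned int i, nr = folio_nr_pages(folio);

            if (!folio_test_highmem(folio)) {
                    /* Lowmem: the whole folio is virtually contiguous. */
                    arch_sync_range(folio_address(folio), folio_size(folio));
                    return;
            }

            for (i = 0; i < nr; i++) {
                    void *start = kmap_local_folio(folio, i * PAGE_SIZE);

                    arch_sync_range(start, PAGE_SIZE);
                    kunmap_local(start);
            }
    }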
|
| /arch/csky/abiv2/ |
| cacheflush.c |
    14   struct folio *folio;  in update_mmu_cache_range() local
    22   folio = page_folio(pfn_to_page(pfn));  in update_mmu_cache_range()
    24   if (test_and_set_bit(PG_dcache_clean, &folio->flags))  in update_mmu_cache_range()
    28   for (i = 0; i < folio_nr_pages(folio); i++) {  in update_mmu_cache_range()
    29   unsigned long addr = (unsigned long) kmap_local_folio(folio,  in update_mmu_cache_range()
|
| /arch/powerpc/include/asm/ |
| cacheflush.h |
    38   static inline void flush_dcache_folio(struct folio *folio)  in flush_dcache_folio() argument
    43   if (test_bit(PG_dcache_clean, &folio->flags))  in flush_dcache_folio()
    44   clear_bit(PG_dcache_clean, &folio->flags);  in flush_dcache_folio()
    60   void flush_dcache_icache_folio(struct folio *folio);
|
| /arch/arm/include/asm/ |
| hugetlb.h |
    18   static inline void arch_clear_hugetlb_flags(struct folio *folio)  in arch_clear_hugetlb_flags() argument
    20   clear_bit(PG_dcache_clean, &folio->flags);  in arch_clear_hugetlb_flags()
|
| /arch/openrisc/mm/ |
| cache.c |
    85   struct folio *folio = page_folio(pfn_to_page(pfn));  in update_cache() local
    86   int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);  in update_cache()
    94   unsigned int nr = folio_nr_pages(folio);  in update_cache()
    97   sync_icache_dcache(folio_page(folio, nr));  in update_cache()
|
| /arch/sh/include/asm/ |
| hugetlb.h |
    15   static inline void arch_clear_hugetlb_flags(struct folio *folio)  in arch_clear_hugetlb_flags() argument
    17   clear_bit(PG_dcache_clean, &folio->flags);  in arch_clear_hugetlb_flags()
|
| /arch/csky/abiv2/inc/abi/ |
| cacheflush.h |
    21   static inline void flush_dcache_folio(struct folio *folio)  in flush_dcache_folio() argument
    23   if (test_bit(PG_dcache_clean, &folio->flags))  in flush_dcache_folio()
    24   clear_bit(PG_dcache_clean, &folio->flags);  in flush_dcache_folio()
|
| /arch/riscv/include/asm/ |
| cacheflush.h |
    24   static inline void flush_dcache_folio(struct folio *folio)  in flush_dcache_folio() argument
    26   if (test_bit(PG_dcache_clean, &folio->flags))  in flush_dcache_folio()
    27   clear_bit(PG_dcache_clean, &folio->flags);  in flush_dcache_folio()
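The powerpc, csky abiv2 and riscv headers above implement only the producer half of the PG_dcache_clean protocol: flush_dcache_folio() merely clears the bit to record that maintenance is owed. The consumer half lives in each arch's update_mmu_cache()/set_pte path and does the actual work the first time the folio is mapped after being dirtied. A hedged sketch of that consumer, assuming a lowmem folio and a hypothetical sync_icache_range() primitive:

    #include <linux/mm.h>

    /* Hedged sketch of the consumer half of the PG_dcache_clean protocol. */
    static void sync_folio_icache_sketch(struct folio *folio)
    {
            /*
             * test_and_set_bit() returns the old value: if the folio was
             * already clean there is nothing to do; otherwise mark it clean
             * and perform the deferred maintenance.
             */
            if (test_and_set_bit(PG_dcache_clean, &folio->flags))
                    return;

            sync_icache_range((unsigned long)folio_address(folio),
                              (unsigned long)folio_address(folio) + folio_size(folio));
    }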
|