/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
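
/*
 * Illustrative sketch (not part of the original header): kmap() and
 * kunmap() pair up around sleepable access to a possibly-highmem page.
 * The buffer and length names below are hypothetical.
 *
 *	void *vaddr = kmap(page);
 *
 *	memcpy(vaddr, buf, len);	// may sleep; mapping is long-lived
 *	kunmap(page);			// note: takes the page, not vaddr
 */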

static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_page_try_from_panic(struct page *page)
{
	if (!PageHighMem(page))
		return page_address(page);
	/* If the page is in HighMem, it's not safe to kmap it. */
	return NULL;
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);
	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
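
/*
 * Illustrative sketch (not part of the original header): the offset is
 * relative to the start of the folio, so the returned pointer already
 * points at the requested byte. The names below are hypothetical.
 *
 *	char *p = kmap_local_folio(folio, pos & (folio_size(folio) - 1));
 *
 *	*p = c;
 *	kunmap_local(p);
 */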

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

unsigned long __nr_free_highpages(void);
unsigned long __totalhigh_pages(void);

static inline unsigned long nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return __totalhigh_pages();
}

static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
		(addr >= __fix_to_virt(FIX_KMAP_END) &&
		 addr < __fix_to_virt(FIX_KMAP_BEGIN));
}
#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_page_try_from_panic(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return folio_address(folio) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

static inline unsigned long nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0; }

static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr: Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on the PREEMPT_RT configuration, it also
 * re-enables migration and preemption. Users should not count on these
 * side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
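
/*
 * Illustrative sketch (not part of the original header): a typical
 * kmap_atomic()/kunmap_atomic() pair. Page faults (and, depending on the
 * preemption model, preemption or migration) are disabled in between, so
 * the code must not sleep. The names below are hypothetical.
 *
 *	void *vaddr = kmap_atomic(page);
 *
 *	memcpy(vaddr + offset, src, len);	// atomic context, no sleeping
 *	kunmap_atomic(vaddr);			// pass the address, not the page
 */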

/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
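
/*
 * Illustrative sketch (not part of the original header): kmap_local_*()
 * mappings are CPU-local and nest, so they must be unmapped in reverse
 * (LIFO) order. The names below are hypothetical.
 *
 *	void *src = kmap_local_page(src_page);
 *	void *dst = kmap_local_page(dst_page);
 *
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap_local(dst);	// most recent mapping first
 *	kunmap_local(src);
 */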

#endif