/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>
/* This is for the serialization of PxTLB broadcasts. At least on the N class
 * systems, only one PxTLB inter-processor broadcast can be active at any one
 * time on the Merced bus. */
extern spinlock_t pa_tlb_flush_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes	(0)
#endif

#define purge_tlb_start(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags);	\
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context.space_id, SR_TEMP1);
	pdtlb(SR_TEMP1, addr);
	pitlb(SR_TEMP1, addr);
	purge_tlb_end(flags);
}

extern void __update_cache(pte_t pte);

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			\
	do {					\
		*(pteptr) = (pteval);		\
		mb();				\
	} while(0)

#define set_pte_at(mm, addr, pteptr, pteval)	\
	do {					\
		if (pte_present(pteval) &&	\
		    pte_user(pteval))		\
			__update_cache(pteval);	\
		*(pteptr) = (pteval);		\
		purge_tlb_entries(mm, addr);	\
	} while (0)

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_TABLE_ORDER	1
#define PGD_TABLE_ORDER	0
#else
#define PGD_TABLE_ORDER	1
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE	(1UL << BITS_PER_PTE)
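
/* Worked example (illustrative only): BITS_PER_PTE_ENTRY is log2 of the
 * PTE entry size, e.g. 3 for 8-byte PTEs on 64-bit. With 4 kB pages
 * (PAGE_SHIFT = 12) that gives BITS_PER_PTE = 12 - 3 = 9, so one PTE
 * page holds PTRS_PER_PTE = 512 entries mapping 512 * 4 kB = 2 MB. */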

/* Definitions for 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD	(1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD	0
#endif

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	(1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD	PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1ULL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif
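
/* Worked example (illustrative; the real entry-size constants live in
 * asm/page.h). Assuming 4 kB pages, BITS_PER_PTE_ENTRY == 3 and
 * BITS_PER_PMD_ENTRY == BITS_PER_PGD_ENTRY == 2 on 64-bit:
 *   BITS_PER_PTE = 12 - 3     = 9
 *   BITS_PER_PMD = 12 + 1 - 2 = 11
 *   PGDIR_SHIFT  = 12 + 9 + 11 = 32
 *   BITS_PER_PGD = 12 + 0 - 2 = 10   (since 32 + 12 + 0 - 2 = 42 <= 64)
 * so MAX_ADDRBITS = 32 + 10 = 42 and SPACEID_SHIFT = 42 - 32 = 10. */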

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PLD_SHIFT + BITS_PER_PTE)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PLD_SHIFT - BITS_PER_PTE))
#else
# define PT_INITIAL	(1)	/* all initial PTEs fit into one page */
#endif
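
/* E.g. with the example values above (PLD_SHIFT = 12, BITS_PER_PTE = 9)
 * and KERNEL_INITIAL_ORDER = 26 on 64-bit: each PTE page maps
 * 1 << 21 = 2 MB, so the initial 64 MB mapping needs
 * PT_INITIAL = 1 << (26 - 12 - 9) = 32 PTE pages. */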

/*
 * pgd entries used up by user/kernel:
 */

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
#ifdef CONFIG_HUGETLB_PAGE
#define _PAGE_SPECIAL_BIT  _PAGE_DMB_BIT   /* DMB feature is currently unused */
#else
#define _PAGE_SPECIAL_BIT  _PAGE_HPAGE_BIT /* use unused HUGE PAGE bit */
#endif

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - (x))
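
/* For example, xlate_pabit(_PAGE_READ_BIT) = 31 - 31 = 0, so
 * _PAGE_READ = 1 << 0 = 0x001, and xlate_pabit(_PAGE_USER_BIT) =
 * 31 - 20 = 11, so _PAGE_USER = 0x800 -- matching the hex values
 * annotated next to each bit definition above. */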

/* this defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT	12

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_SPECIAL  (1 << xlate_pabit(_PAGE_SPECIAL_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_ACCESSED

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT	31
#define _PxD_VALID_BIT		30

#define PxD_FLAG_PRESENT	(1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_VALID		(1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK		(0xf)
#define PxD_FLAG_SHIFT		(4)
#define PxD_VALUE_SHIFT		(PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
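
/* Example of the encoding (illustrative only): with PFN_PTE_SHIFT = 12,
 * PxD_VALUE_SHIFT = 12 - 4 = 8. A page-aligned physical table address
 * is stored as (paddr >> 8), leaving the low 4 bits free for the
 * PxD_FLAG_* bits, and is recovered by e.g. pmd_address() below as
 *   (pmd_val(x) & ~PxD_FLAG_MASK) << 8
 * so a 32-bit PxD word reaches 32 + 8 = 40 bits of physical address
 * space with 4k pages, as the comment above states. */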

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY	PAGE_EXECREAD
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)

/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

/*xwr*/

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)	(pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_user(x)	(pte_val(x) & _PAGE_USER)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x)	((unsigned long)(pud_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
	set_pmd(pmd, __pmd(0));
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud)))
#define pud_page(pud)	virt_to_page((void *)pud_pgtable(pud))

/* For 64 bit we have three level tables */

#define pud_none(x)	(!pud_val(x))
#define pud_bad(x)	(!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x)	(pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud) {
	set_pud(pud, __pud(0));
}
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)   (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
			 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)   (0)
#define pte_mkhuge(pte) (pte)
#endif


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_address(pmd)));
}

#define pmd_pfn(pmd)	(pmd_address(pmd) >> PAGE_SHIFT)
#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

/* Find an entry in the second-level page table.. */

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty		PG_arch_1

#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32bit):
 *
 *                       1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <---------------- offset -----------------> P E <ofs> < type ->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_PRESENT (P) must be 0.
 *
 *   For the 64bit version, the offset is extended by 32bit.
 */
#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			( (((x).val >> 5) & 0x7) | \
					  (((x).val >> 10) << 3) )
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) & 0x1f) | \
					(((offset) & 0x7) << 5) | \
					(((offset) >> 3) << 10) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
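
/* Round-trip example (illustrative): __swp_entry(3, 0x125) places the
 * type (3) in bits 0-4, the low offset bits (0x5) in bits 5-7 and the
 * remaining offset (0x24) from bit 10 upward, skipping bit 8 (E, i.e.
 * _PAGE_SWP_EXCLUSIVE = 0x100) and bit 9 (P, _PAGE_PRESENT = 0x200);
 * __swp_type() and __swp_offset() then recover 3 and 0x125 unchanged. */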

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (!pte_young(*ptep))
		return 0;

	pte = *ptep;
	if (!pte_young(pte)) {
		return 0;
	}
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	set_pte_at(mm, addr, ptep, __pte(0));

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7
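
/* Each encoding step quadruples the page size, i.e. size = 4 kB << (2 * enc):
 * encoding 0 -> 4 kB, 1 -> 16 kB, 2 -> 64 kB, ... 7 -> 4 kB << 14 = 64 MB. */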

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

#endif /* _PARISC_PGTABLE_H */