/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

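/*
 * Point a PMD entry at the page of PTEs 'pte'.  The PFN of the PTE table
 * is shifted into place and tagged with _PAGE_TABLE, which marks the
 * entry as valid and as a pointer to a next-level table rather than a
 * leaf mapping.
 */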
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

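/*
 * As pmd_populate_kernel(), but for user page tables: 'pte' is a
 * struct page here, so page_address() recovers its kernel virtual
 * address before the PFN is computed.
 */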
static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
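/* Point a PUD entry at the PMD table 'pmd'. */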
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

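/*
 * Point a P4D entry at the PUD table 'pud'.  This is a no-op unless the
 * kernel found at boot that the hardware supports a fourth page-table
 * level (Sv48/Sv57); with Sv39 the PUD level is folded at runtime and
 * pgtable_l4_enabled is false.
 */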
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

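/*
 * As p4d_populate(), but set_p4d_safe() additionally warns if a
 * different entry is already present, catching accidental overwrites.
 */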
static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

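/*
 * Point a PGD entry at the P4D table 'p4d'.  Only meaningful when five
 * page-table levels (Sv57) are in use; otherwise the P4D level is
 * folded at runtime and pgtable_l5_enabled is false.
 */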
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

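/* As pgd_populate(), but warn rather than silently overwrite a present entry. */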
static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
				     p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd_safe(pgd,
			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

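/*
 * PUD tables only exist when the fourth level is enabled at runtime, so
 * wrap the generic allocation helpers behind pgtable_l4_enabled checks;
 * with Sv39 there is nothing to allocate or free at this level.
 */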
#define pud_alloc_one pud_alloc_one
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l4_enabled)
		return __pud_alloc_one(mm, addr);

	return NULL;
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

#define __pud_free_tlb(tlb, pud, addr)	pud_free((tlb)->mm, pud)

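/*
 * Allocate a zeroed page for a P4D table when five levels are in use.
 * User page tables are charged to the memcg (GFP_PGTABLE_USER includes
 * __GFP_ACCOUNT); kernel tables allocated for init_mm are not.
 */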
#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l5_enabled) {
		gfp_t gfp = GFP_PGTABLE_USER;

		if (mm == &init_mm)
			gfp = GFP_PGTABLE_KERNEL;
		return (p4d_t *)get_zeroed_page(gfp);
	}

	return NULL;
}

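/*
 * Free a P4D table page.  The BUG_ON catches a pointer that is not
 * page-aligned and therefore cannot be a table allocated above.
 */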
static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

#define p4d_free p4d_free
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (pgtable_l5_enabled)
		__p4d_free(mm, p4d);
}

#define __p4d_free_tlb(tlb, p4d, addr)	p4d_free((tlb)->mm, p4d)
#endif /* __PAGETABLE_PMD_FOLDED */

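/*
 * Copy the kernel half of init_mm's PGD into a freshly allocated PGD so
 * that every process shares the same kernel mappings.
 */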
static inline void sync_kernel_mappings(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD,
	       init_mm.pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

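/*
 * Allocate a PGD for a new mm: zero the user slots, then mirror the
 * kernel entries from init_mm via sync_kernel_mappings().
 */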
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		sync_kernel_mappings(pgd);
	}
	return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED

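/* Free a PMD table from the generic mmu_gather teardown path. */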
#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

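/*
 * Tear down a PTE table: run the page-table destructor (which releases
 * the split ptlock) and hand the page to the mmu_gather batch so it is
 * only freed once the relevant TLB entries have been flushed.
 */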
#define __pte_free_tlb(tlb, pte, buf)	\
do {					\
	pgtable_pte_page_dtor(pte);	\
	tlb_remove_page((tlb), pte);	\
} while (0)
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */