/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef ASM__RISCV__MM_H
#define ASM__RISCV__MM_H

#include <public/xen.h>
#include <xen/bug.h>
#include <xen/const.h>
#include <xen/mm-frame.h>
#include <xen/pdx.h>
#include <xen/pfn.h>
#include <xen/sections.h>
#include <xen/types.h>

#include <asm/page.h>
#include <asm/page-bits.h>

extern vaddr_t directmap_virt_start;

#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))

static inline pte_t paddr_to_pte(paddr_t paddr,
                                 unsigned int permissions)
{
    return (pte_t) { .pte = (paddr_to_pfn(paddr) << PTE_PPN_SHIFT) | permissions };
}

static inline paddr_t pte_to_paddr(pte_t pte)
{
    return pfn_to_paddr(pte.pte >> PTE_PPN_SHIFT);
}
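
/*
 * Illustrative round trip (a sketch, not part of the interface): encoding a
 * physical address and decoding it again recovers its page-aligned part, e.g.
 *     pte_t pte = paddr_to_pte(pa, PTE_VALID | PTE_READABLE);
 *     ASSERT(pte_to_paddr(pte) == (pa & PAGE_MASK));
 * The permission bit names (PTE_VALID, PTE_READABLE) are assumed to come from
 * <asm/page.h>; any sub-page offset in pa is dropped by paddr_to_pfn().
 */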

#define gfn_to_gaddr(gfn)   pfn_to_paddr(gfn_x(gfn))
#define gaddr_to_gfn(ga)    _gfn(paddr_to_pfn(ga))
#define mfn_to_maddr(mfn)   pfn_to_paddr(mfn_x(mfn))
#define maddr_to_mfn(ma)    _mfn(paddr_to_pfn(ma))
#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))

static inline void *maddr_to_virt(paddr_t ma)
{
    unsigned long va = directmap_virt_start + maddr_to_directmapoff(ma);

    ASSERT((va >= DIRECTMAP_VIRT_START) && (va <= DIRECTMAP_VIRT_END));

    return (void *)va;
}

#define mfn_from_pte(pte) maddr_to_mfn(pte_to_paddr(pte))

#define vmap_to_mfn(va)                                 \
({                                                      \
    pte_t __entry = pt_walk((vaddr_t)(va), NULL);       \
    BUG_ON(!pte_is_mapping(__entry));                   \
    maddr_to_mfn(pte_to_paddr(__entry));                \
})

/*
 * virt_to_maddr() is expected to work with virtual addresses from either
 * the directmap region or Xen's linkage (XEN_VIRT_START) region.
 * Therefore, it is sufficient to check only these regions and assert if `va`
 * is not within the directmap or Xen's linkage region.
 */
static inline unsigned long virt_to_maddr(unsigned long va)
{
    const unsigned long xen_size = (unsigned long)(_end - _start);
    const unsigned long xen_virt_start = _AC(XEN_VIRT_START, UL);
    const unsigned long xen_virt_end = xen_virt_start + xen_size - 1;

    if ( (va >= DIRECTMAP_VIRT_START) &&
         (va <= DIRECTMAP_VIRT_END) )
        return directmapoff_to_maddr(va - directmap_virt_start);

    ASSERT((va >= xen_virt_start) && (va <= xen_virt_end));

    /*
     * The .init* sections will be freed when Xen completes booting,
     * so the [__init_begin, __init_end) range must be excluded.
     */
    ASSERT((system_state < SYS_STATE_active) || !is_init_section(va));

    /* phys_offset = load_start - XEN_VIRT_START */
    return phys_offset + va;
}
#define virt_to_maddr(va) virt_to_maddr((unsigned long)(va))
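
/*
 * Illustrative usage (a sketch, not a guarantee of the interface): for an
 * address inside the directmap the two helpers above act as inverses, e.g.
 *     void *va = maddr_to_virt(ma);
 *     ASSERT(virt_to_maddr(va) == ma);
 * whereas for addresses inside Xen's own image only virt_to_maddr() applies,
 * using phys_offset rather than the directmap offset.
 */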

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define __virt_to_mfn(va)  mfn_x(maddr_to_mfn(virt_to_maddr(va)))
#define __mfn_to_virt(mfn) maddr_to_virt(mfn_to_maddr(_mfn(mfn)))

/*
 * We define non-underscored wrappers for the above conversion functions.
 * These are overridden in various source files while the underscored
 * versions remain intact.
 */
#define virt_to_mfn(va)   __virt_to_mfn(va)
#define mfn_to_virt(mfn)  __mfn_to_virt(mfn)
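
/*
 * Typical override in a source file (a sketch of the pattern used by common
 * code; which files actually do this is not spelled out here):
 *     #undef virt_to_mfn
 *     #define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
 * so callers in that file get a type-safe mfn_t while __virt_to_mfn() keeps
 * returning a plain unsigned long.
 */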

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct page_list_entry list;

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {
        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;

        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        union {
            struct {
                /*
                 * Index of the first *possibly* unscrubbed page in the buddy.
                 * One more bit than maximum possible order to accommodate
                 * INVALID_DIRTY_IDX.
                 */
#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
                unsigned long first_dirty:MAX_ORDER + 1;

                /* Do TLBs need flushing for safety before next page use? */
                bool need_tlbflush:1;

#define BUDDY_NOT_SCRUBBING    0
#define BUDDY_SCRUBBING        1
#define BUDDY_SCRUB_ABORT      2
                unsigned long scrub_state:2;
            };

            unsigned long val;
        } free;
    } u;

    union {
        /* Page is in use */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            struct domain *domain;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            unsigned int order;
        } free;
    } v;

    union {
        /*
         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
         * Only valid for: a) free pages, and b) pages with zero type count
         */
        uint32_t tlbflush_timestamp;
    };
};

extern struct page_info *frametable_virt_start;

#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)

/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn)  (frametable_virt_start + mfn_x(mfn))
#define page_to_mfn(pg)   _mfn((pg) - frametable_virt_start)
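
/*
 * Illustrative property (follows directly from the two macros above): for
 * any MFN covered by the frametable,
 *     mfn_eq(page_to_mfn(mfn_to_page(mfn)), mfn)
 * holds, since both directions are plain offsets from frametable_virt_start.
 */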

static inline void *page_to_virt(const struct page_info *pg)
{
    return mfn_to_virt(mfn_x(page_to_mfn(pg)));
}

/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *virt_to_page(const void *v)
{
    unsigned long va = (unsigned long)v;

    ASSERT((va >= DIRECTMAP_VIRT_START) && (va <= DIRECTMAP_VIRT_END));

    return frametable_virt_start + PFN_DOWN(va - directmap_virt_start);
}

/*
 * Common code requires get_page_type and put_page_type.
 * We don't care about typecounts so we just do the minimum to make it
 * happy.
 */
static inline int get_page_type(struct page_info *page, unsigned long type)
{
    return 1;
}

static inline void put_page_type(struct page_info *page)
{
}

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

/*
 * RISC-V does not have an M2P, but common code expects a handful of
 * M2P-related defines and functions. Provide dummy versions of these.
 */
#define INVALID_M2P_ENTRY (~0UL)
#define SHARED_M2P_ENTRY  (~0UL - 1UL)
#define SHARED_M2P(_e)    ((_e) == SHARED_M2P_ENTRY)

#define set_gpfn_from_mfn(mfn, pfn) do { (void)(mfn), (void)(pfn); } while (0)
#define mfn_to_gfn(d, mfn) ((void)(d), _gfn(mfn_x(mfn)))

#define PDX_GROUP_SHIFT (PAGE_SHIFT + VPN_BITS)

static inline unsigned long domain_get_maximum_gpfn(struct domain *d)
{
    BUG_ON("unimplemented");
    return 0;
}

static inline long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    BUG_ON("unimplemented");
    return 0;
}

/*
 * On RISC-V, all RAM is currently directly mapped in Xen,
 * hence always return true.
 */
static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
{
    return true;
}

#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
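
/*
 * Worked example (assuming a 64-bit build, i.e. BITS_PER_LONG == 64):
 * PG_shift(1) == 63 and PG_mask(1, 1) == 1UL << 63, so flags are allocated
 * downwards from the most significant bit of type_info/count_info, leaving
 * the low bits for the reference counts defined below.
 */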

#define PGT_none          PG_mask(0, 1)  /* no special uses of this page */
#define PGT_writable_page PG_mask(1, 1)  /* has writable mappings?       */
#define PGT_type_mask     PG_mask(1, 1)  /* Bits 31 or 63.               */

/* Count of uses of this frame as its current type. */
#define PGT_count_width   PG_shift(2)
#define PGT_count_mask    ((1UL << PGT_count_width) - 1)

/*
 * Page needs to be scrubbed. Since this bit can only be set on a page that is
 * free (i.e. in PGC_state_free) we can reuse PGC_allocated bit.
 */
#define _PGC_need_scrub   _PGC_allocated
#define PGC_need_scrub    PGC_allocated

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated    PG_shift(1)
#define PGC_allocated     PG_mask(1, 1)
/* Page is Xen heap? */
#define _PGC_xen_heap     PG_shift(2)
#define PGC_xen_heap      PG_mask(1, 2)
/* Page is broken? */
#define _PGC_broken       PG_shift(7)
#define PGC_broken        PG_mask(1, 7)
/* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
#define PGC_state           PG_mask(3, 9)
#define PGC_state_inuse     PG_mask(0, 9)
#define PGC_state_offlining PG_mask(1, 9)
#define PGC_state_offlined  PG_mask(2, 9)
#define PGC_state_free      PG_mask(3, 9)
#define page_state_is(pg, st) (((pg)->count_info & PGC_state) == PGC_state_##st)

/* Count of references to this frame. */
#define PGC_count_width   PG_shift(9)
#define PGC_count_mask    ((1UL << PGC_count_width) - 1)
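
/*
 * Worked example (again assuming BITS_PER_LONG == 64): PGC_count_width is
 * then 55, so PGC_count_mask covers bits 0-54 of count_info, while
 * PGC_allocated (bit 63), PGC_xen_heap (bit 62), PGC_broken (bit 57) and the
 * PGC_state field (bits 55-56) sit in the high bits.
 */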

#define _PGC_extra        PG_shift(10)
#define PGC_extra         PG_mask(1, 10)

#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))

#define is_xen_fixed_mfn(mfn)                                   \
    ((mfn_to_maddr(mfn) >= virt_to_maddr((vaddr_t)_start)) &&   \
     (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1)))

#define page_get_owner(p)    (p)->v.inuse.domain
#define page_set_owner(p, d) ((p)->v.inuse.domain = (d))

/* TODO: implement */
#define mfn_valid(mfn) ({ (void)(mfn); 0; })

#define domain_set_alloc_bitsize(d) ((void)(d))
#define domain_clamp_alloc_bitsize(d, b) ((void)(d), (b))

#define PFN_ORDER(pg) ((pg)->v.free.order)

extern unsigned char cpu0_boot_stack[];

void setup_initial_pagetables(void);

void enable_mmu(void);

void remove_identity_mapping(void);

unsigned long calc_phys_offset(void);

void turn_on_mmu(unsigned long ra);

static inline unsigned int arch_get_dma_bitsize(void)
{
    return 32; /* TODO */
}

void setup_fixmap_mappings(void);

void *early_fdt_map(paddr_t fdt_paddr);

#endif /* ASM__RISCV__MM_H */