/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_PPC_MM_H
#define _ASM_PPC_MM_H

#include <public/xen.h>
#include <xen/pdx.h>
#include <xen/types.h>
#include <asm/config.h>
#include <asm/page-bits.h>
#include <asm/page.h>

void setup_initial_pagetables(void);

#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
#define gfn_to_gaddr(gfn) pfn_to_paddr(gfn_x(gfn))
#define gaddr_to_gfn(ga)  _gfn(paddr_to_pfn(ga))
#define mfn_to_maddr(mfn) pfn_to_paddr(mfn_x(mfn))
#define maddr_to_mfn(ma)  _mfn(paddr_to_pfn(ma))
#define vmap_to_mfn(va)   maddr_to_mfn(virt_to_maddr((vaddr_t)(va)))
#define vmap_to_page(va)  mfn_to_page(vmap_to_mfn(va))

#define virt_to_maddr(va) ((paddr_t)((vaddr_t)(va) & PADDR_MASK))
#define maddr_to_virt(pa) ((void *)((paddr_t)(pa) | XEN_VIRT_START))

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x)             (virt_to_maddr(x))
#define __va(x)             (maddr_to_virt(x))
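
/*
 * Illustrative sketch (not part of the interface): the Xen heap is direct
 * mapped, so __pa() simply masks off the XEN_VIRT_START bits and __va()
 * ORs them back in, making the pair invertible for any direct-map pointer:
 *
 *     void *va = some_xen_heap_pointer;
 *     paddr_t ma = __pa(va);
 *     ASSERT(__va(ma) == va);
 *
 * Here 'some_xen_heap_pointer' is a stand-in for any address returned by
 * the Xen heap allocator.
 */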

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define __virt_to_mfn(va)  (virt_to_maddr(va) >> PAGE_SHIFT)
#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))

/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *virt_to_page(const void *v)
{
    BUG_ON("unimplemented");
    return NULL;
}

#define virt_to_mfn(va)     __virt_to_mfn(va)
#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)

#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
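
/*
 * Worked example (illustrative, assuming a 64-bit build where
 * BITS_PER_LONG == 64): PG_shift(1) == 63, so PG_mask(1, 1) == 1UL << 63
 * (the top bit), and PG_mask(3, 9) == 3UL << 55 (a 2-bit field occupying
 * bits 56:55). In general, PG_mask(x, idx) places 'x' in a field whose
 * lowest bit is bit BITS_PER_LONG - idx.
 */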

#define PGT_none          PG_mask(0, 1)  /* no special uses of this page   */
#define PGT_writable_page PG_mask(1, 1)  /* has writable mappings?         */
#define PGT_type_mask     PG_mask(1, 1)  /* Bits 31 or 63.                 */

/* 2-bit count of uses of this frame as its current type. */
#define PGT_count_mask    PG_mask(3, 3)

/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated    PG_shift(1)
#define PGC_allocated     PG_mask(1, 1)
/* Page is Xen heap? */
#define _PGC_xen_heap     PG_shift(2)
#define PGC_xen_heap      PG_mask(1, 2)
/* Page is broken? */
#define _PGC_broken       PG_shift(7)
#define PGC_broken        PG_mask(1, 7)
/* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
#define PGC_state           PG_mask(3, 9)
#define PGC_state_inuse     PG_mask(0, 9)
#define PGC_state_offlining PG_mask(1, 9)
#define PGC_state_offlined  PG_mask(2, 9)
#define PGC_state_free      PG_mask(3, 9)
#define page_state_is(pg, st) (((pg)->count_info & PGC_state) == PGC_state_##st)
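
/*
 * Usage sketch (hypothetical caller, shown for illustration only):
 *
 *     if ( page_state_is(pg, free) )
 *         ... 'pg' is neither in use nor being offlined ...
 */
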
/* Page is not reference counted. */
#define _PGC_extra        PG_shift(10)
#define PGC_extra         PG_mask(1, 10)

/* Count of references to this frame. */
#define PGC_count_width   PG_shift(10)
#define PGC_count_mask    ((1UL << PGC_count_width) - 1)

/*
 * Page needs to be scrubbed. Since this bit can only be set on a page that
 * is free (i.e. in PGC_state_free), we can reuse the PGC_allocated bit.
 */
#define _PGC_need_scrub   _PGC_allocated
#define PGC_need_scrub    PGC_allocated

#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
    (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))

#define is_xen_fixed_mfn(mfn)                                      \
    ((mfn_to_maddr(mfn) >= virt_to_maddr((vaddr_t)_start)) &&      \
     (mfn_to_maddr(mfn) <= virt_to_maddr((vaddr_t)_end - 1)))

#define page_get_owner(_p)     (_p)->v.inuse.domain
#define page_set_owner(_p, _d) ((_p)->v.inuse.domain = (_d))

/* TODO: implement */
#define mfn_valid(mfn) ({ (void) (mfn); 0; })

#define domain_set_alloc_bitsize(d) ((void)(d))
#define domain_clamp_alloc_bitsize(d, b) (b)

#define PFN_ORDER(pfn_) ((pfn_)->v.free.order)

struct page_info
{
    /* Each frame can be threaded onto a doubly-linked list. */
    struct page_list_entry list;

    /* Reference count and various PGC_xxx flags and fields. */
    unsigned long count_info;

    /* Context-dependent fields follow... */
    union {
        /* Page is in use: ((count_info & PGC_count_mask) != 0). */
        struct {
            /* Type reference count and various PGT_xxx flags and fields. */
            unsigned long type_info;
        } inuse;
        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
        union {
            struct {
                /*
                 * Index of the first *possibly* unscrubbed page in the buddy.
                 * One more bit than maximum possible order to accommodate
                 * INVALID_DIRTY_IDX.
                 */
#define INVALID_DIRTY_IDX ((1UL << (MAX_ORDER + 1)) - 1)
                unsigned long first_dirty:MAX_ORDER + 1;
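
                /*
                 * Illustrative example: if MAX_ORDER is 20, first_dirty is
                 * a 21-bit field and INVALID_DIRTY_IDX == (1UL << 21) - 1,
                 * a value no in-range page index within the buddy can take.
                 */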

                /* Do TLBs need flushing for safety before next page use? */
                bool need_tlbflush:1;

#define BUDDY_NOT_SCRUBBING    0
#define BUDDY_SCRUBBING        1
#define BUDDY_SCRUB_ABORT      2
                unsigned long scrub_state:2;
            };

            unsigned long val;
        } free;

    } u;

    union {
        /* Page is in use, but not as a shadow. */
        struct {
            /* Owner of this page (NULL if page is anonymous). */
            struct domain *domain;
        } inuse;

        /* Page is on a free list. */
        struct {
            /* Order-size of the free chunk this page is the head of. */
            unsigned int order;
        } free;

    } v;

    union {
        /*
         * Timestamp from 'TLB clock', used to avoid extra safety flushes.
         * Only valid for: a) free pages, and b) pages with zero type count
         */
        uint32_t tlbflush_timestamp;
    };
    uint64_t pad;
};

#define FRAMETABLE_VIRT_START  (XEN_VIRT_START + GB(32))
#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)

/* PDX of the first page in the frame table. */
extern unsigned long frametable_base_pdx;

/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn)                                            \
    (frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx))
#define page_to_mfn(pg)                                             \
    pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
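
/*
 * Illustrative round trip (assuming a valid, frame-table-covered MFN;
 * shown for demonstration only):
 *
 *     struct page_info *pg = mfn_to_page(mfn);
 *     ASSERT(mfn_eq(page_to_mfn(pg), mfn));
 *
 * Both macros bias by frametable_base_pdx so the frame table need only
 * cover the PDX range that actually contains RAM.
 */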

static inline void *page_to_virt(const struct page_info *pg)
{
    return mfn_to_virt(mfn_x(page_to_mfn(pg)));
}

/*
 * Common code requires get_page_type and put_page_type.
 * We don't care about typecounts, so we just do the minimum to make it
 * happy.
 */
static inline int get_page_type(struct page_info *page, unsigned long type)
{
    return 1;
}

static inline void put_page_type(struct page_info *page)
{
    return;
}

/* TODO */
static inline bool get_page_nr(struct page_info *page,
                               const struct domain *domain,
                               unsigned long nr)
{
    BUG_ON("unimplemented");
}

static inline void put_page_nr(struct page_info *page, unsigned long nr)
{
    BUG_ON("unimplemented");
}

static inline void put_page_and_type(struct page_info *page)
{
    put_page_type(page);
    put_page(page);
}

/*
 * PPC does not have an M2P, but common code expects a handful of
 * M2P-related defines and functions. Provide dummy versions of these.
 */
#define INVALID_M2P_ENTRY        (~0UL)
#define SHARED_M2P_ENTRY         (~0UL - 1UL)
#define SHARED_M2P(_e)           ((_e) == SHARED_M2P_ENTRY)

#define set_gpfn_from_mfn(mfn, pfn) BUG_ON("unimplemented")
#define mfn_to_gfn(d, mfn) ({ BUG_ON("unimplemented"); _gfn(0); })

#define PDX_GROUP_SHIFT XEN_PT_SHIFT_LVL_3

static inline unsigned long domain_get_maximum_gpfn(struct domain *d)
{
    BUG_ON("unimplemented");
    return 0;
}

static inline long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    BUG_ON("unimplemented");
    return 0;
}

static inline unsigned int arch_get_dma_bitsize(void)
{
    return 32; /* TODO */
}

/*
 * On PPC, all the RAM is currently direct mapped in Xen.
 * Hence always return true.
 */
static inline bool arch_mfns_in_directmap(unsigned long mfn, unsigned long nr)
{
    return true;
}

#endif /* _ASM_PPC_MM_H */