1 #ifndef __ASM_PPC_P2M_H__
2 #define __ASM_PPC_P2M_H__
3
4 #include <asm/page-bits.h>
5
6 #define paddr_bits PADDR_BITS
7
/*
 * Possible types for each page tracked in a p2m entry.
 *
 * Only 4 PTE bits are reserved for the type, so at most 16 values can
 * be stored directly.  Should more be needed in the future, higher
 * values can serve as pseudo-types that are never written into the p2m
 * entry itself.
 */
typedef enum {
    p2m_invalid = 0,    /* Nothing mapped here */
    p2m_ram_rw,         /* Normal read/write guest RAM */
    p2m_ram_ro,         /* Read-only; writes are silently dropped */
    p2m_max_real_type,  /* Types after this are not stored in the p2m */
} p2m_type_t;
21
22 #include <xen/p2m-common.h>
23
/*
 * Acquire both a general reference and a type reference on @page for
 * @domain.  Unimplemented stub on PPC: always trips BUG_ON(), so the
 * nominal failure return of 1 is never actually reached.
 */
static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    BUG_ON("unimplemented");
    return 1;
}
31
/* Look up a GFN and take a reference count on the backing page. */
typedef unsigned int p2m_query_t;
#define P2M_ALLOC   (1u << 0) /* Populate PoD and paged-out entries */
#define P2M_UNSHARE (1u << 1) /* Break CoW sharing */
36
/*
 * Translate @gfn in @d and take a reference on the backing page,
 * reporting its type via @t.  Unimplemented stub on PPC: BUG_ON()
 * fires before the NULL return is reached.
 */
static inline struct page_info *get_page_from_gfn(
    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
{
    BUG_ON("unimplemented");
    return NULL;
}
43
/*
 * Notification hook for a change of @d's memory cacheability/type.
 * Unimplemented stub on PPC: always trips BUG_ON().
 */
static inline void memory_type_changed(struct domain *d)
{
    BUG_ON("unimplemented");
}
48
49
/*
 * Mark a 2^@order range of guest frames starting at @gfn as
 * populate-on-demand.  Unimplemented stub on PPC: BUG_ON() fires
 * before the nominal failure return of 1 is reached.
 */
static inline int guest_physmap_mark_populate_on_demand(struct domain *d,
                                                        unsigned long gfn,
                                                        unsigned int order)
{
    BUG_ON("unimplemented");
    return 1;
}
56
/*
 * Insert a 2^@page_order mapping of @mfn at @gfn into @d's p2m with
 * type @t.  Unimplemented stub on PPC: BUG_ON() fires before the
 * nominal failure return of 1 is reached.
 */
static inline int guest_physmap_add_entry(struct domain *d,
                                          gfn_t gfn,
                                          mfn_t mfn,
                                          unsigned long page_order,
                                          p2m_type_t t)
{
    BUG_ON("unimplemented");
    return 1;
}
66
67 /* Untyped version for RAM only, for compatibility */
68 static inline int __must_check
guest_physmap_add_page(struct domain * d,gfn_t gfn,mfn_t mfn,unsigned int page_order)69 guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
70 unsigned int page_order)
71 {
72 return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
73 }
74
gfn_to_mfn(struct domain * d,gfn_t gfn)75 static inline mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
76 {
77 BUG_ON("unimplemented");
78 return _mfn(0);
79 }
80
/*
 * Whether XENMEM_acquire_resource is usable for @d: not on PPC.
 * Supporting it would require refcounting the foreign mappings and
 * walking the p2m on teardown, in order to remove foreign pages from
 * the p2m and drop the extra reference counts.
 */
static inline bool arch_acquire_resource_check(struct domain *d)
{
    return false;
}
90
/* Alternate-p2m view switching is not supported on PPC; no-op. */
static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
{
}
95
96 #endif /* __ASM_PPC_P2M_H__ */
97