/*
 * Interface to map physical memory onto contiguous virtual memory areas.
 *
 * Two ranges of linear address space are reserved for this purpose: A general
 * purpose area (VMAP_DEFAULT) and a livepatch-specific area (VMAP_XEN). The
 * latter is used when loading livepatches and the former for everything else.
 */
#if !defined(__XEN_VMAP_H__) && defined(VMAP_VIRT_START)
#define __XEN_VMAP_H__

#include <xen/mm-frame.h>
#include <xen/page-size.h>

/* Identifiers for the linear ranges tracked by vmap */
enum vmap_region {
    /*
     * Region used for general purpose RW mappings. Mapping/allocating memory
     * here can induce extra allocations for the supporting page tables.
     */
    VMAP_DEFAULT,
    /*
     * Region used for loading livepatches. Can't use VMAP_DEFAULT because it
     * must live close to the running Xen image. The caller also ensures all
     * page tables are already in place with adequate PTE flags.
     */
    VMAP_XEN,
    /* Sentinel value for bounds checking */
    VMAP_REGION_NR,
};

/*
 * Runtime initialiser for each vmap region type
 *
 * Must only be called once per vmap region type.
 *
 * @param type  Designation of the region to initialise.
 * @param start Start address of the `type` region.
 * @param end   End address (not inclusive) of the `type` region.
 */
void vm_init_type(enum vmap_region type, void *start, void *end);
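
/*
 * Illustrative sketch (not part of this interface): bringing up the livepatch
 * region.  The bounds are hypothetical; real callers pass architecture-
 * specific addresses reserved next to the running Xen image.
 *
 *   vm_init_type(VMAP_XEN, (void *)livepatch_start, (void *)livepatch_end);
 */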

/*
 * Maps a set of physical ranges onto a single virtual range
 *
 * `mfn` is an array of `nr` physical ranges, each of which is `granularity`
 * pages wide. `type` defines which vmap region to use for the mapping and
 * `flags` is the PTE flags the page table leaves are meant to have.
 *
 * Typically used via the vmap() and vmap_contig() helpers.
 *
 * @param mfn         Array of mfns
 * @param granularity Number of contiguous pages each mfn represents
 * @param nr          Number of mfns in the `mfn` array
 * @param align       Alignment of the virtual area to map
 * @param flags       PTE flags for the leaves of the PT tree.
 * @param type        Which region to create the mappings on
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void *__vmap(const mfn_t *mfn, unsigned int granularity, unsigned int nr,
             unsigned int align, unsigned int flags, enum vmap_region type);
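
/*
 * Illustrative sketch (assumptions: `mfn` is the base of an order-2, i.e.
 * 4-frame, physically contiguous block and PAGE_HYPERVISOR is the arch's
 * default RW PTE flags): map it as a single range of granularity 4.
 *
 *   void *va = __vmap(&mfn, 1u << 2, 1, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
 *
 *   if ( !va )
 *       return NULL;
 */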

/*
 * Map an array of pages contiguously into the VMAP_DEFAULT vmap region
 *
 * @param[in] mfn Pointer to the base of an array of mfns
 * @param[in] nr  Number of mfns in the array
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void *vmap(const mfn_t *mfn, unsigned int nr);
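
/*
 * Illustrative sketch (the mfn values are hypothetical): map two frames that
 * need not be physically contiguous into one virtually contiguous area, then
 * tear the mapping down with vunmap(), declared below.
 *
 *   mfn_t mfns[] = { _mfn(0x1000), _mfn(0x2000) };
 *   char *va = vmap(mfns, ARRAY_SIZE(mfns));
 *
 *   if ( va )
 *   {
 *       va[0] = 'a';             // First byte of the first frame
 *       va[PAGE_SIZE] = 'b';     // First byte of the second frame
 *       vunmap(va);
 *   }
 */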

/*
 * Maps physically contiguous pages onto the VMAP_DEFAULT vmap region
 *
 * @param mfn Base mfn of the physical region
 * @param nr  Number of mfns in the physical region
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void *vmap_contig(mfn_t mfn, unsigned int nr);
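
/*
 * Illustrative sketch (assumes `pg` came from alloc_domheap_pages() with
 * order 1, i.e. 2 physically contiguous frames): map the whole block.
 *
 *   void *va = vmap_contig(page_to_mfn(pg), 1u << 1);
 */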

/*
 * Unmaps a range of virtually contiguous memory from one of the vmap regions
 *
 * The system remembers internally how wide the mapping is and unmaps it all.
 * It can also determine the vmap region type from `va`.
 *
 * @param va Virtual base address of the range to unmap
 */
void vunmap(const void *va);
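
/*
 * Illustrative sketch: the same call tears down a mapping made by either
 * vmap() or vmap_contig(); no size needs to be passed back.
 *
 *   void *va = vmap_contig(mfn, 4);   // Hypothetical 4-frame mapping
 *
 *   if ( va )
 *       vunmap(va);
 */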

/*
 * Allocate `size` octets of possibly non-contiguous physical memory and map
 * them contiguously in the VMAP_DEFAULT vmap region
 *
 * @param size Number of octets to allocate
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void *vmalloc(size_t size);
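
/*
 * Illustrative sketch (`nr_entries` and `struct foo` are hypothetical): back
 * a large, virtually contiguous buffer with whatever frames the allocator
 * can find, contiguous or not.
 *
 *   struct foo *buf = vmalloc(nr_entries * sizeof(*buf));
 *
 *   if ( !buf )
 *       return -ENOMEM;
 */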

/* Same as vmalloc(), but for the VMAP_XEN vmap region. */
void *vmalloc_xen(size_t size);

/* Same as vmalloc(), but set the contents to zero before returning */
void *vzalloc(size_t size);

/*
 * Unmap and free memory from vmalloc(), vmalloc_xen() or vzalloc()
 *
 * The system remembers internally how wide the allocation is and
 * unmaps/frees it all.
 *
 * @param va Virtual base address of the range to free and unmap
 */
void vfree(void *va);
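
/*
 * Illustrative sketch: a typical allocate/use/free lifecycle.  The element
 * count is hypothetical; vfree() only needs the base address returned by the
 * allocator.
 *
 *   uint32_t *tbl = vzalloc(1024 * sizeof(*tbl));
 *
 *   if ( tbl )
 *   {
 *       tbl[0] = 1;   // Contents start out zeroed
 *       vfree(tbl);
 *   }
 */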

/*
 * Analogous to vmap_contig(), but for IO memory
 *
 * Unlike vmap_contig(), it ensures architecturally correct cacheability
 * settings are set for the mapped IO memory.
 *
 * @param pa  Physical base address of the MMIO region.
 * @param len Length of the MMIO region in octets.
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void __iomem *ioremap(paddr_t pa, size_t len);
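
/*
 * Illustrative sketch (the MMIO address and register offset are
 * hypothetical): map a device's register window, poke one register and
 * unmap it again via iounmap(), defined below.
 *
 *   void __iomem *regs = ioremap(0xfe000000, PAGE_SIZE);
 *
 *   if ( regs )
 *   {
 *       writel(0x1, regs + 0x10);
 *       iounmap(regs);
 *   }
 */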

/* Return the number of pages in the mapping starting at address 'va' */
unsigned int vmap_size(const void *va);
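
/*
 * Illustrative sketch: recover the extent of an existing vmap mapping from
 * its base address alone.
 *
 *   unsigned int pages = vmap_size(va);
 *   size_t bytes = (size_t)pages << PAGE_SHIFT;
 */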

/* Analogous to vunmap(), but for IO memory mapped via ioremap() */
static inline void iounmap(void __iomem *va)
{
    unsigned long addr = (unsigned long)(void __force *)va;

    vunmap((void *)(addr & PAGE_MASK));
}

/* Pointer to 1 octet past the end of the VMAP_DEFAULT virtual area */
void *arch_vmap_virt_end(void);

/* Initialises the VMAP_DEFAULT virtual range */
static inline void vm_init(void)
{
    vm_init_type(VMAP_DEFAULT, (void *)VMAP_VIRT_START, arch_vmap_virt_end());
}

#endif /* __XEN_VMAP_H__ */