/*
 * Interface to map physical memory onto contiguous virtual memory areas.
 *
 * Two ranges of linear address space are reserved for this purpose: A general
 * purpose area (VMAP_DEFAULT) and a livepatch-specific area (VMAP_XEN). The
 * latter is used when loading livepatches and the former for everything else.
 */
#ifndef __XEN_VMAP_H__
#define __XEN_VMAP_H__

#include <xen/mm-frame.h>
#include <xen/mm-types.h>
#include <xen/page-size.h>

/* Identifiers for the linear ranges tracked by vmap */
enum vmap_region {
    /*
     * Region used for general purpose RW mappings. Mapping/allocating memory
     * here can induce extra allocations for the supporting page tables.
     */
    VMAP_DEFAULT,
    /*
     * Region used for loading livepatches. Can't use VMAP_DEFAULT because it
     * must live close to the running Xen image. The caller also ensures all
     * page tables are already in place with adequate PTE flags.
     */
    VMAP_XEN,
    /* Sentinel value for bounds checking */
    VMAP_REGION_NR,
};

/*
 * Runtime initialiser for each vmap region type
 *
 * Must only be called once per vmap region type.
 *
 * @param type  Designation of the region to initialise.
 * @param start Start address of the `type` region.
 * @param end   End address (not inclusive) of the `type` region.
 */
void vm_init_type(enum vmap_region type, void *start, void *end);

/*
 * Maps a set of physical ranges onto a single virtual range
 *
 * `mfn` is an array of `nr` physical ranges, each of which is `granularity`
 * pages wide. `type` defines which vmap region to use for the mapping and
 * `flags` is the PTE flags the page table leaves are meant to have.
 *
 * Typically used via the vmap() and vmap_contig() helpers.
 *
 * @param mfn          Array of mfns
 * @param granularity  Number of contiguous pages each mfn represents
 * @param nr           Number of mfns in the `mfn` array
 * @param align        Alignment of the virtual area to map
 * @param flags        PTE flags for the leaves of the PT tree.
 * @param type         Which region to create the mappings on
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void *__vmap(const mfn_t *mfn, unsigned int granularity, unsigned int nr,
             unsigned int align, pte_attr_t flags, enum vmap_region type);

/*
 * Map an array of pages contiguously into the VMAP_DEFAULT vmap region
 *
 * @param[in] mfn Pointer to the base of an array of mfns
 * @param[in] nr  Number of mfns in the array
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void *vmap(const mfn_t *mfn, unsigned int nr);

/*
 * Maps physically contiguous pages onto the VMAP_DEFAULT vmap region
 *
 * @param mfn Base mfn of the physical region
 * @param nr  Number of mfns in the physical region
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void *vmap_contig(mfn_t mfn, unsigned int nr);

/*
 * Unmaps a range of virtually contiguous memory from one of the vmap regions
 *
 * The system remembers internally how wide the mapping is and unmaps it all.
 * It also can determine the vmap region type from the `va`.
 *
 * @param va Virtual base address of the range to unmap
 */
void vunmap(const void *va);

/*
 * Allocate `size` octets of possibly non-contiguous physical memory and map
 * them contiguously in the VMAP_DEFAULT vmap region
 *
 * @param size Number of octets to allocate (the previous text, "Pointer to
 *             the base of an array of mfns", was a copy-paste error)
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void *vmalloc(size_t size);

/* Same as vmalloc(), but for the VMAP_XEN vmap region. */
void *vmalloc_xen(size_t size);

/* Same as vmalloc(), but set the contents to zero before returning */
void *vzalloc(size_t size);

/*
 * Unmap and free memory from vmalloc(), vmalloc_xen() or vzalloc()
 *
 * The system remembers internally how wide the allocation is and
 * unmaps/frees it all.
 *
 * @param va Virtual base address of the range to free and unmap
 */
void vfree(void *va);

/*
 * Analogous to vmap_contig(), but for IO memory
 *
 * Unlike vmap_contig(), it ensures architecturally correct cacheability
 * settings are set for the mapped IO memory.
 *
 * @param pa  Physical base address of the MMIO region.
 * @param len Length of the MMIO region in octets.
 * @return Pointer to the mapped area on success; NULL otherwise.
 */
void __iomem *ioremap(paddr_t pa, size_t len);

/* Return the number of pages in the mapping starting at address 'va' */
unsigned int vmap_size(const void *va);

131 /* Analogous to vunmap(), but for IO memory mapped via ioremap() */
iounmap(void __iomem * va)132 static inline void iounmap(void __iomem *va)
133 {
134     unsigned long addr = (unsigned long)(void __force *)va;
135 
136     vunmap((void *)(addr & PAGE_MASK));
137 }
138 
/* Pointer to 1 octet past the end of the VMAP_DEFAULT virtual area */
void *arch_vmap_virt_end(void);

/* Initialises the VMAP_DEFAULT virtual range */
static inline void vm_init(void)
{
#ifdef CONFIG_HAS_VMAP
    /* The range spans from the arch-provided start up to (exclusive)
     * the arch-provided end of the default vmap area. */
    void *start = (void *)VMAP_VIRT_START;

    vm_init_type(VMAP_DEFAULT, start, arch_vmap_virt_end());
#endif
}


#endif /* __XEN_VMAP_H__ */