// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

// Low-level page table manipulation routines.
//
// Functions in this interface generally should only be called by modules that
// are responsible for managing address spaces. Don't call them directly from
// more general modules (syscall handlers, etc.).
//
// The map operations return an error when the requested mapping is
// inconsistent with existing mappings in the range, and when allocation
// fails. Unmap does not return an error for non-existent mappings.
//
// The caller will generally need to operate on some higher-level model of the
// address space first, and hold locks on that model (possibly fine-grained)
// to prevent conflicting updates. The update operations defined here are not
// required to be thread-safe with respect to updates affecting overlapping
// address ranges.
// FIXME: The current implementation is not thread-safe and the caller must
// ensure that the address space being operated on is locked. This may
// require different modules that directly operate on a page table to share a
// lock.
//
// A caller must always flag the start of a set of one or more map and unmap
// operations by calling the start function. If synchronisation of updates
// with the page-table walkers (either locally or on other CPUs) can be
// deferred, then it will be deferred until a call is made to the corresponding
// commit function. The caller must always call the commit function before
// relying in any way on the updates having taken effect.
//
// In multi-processor systems, remote CPUs or IOMMU-protected devices using
// an affected address space might either continue to see the old mapping, or
// see a temporarily invalid mapping (which may extend outside the specified
// address range), especially if the mapping change has caused a page to
// change sizes. This will not occur for any memory access that the commit()
// call _inter-thread happens before_ (as defined by C18), or for any call
// to lookup() or lookup_range() that completes after an RCU grace period has
// elapsed after the commit function returns.
//
// In general, any function in this file that returns error_t or bool will
// validate its arguments and fail with an error code or false result if
// they are invalid. Any function that returns void will panic on invalid
// inputs.

//
// Hypervisor page table management.
//
// In order to correctly attribute ownership of page table levels, the caller
// must avoid allocating page table levels in one partition if they might be
// subsequently freed into another partition. This can be done by selecting
// some allocation block size that mappings will never cross, and pre-
// allocating page table levels down to that block size from a global pool.
//
// Mappings with NONE access type may be used to indicate that the hypervisor
// should only be permitted to access the mapping on behalf of a VM, and will
// take specific action to enable and disable such accesses (e.g. clearing and
// setting PAN on ARMv8.1). Not all architectures support this; a different
// technique must be used for useraccess on those that do not.
//

// Returns false if the specified address is unmapped.
bool
pgtable_hyp_lookup(uintptr_t virt, paddr_t *mapped_base, size_t *mapped_size,
		   pgtable_hyp_memtype_t *mapped_memtype,
		   pgtable_access_t	 *mapped_access);

// Returns false if there is no mapping in the specified range. If a mapping
// is found and can be efficiently determined to be the last mapping in the
// range, the boolean *remainder_unmapped will be set to true; otherwise it
// will be unchanged. Note that the returned mapping may extend beyond the
// specified range.
bool
pgtable_hyp_lookup_range(uintptr_t virt_base, size_t virt_size,
			 uintptr_t *mapped_virt, paddr_t *mapped_phys,
			 size_t		       *mapped_size,
			 pgtable_hyp_memtype_t *mapped_memtype,
			 pgtable_access_t      *mapped_access,
			 bool		       *remainder_unmapped);

// Creates page table levels owned by the given partition which are able to
// directly map entries covering the given size, but don't actually map
// anything. This is intended for preallocating levels using the hypervisor's
// private allocator, but might be more generally useful.
error_t
pgtable_hyp_preallocate(partition_t *partition, uintptr_t virt, size_t size);
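//
// A minimal usage sketch, assuming a 2MiB allocation block size (the
// partition and virt values here are placeholders, and OK is assumed to be
// the error_t success value):
//
//	// Preallocate levels from the global pool so that later map and
//	// unmap calls within this block never allocate or free levels in a
//	// per-VM partition.
//	error_t err = pgtable_hyp_preallocate(partition, virt, 0x200000U);
//	if (err != OK) {
//		// Handle the allocation failure.
//	}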

extern opaque_lock_t pgtable_hyp_map_lock;

// Flags the start of one or more map or unmap calls.
void
pgtable_hyp_start(void) ACQUIRE_LOCK(pgtable_hyp_map_lock);

// Creates a new mapping, possibly merging adjacent mappings into large blocks.
//
// An error will be returned if there are any existing mappings in the given
// region that are not exactly identical to the requested mapping.
//
// If merge_limit is nonzero, then this will attempt to merge page table levels
// that become congruent as a result of this operation into larger pages, as
// long as the new size is less than merge_limit. Any page table levels freed by
// this will be freed into the specified partition, so merge_limit should be no
// greater than preserved_prealloc would be for an unmap operation in the same
// region.
//
// Note that this operation may cause transient translation aborts or TLB
// conflict aborts in the affected range or within a merge_limit-aligned region
// around it. The caller is responsible for not making calls with a nonzero
// merge_limit that might have those effects on the hypervisor code, the stack
// of any hypervisor thread, or any other address that may be touched during the
// handling of a transient hypervisor fault.
error_t
pgtable_hyp_map_merge(partition_t *partition, uintptr_t virt, size_t size,
		      paddr_t phys, pgtable_hyp_memtype_t memtype,
		      pgtable_access_t access, vmsa_shareability_t shareability,
		      size_t merge_limit) REQUIRE_LOCK(pgtable_hyp_map_lock);
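//
// For example, a caller mapping a 4KiB page inside a 2MiB region might allow
// congruent levels to be folded into a 2MiB block mapping (a sketch; all
// argument values are placeholders, and the region is assumed to have been
// preallocated from the same partition):
//
//	err = pgtable_hyp_map_merge(partition, virt, 0x1000U, phys, memtype,
//				    access, shareability, 0x200000U);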

// Creates a new mapping. No attempt will be made to merge adjacent mappings.
static inline error_t
pgtable_hyp_map(partition_t *partition, uintptr_t virt, size_t size,
		paddr_t phys, pgtable_hyp_memtype_t memtype,
		pgtable_access_t access, vmsa_shareability_t shareability)
	REQUIRE_LOCK(pgtable_hyp_map_lock)
{
	return pgtable_hyp_map_merge(partition, virt, size, phys, memtype,
				     access, shareability, 0U);
}

// Creates a new mapping, replacing any existing mappings in the region, and
// possibly merging adjacent mappings into large blocks. The merge_limit
// argument has the same semantics as for pgtable_hyp_map_merge().
error_t
pgtable_hyp_remap_merge(partition_t *partition, uintptr_t virt, size_t size,
			paddr_t phys, pgtable_hyp_memtype_t memtype,
			pgtable_access_t    access,
			vmsa_shareability_t shareability, size_t merge_limit)
	REQUIRE_LOCK(pgtable_hyp_map_lock);

// Creates a new mapping, replacing any existing mappings in the region.
static inline error_t
pgtable_hyp_remap(partition_t *partition, uintptr_t virt, size_t size,
		  paddr_t phys, pgtable_hyp_memtype_t memtype,
		  pgtable_access_t access, vmsa_shareability_t shareability)
	REQUIRE_LOCK(pgtable_hyp_map_lock)
{
	return pgtable_hyp_remap_merge(partition, virt, size, phys, memtype,
				       access, shareability, 0U);
}

// Removes all mappings in the given range. Frees levels into the specified
// partition's allocators, but only if they cannot be used to create mappings
// of the size preserved_prealloc. The preserved_prealloc argument can
// therefore be used to prevent freeing of levels created by a previous
// pgtable_hyp_preallocate() call to the specified partition.
void
pgtable_hyp_unmap(partition_t *partition, uintptr_t virt, size_t size,
		  size_t preserved_prealloc) REQUIRE_LOCK(pgtable_hyp_map_lock);
#define PGTABLE_HYP_UNMAP_PRESERVE_ALL	0U
#define PGTABLE_HYP_UNMAP_PRESERVE_NONE util_bit((sizeof(uintptr_t) * 8U) - 1U)
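//
// For example, to remove mappings while keeping every preallocated level
// (a sketch; partition, virt and size are placeholders):
//
//	pgtable_hyp_unmap(partition, virt, size,
//			  PGTABLE_HYP_UNMAP_PRESERVE_ALL);
//
// PGTABLE_HYP_UNMAP_PRESERVE_NONE is a size larger than any possible mapping,
// so no level can satisfy it and every level in the range may be freed.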

// Ensures that all previous hypervisor map and unmap calls are complete.
void
pgtable_hyp_commit(void) RELEASE_LOCK(pgtable_hyp_map_lock);
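//
// Typical call sequence (a sketch; all argument values are placeholders):
//
//	pgtable_hyp_start();
//	err = pgtable_hyp_map(partition, virt, size, phys, memtype, access,
//			      shareability);
//	pgtable_hyp_commit();
//	// Only after pgtable_hyp_commit() returns may the caller rely on the
//	// mapping being visible to all page-table walkers.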

//
// VM page table management.
//
// VM page tables don't have the same constraints for level preallocation &
// freeing because they are always entirely owned by one partition.
//
error_t
pgtable_vm_init(partition_t *partition, pgtable_vm_t *pgtable, vmid_t vmid);

// Frees all resources for the page table.
void
pgtable_vm_destroy(partition_t *partition, pgtable_vm_t *pgtable);

// Returns false if the specified address is unmapped.
bool
pgtable_vm_lookup(pgtable_vm_t *pgtable, vmaddr_t virt, paddr_t *mapped_base,
		  size_t *mapped_size, pgtable_vm_memtype_t *mapped_memtype,
		  pgtable_access_t *mapped_vm_kernel_access,
		  pgtable_access_t *mapped_vm_user_access);

// Returns false if there is no mapping in the specified range. If a mapping
// is found and can be efficiently determined to be the last mapping in the
// range, the boolean *remainder_unmapped will be set to true; otherwise it
// will be unchanged. Note that the returned mapping may extend beyond the
// specified range.
bool
pgtable_vm_lookup_range(pgtable_vm_t *pgtable, vmaddr_t virt_base,
			size_t virt_size, vmaddr_t *mapped_virt,
			paddr_t *mapped_phys, size_t *mapped_size,
			pgtable_vm_memtype_t *mapped_memtype,
			pgtable_access_t     *mapped_vm_kernel_access,
			pgtable_access_t     *mapped_vm_user_access,
			bool		     *remainder_unmapped);
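//
// A sketch of walking all mappings in a range (illustrative only; assumes
// the mapped_* outputs and the memtype/access locals are declared by the
// caller):
//
//	vmaddr_t cur	       = virt_base;
//	size_t	 remaining     = virt_size;
//	bool	 rest_unmapped = false;
//	while ((remaining > 0U) && !rest_unmapped &&
//	       pgtable_vm_lookup_range(pgtable, cur, remaining, &mapped_virt,
//				       &mapped_phys, &mapped_size, &memtype,
//				       &kernel_access, &user_access,
//				       &rest_unmapped)) {
//		// The returned mapping may start before cur and may extend
//		// past the queried range, so clamp before advancing.
//		size_t step = (size_t)(mapped_virt + mapped_size - cur);
//		if (step > remaining) {
//			step = remaining;
//		}
//		cur += step;
//		remaining -= step;
//	}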

extern opaque_lock_t pgtable_vm_map_lock;

// Flags the start of one or more map or unmap calls.
void
pgtable_vm_start(pgtable_vm_t *pgtable) ACQUIRE_LOCK(pgtable)
	ACQUIRE_LOCK(pgtable_vm_map_lock);

// Creates a new mapping.
//
// If try_map is true, it returns an error if any existing mappings are present
// in the range that are not exactly identical to the requested mapping. If
// try_map is false, any existing mappings in the specified range are removed or
// updated.
//
// If allow_merge is true, then any page table levels that become congruent as a
// result of this operation will be merged into larger pages.
//
// pgtable_vm_start() must have been called before this call.
error_t
pgtable_vm_map(partition_t *partition, pgtable_vm_t *pgtable, vmaddr_t virt,
	       size_t size, paddr_t phys, pgtable_vm_memtype_t memtype,
	       pgtable_access_t vm_kernel_access,
	       pgtable_access_t vm_user_access, bool try_map, bool allow_merge)
	REQUIRE_LOCK(pgtable) REQUIRE_LOCK(pgtable_vm_map_lock);

// Removes all mappings in the given range. pgtable_vm_start() must have been
// called before this call.
void
pgtable_vm_unmap(partition_t *partition, pgtable_vm_t *pgtable, vmaddr_t virt,
		 size_t size) REQUIRE_LOCK(pgtable)
	REQUIRE_LOCK(pgtable_vm_map_lock);

// Removes only mappings that match the physical address within the specified
// range.
void
pgtable_vm_unmap_matching(partition_t *partition, pgtable_vm_t *pgtable,
			  vmaddr_t virt, paddr_t phys, size_t size)
	REQUIRE_LOCK(pgtable) REQUIRE_LOCK(pgtable_vm_map_lock);

// Ensures that all previous VM map and unmap calls are complete.
void
pgtable_vm_commit(pgtable_vm_t *pgtable) RELEASE_LOCK(pgtable)
	RELEASE_LOCK(pgtable_vm_map_lock);
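//
// Typical call sequence for a VM mapping update (a sketch; all argument
// values are placeholders):
//
//	pgtable_vm_start(pgtable);
//	err = pgtable_vm_map(partition, pgtable, virt, size, phys, memtype,
//			     vm_kernel_access, vm_user_access,
//			     true,   // try_map: fail on conflicting mappings
//			     false); // allow_merge: don't merge levels
//	pgtable_vm_commit(pgtable);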

// Sets the VTCR and VTTBR registers from the page table's vtcr and vttbr
// bitfield values.
void
pgtable_vm_load_regs(pgtable_vm_t *vm_pgtable);

// Validates a page table access against the requested access_check.
bool
pgtable_access_check(pgtable_access_t access, pgtable_access_t access_check);

// Masks a page table access with the given access mask.
pgtable_access_t
pgtable_access_mask(pgtable_access_t access, pgtable_access_t access_mask);

// Checks whether the two given page table accesses are equal.
bool
pgtable_access_is_equal(pgtable_access_t access, pgtable_access_t access_check);

// Returns the combination of the two given accesses.
pgtable_access_t
pgtable_access_combine(pgtable_access_t access1, pgtable_access_t access2);
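//
// For example (a sketch; the PGTABLE_ACCESS_R / PGTABLE_ACCESS_RW enumerator
// spellings are assumptions about pgtable_access_t, not guaranteed by this
// interface):
//
//	// Does an RW mapping satisfy a read-only requirement?
//	bool ok = pgtable_access_check(PGTABLE_ACCESS_RW, PGTABLE_ACCESS_R);
//	// Combine two access requirements into a single mapping access.
//	pgtable_access_t a = pgtable_access_combine(PGTABLE_ACCESS_R,
//						    PGTABLE_ACCESS_RW);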