/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef CORE_MMU_H
#define CORE_MMU_H

#ifndef __ASSEMBLER__
#include <assert.h>
#include <compiler.h>
#include <kernel/user_ta.h>
#include <mm/tee_mmu_types.h>
#include <types_ext.h>
#include <util.h>
#endif

#include <mm/core_mmu_arch.h>
#include <platform_config.h>

/* A small page is the smallest unit of memory that can be mapped */
#define SMALL_PAGE_SIZE BIT(SMALL_PAGE_SHIFT)
#define SMALL_PAGE_MASK ((paddr_t)SMALL_PAGE_SIZE - 1)
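
/*
 * Illustrative sketch (not part of the API): assuming the usual 4 KiB small
 * page, i.e. SMALL_PAGE_SHIFT == 12, the size/mask pair splits an address
 * into its page base and page offset. The address below is hypothetical.
 *
 *   paddr_t pa = 0x10001234;
 *   paddr_t page_base = pa & ~SMALL_PAGE_MASK;   (0x10001000)
 *   size_t page_offs = pa & SMALL_PAGE_MASK;     (0x234)
 */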

/*
 * PGDIR is the translation table above the translation table that holds
 * the pages.
 */
#define CORE_MMU_PGDIR_SIZE BIT(CORE_MMU_PGDIR_SHIFT)
#define CORE_MMU_PGDIR_MASK ((paddr_t)CORE_MMU_PGDIR_SIZE - 1)

/* TA user space code, data, stack and heap are mapped using this granularity */
#define CORE_MMU_USER_CODE_SIZE BIT(CORE_MMU_USER_CODE_SHIFT)
#define CORE_MMU_USER_CODE_MASK ((paddr_t)CORE_MMU_USER_CODE_SIZE - 1)

/* TA user space parameters are mapped using this granularity */
#define CORE_MMU_USER_PARAM_SIZE BIT(CORE_MMU_USER_PARAM_SHIFT)
#define CORE_MMU_USER_PARAM_MASK ((paddr_t)CORE_MMU_USER_PARAM_SIZE - 1)
/*
 * Identity mapping constraint: the virtual base address equals the physical
 * start address. If the platform did not set some of these macros, they get
 * default values.
 */
#ifndef TEE_RAM_VA_SIZE
#define TEE_RAM_VA_SIZE CORE_MMU_PGDIR_SIZE
#endif

#ifndef TEE_LOAD_ADDR
#define TEE_LOAD_ADDR TEE_RAM_START
#endif

#define TEE_RAM_VA_START TEE_RAM_START
#define TEE_TEXT_VA_START (TEE_RAM_VA_START + \
			   (TEE_LOAD_ADDR - TEE_RAM_START))

#ifndef STACK_ALIGNMENT
#define STACK_ALIGNMENT (sizeof(long) * U(2))
#endif
#ifndef __ASSEMBLER__
/*
 * Memory area type:
 * MEM_AREA_END: Reserved, marks the end of a table of mapping areas.
 * MEM_AREA_TEE_RAM: core RAM (read/write/executable, secure, reserved to TEE)
 * MEM_AREA_TEE_RAM_RX: core private read-only/executable memory (secure)
 * MEM_AREA_TEE_RAM_RO: core private read-only/non-executable memory (secure)
 * MEM_AREA_TEE_RAM_RW: core private read/write/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RO: init private read-only/non-executable memory (secure)
 * MEM_AREA_INIT_RAM_RX: init private read-only/executable memory (secure)
 * MEM_AREA_NEX_RAM_RO: nexus private read-only/non-executable memory (secure)
 * MEM_AREA_NEX_RAM_RW: nexus private r/w/non-executable memory (secure)
 * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE)
 * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE)
 * MEM_AREA_IDENTITY_MAP_RX: core identity-mapped r/o executable memory (secure)
 * MEM_AREA_TA_RAM: Secure RAM where teecore loads/executes TA instances.
 * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE.
 * MEM_AREA_RAM_NSEC: NonSecure RAM storing data
 * MEM_AREA_RAM_SEC: Secure RAM storing some secrets
 * MEM_AREA_IO_NSEC: NonSecure HW mapped registers
 * MEM_AREA_IO_SEC: Secure HW mapped registers
 * MEM_AREA_EXT_DT: Memory where the external device tree is loaded
 * MEM_AREA_RES_VASPACE: Reserved virtual memory space
 * MEM_AREA_SHM_VASPACE: Virtual memory space for dynamic shared memory buffers
 * MEM_AREA_TS_VASPACE: TS va space, only used with phys_to_virt()
 * MEM_AREA_PAGER_VASPACE: Virtual memory space used by the pager
 * MEM_AREA_SDP_MEM: Secure Data Path memory, registered with register_sdp_mem()
 * MEM_AREA_DDR_OVERALL: Overall DDR address range, candidate to dynamic shm.
 * MEM_AREA_SEC_RAM_OVERALL: Whole secure RAM
 * MEM_AREA_MAXTYPE: lowest invalid 'type' value
 */
enum teecore_memtypes {
	MEM_AREA_END = 0,
	MEM_AREA_TEE_RAM,
	MEM_AREA_TEE_RAM_RX,
	MEM_AREA_TEE_RAM_RO,
	MEM_AREA_TEE_RAM_RW,
	MEM_AREA_INIT_RAM_RO,
	MEM_AREA_INIT_RAM_RX,
	MEM_AREA_NEX_RAM_RO,
	MEM_AREA_NEX_RAM_RW,
	MEM_AREA_TEE_COHERENT,
	MEM_AREA_TEE_ASAN,
	MEM_AREA_IDENTITY_MAP_RX,
	MEM_AREA_TA_RAM,
	MEM_AREA_NSEC_SHM,
	MEM_AREA_RAM_NSEC,
	MEM_AREA_RAM_SEC,
	MEM_AREA_IO_NSEC,
	MEM_AREA_IO_SEC,
	MEM_AREA_EXT_DT,
	MEM_AREA_RES_VASPACE,
	MEM_AREA_SHM_VASPACE,
	MEM_AREA_TS_VASPACE,
	MEM_AREA_PAGER_VASPACE,
	MEM_AREA_SDP_MEM,
	MEM_AREA_DDR_OVERALL,
	MEM_AREA_SEC_RAM_OVERALL,
	MEM_AREA_MAXTYPE
};
static inline const char *teecore_memtype_name(enum teecore_memtypes type)
{
	static const char * const names[] = {
		[MEM_AREA_END] = "END",
		[MEM_AREA_TEE_RAM] = "TEE_RAM_RWX",
		[MEM_AREA_TEE_RAM_RX] = "TEE_RAM_RX",
		[MEM_AREA_TEE_RAM_RO] = "TEE_RAM_RO",
		[MEM_AREA_TEE_RAM_RW] = "TEE_RAM_RW",
		[MEM_AREA_INIT_RAM_RO] = "INIT_RAM_RO",
		[MEM_AREA_INIT_RAM_RX] = "INIT_RAM_RX",
		[MEM_AREA_NEX_RAM_RO] = "NEX_RAM_RO",
		[MEM_AREA_NEX_RAM_RW] = "NEX_RAM_RW",
		[MEM_AREA_TEE_ASAN] = "TEE_ASAN",
		[MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX",
		[MEM_AREA_TEE_COHERENT] = "TEE_COHERENT",
		[MEM_AREA_TA_RAM] = "TA_RAM",
		[MEM_AREA_NSEC_SHM] = "NSEC_SHM",
		[MEM_AREA_RAM_NSEC] = "RAM_NSEC",
		[MEM_AREA_RAM_SEC] = "RAM_SEC",
		[MEM_AREA_IO_NSEC] = "IO_NSEC",
		[MEM_AREA_IO_SEC] = "IO_SEC",
		[MEM_AREA_EXT_DT] = "EXT_DT",
		[MEM_AREA_RES_VASPACE] = "RES_VASPACE",
		[MEM_AREA_SHM_VASPACE] = "SHM_VASPACE",
		[MEM_AREA_TS_VASPACE] = "TS_VASPACE",
		[MEM_AREA_PAGER_VASPACE] = "PAGER_VASPACE",
		[MEM_AREA_SDP_MEM] = "SDP_MEM",
		[MEM_AREA_DDR_OVERALL] = "DDR_OVERALL",
		[MEM_AREA_SEC_RAM_OVERALL] = "SEC_RAM_OVERALL",
	};

	COMPILE_TIME_ASSERT(ARRAY_SIZE(names) == MEM_AREA_MAXTYPE);
	return names[type];
}
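
/*
 * Illustrative use of teecore_memtype_name(), for instance in a debug trace.
 * DMSG() and PRIxPA are assumed to be available from <trace.h> and
 * <types_ext.h> respectively.
 *
 *   DMSG("mapping %s at %#" PRIxPA,
 *        teecore_memtype_name(MEM_AREA_IO_SEC), pa);
 */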

#ifdef CFG_CORE_RWDATA_NOEXEC
#define MEM_AREA_TEE_RAM_RW_DATA MEM_AREA_TEE_RAM_RW
#else
#define MEM_AREA_TEE_RAM_RW_DATA MEM_AREA_TEE_RAM
#endif

struct core_mmu_phys_mem {
	const char *name;
	enum teecore_memtypes type;
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_addr;
			uint32_t hi_addr;
		};
#endif
		paddr_t addr;
	};
	__extension__ union {
#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
		struct {
			uint32_t lo_size;
			uint32_t hi_size;
		};
#endif
		paddr_size_t size;
	};
};

#define __register_memory(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .addr = (_addr), \
		  .size = (_size) }

#if __SIZEOF_LONG__ != __SIZEOF_PADDR__
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	SCATTERED_ARRAY_DEFINE_ITEM(_section, struct core_mmu_phys_mem) = \
		{ .name = (_name), .type = (_type), .lo_addr = (_addr), \
		  .lo_size = (_size) }
#else
#define __register_memory_ul(_name, _type, _addr, _size, _section) \
	__register_memory(_name, _type, _addr, _size, _section)
#endif

#define register_phys_mem(type, addr, size) \
	__register_memory(#addr, (type), (addr), (size), \
			  phys_mem_map)

#define register_phys_mem_ul(type, addr, size) \
	__register_memory_ul(#addr, (type), (addr), (size), \
			     phys_mem_map)

/* Same as register_phys_mem() but with PGDIR_SIZE granularity */
#define register_phys_mem_pgdir(type, addr, size) \
	register_phys_mem(type, ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
			  ROUNDUP(size + addr - ROUNDDOWN(addr, CORE_MMU_PGDIR_SIZE), \
				  CORE_MMU_PGDIR_SIZE))
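
/*
 * Illustrative sketch: registering a secure peripheral from platform code
 * (the base address and size below are hypothetical). With a 1 MiB or 2 MiB
 * CORE_MMU_PGDIR_SIZE the registered range is expanded to cover the whole
 * translation table entry the peripheral falls into.
 *
 *   register_phys_mem_pgdir(MEM_AREA_IO_SEC, 0x40000000, 0x1000);
 */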

#ifdef CFG_SECURE_DATA_PATH
#define register_sdp_mem(addr, size) \
	__register_memory(#addr, MEM_AREA_SDP_MEM, (addr), (size), \
			  phys_sdp_mem)
#else
#define register_sdp_mem(addr, size) \
	static int CONCAT(__register_sdp_mem_unused, __COUNTER__) \
		__unused
#endif

/* register_dynamic_shm() is deprecated, please use register_ddr() instead */
#define register_dynamic_shm(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), (size), \
			  phys_ddr_overall_compat)

/*
 * register_ddr() - Define a memory range
 * @addr: Base address
 * @size: Length
 *
 * This macro can be used multiple times to define disjoint ranges. During
 * initialization, holes are carved out of these ranges where they overlap
 * with special memory, for instance memory registered with
 * register_sdp_mem().
 *
 * The memory that remains is accepted as non-secure shared memory when
 * communicating with normal world.
 *
 * This macro is an alternative to supplying the memory description with a
 * device tree blob.
 */
#define register_ddr(addr, size) \
	__register_memory(#addr, MEM_AREA_DDR_OVERALL, (addr), \
			  (size), phys_ddr_overall)
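
/*
 * Illustrative sketch (hypothetical DDR layout): declare two disjoint DDR
 * banks; whatever is not carved out for special purposes may later be used
 * as dynamic shared memory.
 *
 *   register_ddr(0x80000000, 0x40000000);
 *   register_ddr(0xc0000000, 0x20000000);
 */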

#define phys_ddr_overall_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_end \
	SCATTERED_ARRAY_END(phys_ddr_overall, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_begin \
	SCATTERED_ARRAY_BEGIN(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_ddr_overall_compat_end \
	SCATTERED_ARRAY_END(phys_ddr_overall_compat, struct core_mmu_phys_mem)

#define phys_sdp_mem_begin \
	SCATTERED_ARRAY_BEGIN(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_sdp_mem_end \
	SCATTERED_ARRAY_END(phys_sdp_mem, struct core_mmu_phys_mem)

#define phys_mem_map_begin \
	SCATTERED_ARRAY_BEGIN(phys_mem_map, struct core_mmu_phys_mem)

#define phys_mem_map_end \
	SCATTERED_ARRAY_END(phys_mem_map, struct core_mmu_phys_mem)

#ifdef CFG_CORE_RESERVED_SHM
/* Default NSec shared memory allocated from NSec world */
extern unsigned long default_nsec_shm_paddr;
extern unsigned long default_nsec_shm_size;
#endif

void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg);
void core_init_mmu_regs(struct core_mmu_config *cfg);

/* Arch specific function to help optimizing 1 MMU xlat table */
bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr);

/*
 * struct mmu_partition - stores an MMU partition
 *
 * Basically it represents the whole MMU mapping. It is possible to create
 * multiple partitions and switch between them at runtime, effectively
 * changing how OP-TEE sees memory. This is an opaque struct which is
 * defined differently for v7 and LPAE MMUs.
 *
 * This structure is mostly used when virtualization is enabled.
 * When CFG_VIRTUALIZATION==n only the default partition exists.
 */
struct mmu_partition;

/*
 * core_mmu_get_user_va_range() - Return range of user va space
 * @base: Lowest user virtual address
 * @size: Size in bytes of user address space
 */
void core_mmu_get_user_va_range(vaddr_t *base, size_t *size);
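
/*
 * Minimal usage sketch (variable names are examples only):
 *
 *   vaddr_t ua_base = 0;
 *   size_t ua_size = 0;
 *
 *   core_mmu_get_user_va_range(&ua_base, &ua_size);
 */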

/*
 * enum core_mmu_fault - different kinds of faults
 * @CORE_MMU_FAULT_ALIGNMENT: alignment fault
 * @CORE_MMU_FAULT_DEBUG_EVENT: debug event
 * @CORE_MMU_FAULT_TRANSLATION: translation fault
 * @CORE_MMU_FAULT_WRITE_PERMISSION: Permission fault during write
 * @CORE_MMU_FAULT_READ_PERMISSION: Permission fault during read
 * @CORE_MMU_FAULT_ASYNC_EXTERNAL: asynchronous external abort
 * @CORE_MMU_FAULT_ACCESS_BIT: access bit fault
 * @CORE_MMU_FAULT_TAG_CHECK: tag check fault
 * @CORE_MMU_FAULT_OTHER: Other/unknown fault
 */
enum core_mmu_fault {
	CORE_MMU_FAULT_ALIGNMENT,
	CORE_MMU_FAULT_DEBUG_EVENT,
	CORE_MMU_FAULT_TRANSLATION,
	CORE_MMU_FAULT_WRITE_PERMISSION,
	CORE_MMU_FAULT_READ_PERMISSION,
	CORE_MMU_FAULT_ASYNC_EXTERNAL,
	CORE_MMU_FAULT_ACCESS_BIT,
	CORE_MMU_FAULT_TAG_CHECK,
	CORE_MMU_FAULT_OTHER,
};

/*
 * core_mmu_get_fault_type() - get fault type
 * @fault_descr: Content of fault status or exception syndrome register
 * @returns an enum describing the content of fault status register.
 */
enum core_mmu_fault core_mmu_get_fault_type(uint32_t fault_descr);
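
/*
 * Illustrative sketch of how an abort handler could classify a fault.
 * handle_page_fault() is a hypothetical helper, not part of this API;
 * panic() is assumed available from <kernel/panic.h>.
 *
 *   switch (core_mmu_get_fault_type(fault_descr)) {
 *   case CORE_MMU_FAULT_TRANSLATION:
 *   case CORE_MMU_FAULT_WRITE_PERMISSION:
 *   case CORE_MMU_FAULT_READ_PERMISSION:
 *           handle_page_fault();
 *           break;
 *   default:
 *           panic();
 *   }
 */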

/*
 * core_mmu_type_to_attr() - convert memory type to attribute
 * @t: memory type
 * @returns an attribute that can be passed to core_mmu_set_entry() and friends
 */
uint32_t core_mmu_type_to_attr(enum teecore_memtypes t);

/*
 * core_mmu_create_user_map() - Create user mode mapping
 * @uctx: Pointer to user mode context
 * @map: MMU configuration to use when activating this VA space
 */
void core_mmu_create_user_map(struct user_mode_ctx *uctx,
			      struct core_mmu_user_map *map);
/*
 * core_mmu_get_user_map() - Reads current MMU configuration for user VA space
 * @map: MMU configuration for current user VA space.
 */
void core_mmu_get_user_map(struct core_mmu_user_map *map);

/*
 * core_mmu_set_user_map() - Set new MMU configuration for user VA space
 * @map: User context MMU configuration or NULL to set core VA space
 *
 * Activate user VA space mapping and set its ASID if @map is not NULL,
 * otherwise activate core mapping and set ASID to 0.
 */
void core_mmu_set_user_map(struct core_mmu_user_map *map);

/*
 * struct core_mmu_table_info - Properties for a translation table
 * @table: Pointer to translation table
 * @va_base: VA base address of the translation table
 * @level: Translation table level
 * @shift: The shift of each entry in the table
 * @num_entries: Number of entries in this table.
 */
struct core_mmu_table_info {
	void *table;
	vaddr_t va_base;
	unsigned level;
	unsigned shift;
	unsigned num_entries;
#ifdef CFG_VIRTUALIZATION
	struct mmu_partition *prtn;
#endif
};

/*
 * core_mmu_find_table() - Locates a translation table
 * @prtn: MMU partition where search should be performed
 * @va: Virtual address for the table to cover
 * @max_level: Don't traverse beyond this level
 * @tbl_info: Pointer to where to store properties.
 * @return true if a translation table was found, false on error
 */
bool core_mmu_find_table(struct mmu_partition *prtn, vaddr_t va,
			 unsigned max_level,
			 struct core_mmu_table_info *tbl_info);
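
/*
 * Minimal lookup sketch, error handling omitted: find the table covering
 * @va (passing a NULL partition is assumed here to mean the active one)
 * and read the matching entry with the helpers declared further down in
 * this header. UINT_MAX means "do not limit the traversal depth".
 *
 *   struct core_mmu_table_info tbl_info = { };
 *   paddr_t pa = 0;
 *   uint32_t attr = 0;
 *
 *   if (core_mmu_find_table(NULL, va, UINT_MAX, &tbl_info)) {
 *           unsigned int idx = core_mmu_va2idx(&tbl_info, va);
 *
 *           core_mmu_get_entry(&tbl_info, idx, &pa, &attr);
 *   }
 */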

/*
 * core_mmu_entry_to_finer_grained() - divide mapping at current level into
 *     smaller ones so memory can be mapped with finer granularity
 * @tbl_info: table where the target entry is located
 * @idx: index of the entry for which a pgdir must be set up
 * @secure: true/false if the pgdir maps secure/non-secure memory (32-bit MMU)
 * @return true on success, false on error
 */
bool core_mmu_entry_to_finer_grained(struct core_mmu_table_info *tbl_info,
				     unsigned int idx, bool secure);

void core_mmu_set_entry_primitive(void *table, size_t level, size_t idx,
				  paddr_t pa, uint32_t attr);

void core_mmu_get_user_pgdir(struct core_mmu_table_info *pgd_info);

/*
 * core_mmu_set_entry() - Set entry in translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to update
 * @pa: Physical address to assign entry
 * @attr: Attributes to assign entry
 */
void core_mmu_set_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t pa, uint32_t attr);

void core_mmu_get_entry_primitive(const void *table, size_t level, size_t idx,
				  paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_get_entry() - Get entry from translation table
 * @tbl_info: Translation table properties
 * @idx: Index of entry to read
 * @pa: Physical address is returned here if pa is not NULL
 * @attr: Attributes are returned here if attr is not NULL
 */
void core_mmu_get_entry(struct core_mmu_table_info *tbl_info, unsigned idx,
			paddr_t *pa, uint32_t *attr);

/*
 * core_mmu_va2idx() - Translate from virtual address to table index
 * @tbl_info: Translation table properties
 * @va: Virtual address to translate
 * @returns index in translation table
 */
static inline unsigned core_mmu_va2idx(struct core_mmu_table_info *tbl_info,
				       vaddr_t va)
{
	return (va - tbl_info->va_base) >> tbl_info->shift;
}

/*
 * core_mmu_idx2va() - Translate from table index to virtual address
 * @tbl_info: Translation table properties
 * @idx: Index to translate
 * @returns Virtual address
 */
static inline vaddr_t core_mmu_idx2va(struct core_mmu_table_info *tbl_info,
				      unsigned idx)
{
	return (idx << tbl_info->shift) + tbl_info->va_base;
}

/*
 * core_mmu_get_block_offset() - Get offset inside a block/page
 * @tbl_info: Translation table properties
 * @pa: Physical address
 * @returns offset within one block of the translation table
 */
static inline size_t core_mmu_get_block_offset(
			struct core_mmu_table_info *tbl_info, paddr_t pa)
{
	return pa & ((1 << tbl_info->shift) - 1);
}
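
/*
 * Worked example, assuming a table with va_base == 0x40000000 and
 * shift == 12 (4 KiB entries); the addresses are hypothetical:
 *
 *   core_mmu_va2idx(&tbl_info, 0x40003080)           == 3
 *   core_mmu_idx2va(&tbl_info, 3)                    == 0x40003000
 *   core_mmu_get_block_offset(&tbl_info, 0x80003080) == 0x80
 */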

/*
 * core_mmu_is_dynamic_vaspace() - Check if memory region belongs to
 *     empty virtual address space that is used for dynamic mappings
 * @mm: memory region to be checked
 * @returns result of the check
 */
static inline bool core_mmu_is_dynamic_vaspace(struct tee_mmap_region *mm)
{
	return mm->type == MEM_AREA_RES_VASPACE ||
	       mm->type == MEM_AREA_SHM_VASPACE;
}

/*
 * core_mmu_map_pages() - map list of pages at given virtual address
 * @vstart: Virtual address where mapping begins
 * @pages: Array of page addresses
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_pages(vaddr_t vstart, paddr_t *pages, size_t num_pages,
			      enum teecore_memtypes memtype);
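
/*
 * Illustrative sketch: map two non-contiguous pages read/write at a
 * previously reserved virtual address (the physical addresses are
 * hypothetical):
 *
 *   paddr_t pages[] = { 0x80001000, 0x80044000 };
 *   TEE_Result res = core_mmu_map_pages(vstart, pages, ARRAY_SIZE(pages),
 *                                       MEM_AREA_TEE_RAM_RW);
 *
 *   if (res)
 *           return res;
 */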

/*
 * core_mmu_map_contiguous_pages() - map range of pages at given virtual address
 * @vstart: Virtual address where mapping begins
 * @pstart: Physical address of the first page
 * @num_pages: Number of pages
 * @memtype: Type of memory to be mapped
 *
 * Note: This function asserts that pages are not mapped executable for
 * kernel (privileged) mode.
 *
 * @returns: TEE_SUCCESS on success, TEE_ERROR_XXX on error
 */
TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart,
					 size_t num_pages,
					 enum teecore_memtypes memtype);

/*
 * core_mmu_unmap_pages() - remove mapping at given virtual address
 * @vstart: Virtual address where mapping begins
 * @num_pages: Number of pages to unmap
 */
void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages);

/*
 * core_mmu_user_mapping_is_active() - Report if user mapping is active
 * @returns true if a user VA space is active, false if user VA space is
 *          inactive.
 */
bool core_mmu_user_mapping_is_active(void);

/*
 * core_mmu_mattr_is_ok() - Check that supplied mem attributes can be used
 * @returns true if the attributes can be used, false if not.
 */
bool core_mmu_mattr_is_ok(uint32_t mattr);

void core_mmu_get_mem_by_type(enum teecore_memtypes type, vaddr_t *s,
			      vaddr_t *e);

enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa);

/* Routines to retrieve shared memory configuration */
static inline bool core_mmu_is_shm_cached(void)
{
	return mattr_is_cached(core_mmu_type_to_attr(MEM_AREA_NSEC_SHM));
}

TEE_Result core_mmu_remove_mapping(enum teecore_memtypes type, void *addr,
				   size_t len);
void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr,
			   size_t len);
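
/*
 * Illustrative sketch: temporarily map a secure I/O range, use it, then
 * remove the mapping again (the base address and size are hypothetical):
 *
 *   vaddr_t va = (vaddr_t)core_mmu_add_mapping(MEM_AREA_IO_SEC,
 *                                              0x50000000, 0x1000);
 *
 *   if (!va)
 *           return TEE_ERROR_GENERIC;
 *   ... access the registers through va ...
 *   core_mmu_remove_mapping(MEM_AREA_IO_SEC, (void *)va, 0x1000);
 */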

/*
 * core_mmu_find_mapping_exclusive() - Find mapping of specified type and
 *                                     length. If more than one mapping of
 *                                     specified type is present, NULL will be
 *                                     returned.
 * @type: memory type
 * @len: length in bytes
 */
struct tee_mmap_region *
core_mmu_find_mapping_exclusive(enum teecore_memtypes type, size_t len);

/*
 * tlbi_mva_range() - Invalidate TLB for virtual address range
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 *           CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 *           match the actual mappings.
 */
void tlbi_mva_range(vaddr_t va, size_t len, size_t granule);
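
/*
 * Illustrative sketch: after changing a range of small-page mappings,
 * invalidate the corresponding TLB entries:
 *
 *   tlbi_mva_range(va, num_pages * SMALL_PAGE_SIZE, SMALL_PAGE_SIZE);
 */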

/*
 * tlbi_mva_range_asid() - Invalidate TLB for virtual address range for
 *                         a specific ASID
 * @va: start virtual address, must be a multiple of @granule
 * @len: length in bytes of range, must be a multiple of @granule
 * @granule: granularity of mapping, supported values are
 *           CORE_MMU_PGDIR_SIZE or SMALL_PAGE_SIZE. This value must
 *           match the actual mappings.
 * @asid: Address space identifier
 */
void tlbi_mva_range_asid(vaddr_t va, size_t len, size_t granule, uint32_t asid);

/* Check whether the CPU MMU is enabled */
bool cpu_mmu_enabled(void);

#ifdef CFG_CORE_DYN_SHM
/*
 * Check if platform defines nsec DDR range(s).
 * Static SHM (MEM_AREA_NSEC_SHM) is not covered by this API as it is
 * always present.
 */
bool core_mmu_nsec_ddr_is_defined(void);

void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
				      size_t nelems);
#endif

/* Initialize MMU partition */
void core_init_mmu_prtn(struct mmu_partition *prtn, struct tee_mmap_region *mm);

unsigned int asid_alloc(void);
void asid_free(unsigned int asid);

#ifdef CFG_SECURE_DATA_PATH
/* Alloc and fill SDP memory objects table - table is NULL terminated */
struct mobj **core_sdp_mem_create_mobjs(void);
#endif

#ifdef CFG_VIRTUALIZATION
size_t core_mmu_get_total_pages_size(void);
struct mmu_partition *core_alloc_mmu_prtn(void *tables);
void core_free_mmu_prtn(struct mmu_partition *prtn);
void core_mmu_set_prtn(struct mmu_partition *prtn);
void core_mmu_set_default_prtn(void);
void core_mmu_set_default_prtn_tbl(void);
#endif

void core_mmu_init_virtualization(void);

/* init some allocation pools */
void core_mmu_init_ta_ram(void);

void core_init_mmu(struct tee_mmap_region *mm);

void core_mmu_set_info_table(struct core_mmu_table_info *tbl_info,
			     unsigned int level, vaddr_t va_base, void *table);
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_mode_ctx *uctx);
void core_mmu_map_region(struct mmu_partition *prtn,
			 struct tee_mmap_region *mm);

bool arch_va2pa_helper(void *va, paddr_t *pa);

static inline bool core_mmap_is_end_of_table(const struct tee_mmap_region *mm)
{
	return mm->type == MEM_AREA_END;
}

static inline bool core_mmu_check_end_pa(paddr_t pa, size_t len)
{
	paddr_t end_pa = 0;

	if (ADD_OVERFLOW(pa, len - 1, &end_pa))
		return false;
	return core_mmu_check_max_pa(end_pa);
}
#endif /*__ASSEMBLER__*/

#endif /* CORE_MMU_H */