/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */
#ifndef CORE_MEMPROT_H
#define CORE_MEMPROT_H

#include <mm/core_mmu.h>
#include <types_ext.h>

/*
 * "pbuf_is" support.
 *
 * core_vbuf_is()/core_pbuf_is() can be used to check whether a teecore
 * mapped virtual address or a physical address is "Secure", "Unsecure",
 * "external RAM" or has some other attribute.
 *
 * DO NOT treat 'buf_is(Secure, buffer) == false' as meaning that the buffer
 * is unsecure! That is NOT a valid assumption! A buffer is certified
 * unsecure only if 'buf_is(UnSecure, buffer) == true'.
 */

/* memory attributes */
enum buf_is_attr {
	CORE_MEM_CACHED,
	CORE_MEM_NSEC_SHM,
	CORE_MEM_NON_SEC,
	CORE_MEM_SEC,
	CORE_MEM_TEE_RAM,
	CORE_MEM_TA_RAM,
	CORE_MEM_SDP_MEM,
	CORE_MEM_REG_SHM,
};

/* redirect legacy tee_vbuf_is() and tee_pbuf_is() to our routines */
#define tee_pbuf_is	core_pbuf_is
#define tee_vbuf_is	core_vbuf_is

/* Convenience macros */
#define tee_pbuf_is_non_sec(buf, len) \
		core_pbuf_is(CORE_MEM_NON_SEC, (paddr_t)(buf), (len))

#define tee_pbuf_is_sec(buf, len) \
		core_pbuf_is(CORE_MEM_SEC, (paddr_t)(buf), (len))

#define tee_vbuf_is_non_sec(buf, len) \
		core_vbuf_is(CORE_MEM_NON_SEC, (void *)(buf), (len))

#define tee_vbuf_is_sec(buf, len) \
		core_vbuf_is(CORE_MEM_SEC, (void *)(buf), (len))

/*
 * Returns true if the buffer complies with the supplied flags.
 * If this function returns false, the buffer doesn't comply with the
 * supplied flags, or something went wrong.
 *
 * Note that returning false doesn't guarantee that the buffer complies
 * with the complement of the supplied flags.
 */
bool core_pbuf_is(uint32_t flags, paddr_t pbuf, size_t len);

/*
 * Translates the supplied virtual address to a physical address and uses
 * core_pbuf_is() to check the compliance of the buffer.
 */
bool core_vbuf_is(uint32_t flags, const void *vbuf, size_t len);

/*
 * Translate a physical address to a virtual address using the specified
 * mapping. The function looks for a mapping of the requested type whose
 * translation covers the specified length of data starting at the given
 * physical address. The len parameter can be set to 1 if the caller knows
 * that the requested range (pa + len) doesn't cross a mapping granule
 * boundary.
 * Returns NULL on failure or a valid virtual address on success.
 */
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m, size_t len);

/*
 * Translate a physical address to a virtual address, trying MEM_AREA_IO_SEC
 * first and then MEM_AREA_IO_NSEC if no mapping is found. Like
 * phys_to_virt() it relies on the length parameter to find a suitable
 * mapping.
 * Returns NULL on failure or a valid virtual address on success.
 */
void *phys_to_virt_io(paddr_t pa, size_t len);

/*
 * Translate a virtual address to a physical address.
 * Returns 0 on failure or a valid physical address on success.
 */
paddr_t virt_to_phys(void *va);

/*
 * Return an address usable at runtime, irrespective of whether the MMU is
 * enabled. When the MMU is enabled, it is also checked that the requested
 * amount of data is covered by the mapping found.
 */
vaddr_t core_mmu_get_va(paddr_t pa, enum teecore_memtypes type, size_t len);
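
/*
 * Usage sketch (illustrative only, not part of this interface): a driver
 * handed a physical buffer from the normal world would typically certify it
 * as non-secure with core_pbuf_is() before translating it for access, and
 * map its device registers with phys_to_virt_io(). The names 'pa', 'sz',
 * 'va', 'regs' and MY_DEV_BASE below are hypothetical, and the buffer is
 * assumed to live in non-secure shared memory (MEM_AREA_NSEC_SHM).
 *
 *	if (!core_pbuf_is(CORE_MEM_NON_SEC, pa, sz))
 *		return TEE_ERROR_SECURITY;
 *	va = phys_to_virt(pa, MEM_AREA_NSEC_SHM, sz);
 *	if (!va)
 *		return TEE_ERROR_GENERIC;
 *
 *	regs = phys_to_virt_io(MY_DEV_BASE, SMALL_PAGE_SIZE);
 */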

/* Return true if @va relates to an unpaged section, else false */
bool is_unpaged(void *va);

struct io_pa_va {
	paddr_t pa;
	vaddr_t va;
};

/*
 * Helper functions to return a physical or virtual address for a device,
 * depending on whether the MMU is enabled or not.
 * io_pa_or_va() uses secure mapped IO memory if found, or falls back to
 * non-secure mapped IO memory.
 */
vaddr_t io_pa_or_va_secure(struct io_pa_va *p, size_t len);
vaddr_t io_pa_or_va_nsec(struct io_pa_va *p, size_t len);
vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len);

#endif /* CORE_MEMPROT_H */
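
/*
 * Usage sketch (illustrative only, not part of this interface): a device
 * driver typically keeps a static struct io_pa_va with only the physical
 * base address filled in and asks the io_pa_or_va*() helpers for an address
 * that is valid in the current MMU state. MY_UART_BASE and MY_UART_REG_SIZE
 * below are hypothetical.
 *
 *	static struct io_pa_va base = { .pa = MY_UART_BASE };
 *
 *	vaddr_t regs = io_pa_or_va_secure(&base, MY_UART_REG_SIZE);
 */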