/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016, Linaro Limited
 */
#ifndef __MM_PGT_CACHE_H
#define __MM_PGT_CACHE_H

#include <assert.h>
#include <kernel/tee_ta_manager.h>
#include <sys/queue.h>
#include <types_ext.h>
#include <util.h>

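/*
 * With LPAE each translation table is a full 4 KiB granule, without LPAE
 * the short-descriptor level 2 tables are 1 KiB so four tables fit in
 * one page.
 */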
#ifdef CFG_WITH_LPAE
#define PGT_SIZE	(4 * 1024)
#define PGT_NUM_PGT_PER_PAGE	1
#else
#define PGT_SIZE	(1 * 1024)
#define PGT_NUM_PGT_PER_PAGE	4
#endif

struct ts_ctx;

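/*
 * struct pgt - one translation table used to map a user mode context
 * @tbl:	virtual address of the table
 * @vabase:	base virtual address mapped by the table
 * @ctx:	context the table currently belongs to
 * @populated:	true once the table has been populated with entries
 * @num_used_entries:	number of entries currently in use, tracked when
 *			user TAs are paged
 * @parent:	the allocation unit (page) the table was carved from
 * @link:	list link for struct pgt_cache
 */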
struct pgt {
	void *tbl;
	vaddr_t vabase;
#if !defined(CFG_CORE_PREALLOC_EL0_TBLS)
	struct ts_ctx *ctx;
#endif
	bool populated;
#if defined(CFG_PAGED_USER_TA)
	uint16_t num_used_entries;
#endif
#if defined(CFG_CORE_PREALLOC_EL0_TBLS) || \
	(defined(CFG_WITH_PAGER) && !defined(CFG_WITH_LPAE))
	struct pgt_parent *parent;
#endif
	SLIST_ENTRY(pgt) link;
};

SLIST_HEAD(pgt_cache, pgt);
struct user_mode_ctx;

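/*
 * pgt_check_avail() - check that enough translation tables are available
 * @uctx:	the context to check
 *
 * Returns true if the translation tables needed to map @uctx can be made
 * available, false otherwise.
 */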
bool pgt_check_avail(struct user_mode_ctx *uctx);

/*
 * pgt_get_all() - makes all needed translation tables available
 * @uctx:	the context to own the tables
 *
 * Guaranteed to succeed, but may need to sleep for a while to get all the
 * needed translation tables.
 */
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
static inline void pgt_get_all(struct user_mode_ctx *uctx __unused) { }
#else
void pgt_get_all(struct user_mode_ctx *uctx);
#endif

/*
 * pgt_put_all() - informs the translation table manager that these tables
 *		   will not be needed for a while
 * @uctx:	the context owning the tables to make inactive
 */
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
static inline void pgt_put_all(struct user_mode_ctx *uctx __unused) { }
#else
void pgt_put_all(struct user_mode_ctx *uctx);
#endif

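/*
 * pgt_clear_range() - clear the translation table entries covering a range
 * @uctx:	the context owning the tables
 * @begin:	start of the virtual address range
 * @end:	end of the virtual address range
 */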
void pgt_clear_range(struct user_mode_ctx *uctx, vaddr_t begin, vaddr_t end);
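/*
 * pgt_flush_range() - release the translation tables covering a range
 * @uctx:	the context owning the tables
 * @begin:	start of the virtual address range
 * @last:	last virtual address of the range
 */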
void pgt_flush_range(struct user_mode_ctx *uctx, vaddr_t begin, vaddr_t last);

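/*
 * pgt_pop_from_cache_list() - remove a table matching @vabase and @ctx from
 *			       the cache list, or return NULL if none is found
 * pgt_push_to_cache_list() - return a table to the cache list
 *
 * Both are no-ops when EL0 tables are preallocated per context.
 */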
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
static inline struct pgt *pgt_pop_from_cache_list(vaddr_t vabase __unused,
						  struct ts_ctx *ctx __unused)
{ return NULL; }
static inline void pgt_push_to_cache_list(struct pgt *pgt __unused) { }
#else
struct pgt *pgt_pop_from_cache_list(vaddr_t vabase, struct ts_ctx *ctx);
void pgt_push_to_cache_list(struct pgt *pgt);
#endif

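/*
 * pgt_init() - initialize the translation table cache
 *
 * A no-op when EL0 tables are preallocated per context.
 */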
#if defined(CFG_CORE_PREALLOC_EL0_TBLS)
static inline void pgt_init(void) { }
#else
void pgt_init(void);
#endif

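/*
 * pgt_flush() - free all translation tables assigned to a context
 * @uctx:	the context owning the tables
 */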
void pgt_flush(struct user_mode_ctx *uctx);

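/*
 * pgt_inc_used_entries()/pgt_dec_used_entries()/pgt_set_used_entries() -
 * maintain the number of used entries in a table so that an empty table
 * can be detected and released. Only tracked when user TAs are paged,
 * no-ops otherwise.
 */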
#if defined(CFG_PAGED_USER_TA)
static inline void pgt_inc_used_entries(struct pgt *pgt)
{
	pgt->num_used_entries++;
	assert(pgt->num_used_entries);
}

static inline void pgt_dec_used_entries(struct pgt *pgt)
{
	assert(pgt->num_used_entries);
	pgt->num_used_entries--;
}

static inline void pgt_set_used_entries(struct pgt *pgt, size_t val)
{
	pgt->num_used_entries = val;
}

#else
static inline void pgt_inc_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_dec_used_entries(struct pgt *pgt __unused)
{
}

static inline void pgt_set_used_entries(struct pgt *pgt __unused,
					size_t val __unused)
{
}

#endif

#endif /*__MM_PGT_CACHE_H*/