// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021, Linaro Limited
 */

#include <assert.h>
#include <bitstring.h>
#include <drivers/gic.h>
#include <kernel/interrupt.h>
#include <kernel/mutex.h>
#include <kernel/notif.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <optee_rpc_cmd.h>
#include <types_ext.h>

#if defined(CFG_CORE_ASYNC_NOTIF)
static struct mutex notif_mutex = MUTEX_INITIALIZER;
static unsigned int notif_lock = SPINLOCK_UNLOCK;

SLIST_HEAD(notif_driver_head, notif_driver);
static struct notif_driver_head notif_driver_head =
	SLIST_HEAD_INITIALIZER(&notif_driver_head);

static bitstr_t bit_decl(notif_values, NOTIF_ASYNC_VALUE_MAX + 1);
static bitstr_t bit_decl(notif_alloc_values, NOTIF_ASYNC_VALUE_MAX + 1);
static bool notif_started;

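/*
 * Allocate a so far unused asynchronous notification value.
 * NOTIF_VALUE_DO_BOTTOM_HALF is reserved on first use since it has a fixed
 * meaning. Returns TEE_ERROR_OUT_OF_MEMORY if all values are already in use.
 */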
TEE_Result notif_alloc_async_value(uint32_t *val)
{
	static bool alloc_values_inited;
	uint32_t old_itr_status = 0;
	int bit = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	if (!alloc_values_inited) {
		bit_set(notif_alloc_values, NOTIF_VALUE_DO_BOTTOM_HALF);
		alloc_values_inited = true;
	}

	bit_ffc(notif_alloc_values, (int)NOTIF_ASYNC_VALUE_MAX + 1, &bit);
	if (bit >= 0) {
		*val = bit;
		bit_set(notif_alloc_values, bit);
	}

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

	if (bit < 0)
		return TEE_ERROR_OUT_OF_MEMORY;

	return TEE_SUCCESS;
}

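/* Release a value previously obtained with notif_alloc_async_value() */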
void notif_free_async_value(uint32_t val)
{
	uint32_t old_itr_status = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	assert(val < NOTIF_ASYNC_VALUE_MAX);
	assert(bit_test(notif_alloc_values, val));
	bit_clear(notif_alloc_values, val);

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

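/*
 * Retrieve and clear one pending asynchronous notification value.
 * @value_valid tells whether a value was returned at all and
 * @value_pending whether more values remain pending after this one.
 */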
uint32_t notif_get_value(bool *value_valid, bool *value_pending)
{
	uint32_t old_itr_status = 0;
	uint32_t res = 0;
	int bit = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	bit_ffs(notif_values, (int)NOTIF_ASYNC_VALUE_MAX + 1, &bit);
	*value_valid = (bit >= 0);
	if (!*value_valid) {
		*value_pending = false;
		goto out;
	}

	res = bit;
	bit_clear(notif_values, res);
	bit_ffs(notif_values, (int)NOTIF_ASYNC_VALUE_MAX + 1, &bit);
	*value_pending = (bit >= 0);
out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

	return res;
}

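/*
 * Mark @value as pending and raise the configured notification interrupt
 * (CFG_CORE_ASYNC_NOTIF_GIC_INTID) so that the value eventually is
 * retrieved with notif_get_value().
 */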
void notif_send_async(uint32_t value)
{
	uint32_t old_itr_status = 0;

	static_assert(CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_PPI_BASE);

	assert(value <= NOTIF_ASYNC_VALUE_MAX);
	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	DMSG("0x%"PRIx32, value);
	bit_set(notif_values, value);
	itr_raise_pi(CFG_CORE_ASYNC_NOTIF_GIC_INTID);

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

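/* Return true once NOTIF_EVENT_STARTED has been delivered */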
bool notif_async_is_started(void)
{
	uint32_t old_itr_status = 0;
	bool ret = false;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);
	ret = notif_started;
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

	return ret;
}

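/* Add @ndrv to the list of drivers notified by notif_deliver_*() below */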
void notif_register_driver(struct notif_driver *ndrv)
{
	uint32_t old_itr_status = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	SLIST_INSERT_HEAD(&notif_driver_head, ndrv, link);

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

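/* Remove @ndrv from the list of registered notification drivers */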
void notif_unregister_driver(struct notif_driver *ndrv)
{
	uint32_t old_itr_status = 0;

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	SLIST_REMOVE(&notif_driver_head, ndrv, notif_driver, link);

	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

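/*
 * Deliver NOTIF_EVENT_STARTED to the atomic callback of each registered
 * driver. The spinlock is held for the duration, so the callbacks must
 * not sleep.
 */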
void notif_deliver_atomic_event(enum notif_event ev)
{
	uint32_t old_itr_status = 0;
	struct notif_driver *nd = NULL;

	assert(ev == NOTIF_EVENT_STARTED);

	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	if (notif_started) {
		DMSG("Already started");
		goto out;
	}
	notif_started = true;

	SLIST_FOREACH(nd, &notif_driver_head, link)
		if (nd->atomic_cb)
			nd->atomic_cb(nd, ev);

out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
}

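/*
 * Deliver a yielding event (NOTIF_EVENT_DO_BOTTOM_HALF or
 * NOTIF_EVENT_STOPPED) to each registered driver. The callbacks may
 * sleep, so the spinlock is dropped around each call while notif_mutex
 * keeps concurrent deliveries serialized.
 */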
void notif_deliver_event(enum notif_event ev)
{
	uint32_t old_itr_status = 0;
	struct notif_driver *nd = NULL;
	struct notif_driver *nd_tmp = NULL;

	assert(ev == NOTIF_EVENT_DO_BOTTOM_HALF || ev == NOTIF_EVENT_STOPPED);

	/* Serialize all yielding notifications */
	mutex_lock(&notif_mutex);
	old_itr_status = cpu_spin_lock_xsave(&notif_lock);

	if (!notif_started) {
		DMSG("Not started ev %d", (int)ev);
		goto out;
	}

	if (ev == NOTIF_EVENT_STOPPED)
		notif_started = false;

	SLIST_FOREACH_SAFE(nd, &notif_driver_head, link, nd_tmp) {
		cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);

		if (nd->yielding_cb)
			nd->yielding_cb(nd, ev);

		old_itr_status = cpu_spin_lock_xsave(&notif_lock);

		if (ev == NOTIF_EVENT_STOPPED && notif_started) {
			DMSG("Started again while stopping");
			goto out;
		}
	}

out:
	cpu_spin_unlock_xrestore(&notif_lock, old_itr_status);
	mutex_unlock(&notif_mutex);
}
#endif /*CFG_CORE_ASYNC_NOTIF*/

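/*
 * Pass a notification request to the normal world with an
 * OPTEE_RPC_CMD_NOTIFICATION RPC carrying the function and value.
 */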
static TEE_Result notif_rpc(uint32_t func, uint32_t value)
{
	struct thread_param params = THREAD_PARAM_VALUE(IN, func, value, 0);

	return thread_rpc_cmd(OPTEE_RPC_CMD_NOTIFICATION, 1, &params);
}

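/* Wait for @value to be signalled, the wait is handled by the normal world */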
TEE_Result notif_wait(uint32_t value)
{
	return notif_rpc(OPTEE_RPC_NOTIFICATION_WAIT, value);
}

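/* Signal @value so that a matching notif_wait() can complete */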
TEE_Result notif_send_sync(uint32_t value)
{
	return notif_rpc(OPTEE_RPC_NOTIFICATION_SEND, value);
}