// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>
#include <string.h>

#include <hypcontainers.h>

#include <scheduler.h>
#include <spinlock.h>
#include <vic.h>
#include <virq.h>

#include "event_handlers.h"
#include "msgqueue_common.h"
#include "useraccess.h"

bool_result_t
msgqueue_send_msg(msgqueue_t *msgqueue, size_t size, kernel_or_gvaddr_t msg,
		  bool push, bool from_kernel)
{
	bool_result_t ret;

	ret.r = true;
	ret.e = OK;

	assert(msgqueue != NULL);

	spinlock_acquire(&msgqueue->lock);

	if (msgqueue->count == msgqueue->queue_depth) {
		ret.e = ERROR_MSGQUEUE_FULL;
		ret.r = false;
		goto out;
	}

	// Enqueue message at the tail of the queue
	void *hyp_va =
		(void *)(msgqueue->buf + msgqueue->tail + sizeof(size_t));
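	// The first sizeof(size_t) bytes of each slot hold the message
	// length; the payload is stored immediately after it.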

	if (from_kernel) {
		if (size > msgqueue->max_msg_size) {
			ret.e = ERROR_ARGUMENT_SIZE;
			ret.r = false;
			goto out;
		}

		(void)memcpy(hyp_va, (void *)msg.kernel_addr, size);
	} else {
		size_result_t ret_val = useraccess_copy_from_guest_va(
			hyp_va, msgqueue->max_msg_size, msg.guest_addr, size);
		if (ret_val.e != OK) {
			// Propagate the copy error to the caller.
			ret.e = ret_val.e;
			ret.r = false;
			goto out;
		}
	}

	(void)memcpy(msgqueue->buf + msgqueue->tail, (uint8_t *)&size,
		     sizeof(size_t));
	msgqueue->count++;

	// Update tail value
	msgqueue->tail += (count_t)(msgqueue->max_msg_size + sizeof(size_t));
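	// Each slot has a fixed stride of max_msg_size + sizeof(size_t)
	// bytes; the wrap check below relies on queue_size being a whole
	// number of slots.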

	if (msgqueue->tail == msgqueue->queue_size) {
		msgqueue->tail = 0U;
	}

	// If the buffer was previously below the not-empty threshold, we
	// must wake up the receiver side by asserting the receiver virq
	// source. A push request forces the assertion regardless of the
	// threshold.
	if (push || (msgqueue->count == msgqueue->notempty_thd)) {
		(void)virq_assert(&msgqueue->rcv_source, false);
	}

	// Report whether the queue can accept another message.
	if (msgqueue->count == msgqueue->queue_depth) {
		ret.r = false;
	}

out:
	spinlock_release(&msgqueue->lock);

	return ret;
}

receive_info_result_t
msgqueue_receive_msg(msgqueue_t *msgqueue, kernel_or_gvaddr_t buffer,
		     size_t max_size, bool to_kernel)
{
	receive_info_result_t ret  = { 0 };
	size_t		      size = 0U;

	ret.e	       = OK;
	ret.r.size     = 0U;
	ret.r.notempty = true;

	assert(msgqueue != NULL);
	assert(msgqueue->buf != NULL);

	spinlock_acquire(&msgqueue->lock);

	if (msgqueue->count == 0U) {
		ret.e	       = ERROR_MSGQUEUE_EMPTY;
		ret.r.notempty = false;
		goto out;
	}

	(void)memcpy((uint8_t *)&size, msgqueue->buf + msgqueue->head,
		     sizeof(size_t));
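	// size is the message length recorded by the sender in the slot
	// header.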

	// Dequeue message from the head of the queue
	void *hyp_va =
		(void *)(msgqueue->buf + msgqueue->head + sizeof(size_t));

	if (to_kernel) {
		if (size > max_size) {
			ret.e	       = ERROR_ARGUMENT_SIZE;
			ret.r.notempty = false;
			goto out;
		}

		(void)memcpy((void *)buffer.kernel_addr, hyp_va, size);
	} else {
		size_result_t ret_val = useraccess_copy_to_guest_va(
			buffer.guest_addr, max_size, hyp_va, size, false);
		if (ret_val.e != OK) {
			// Propagate the copy error; the message stays
			// queued.
			ret.e = ret_val.e;
			goto out;
		}
	}

	ret.r.size = size;
	msgqueue->count--;

	// Update head value
	msgqueue->head += (count_t)(msgqueue->max_msg_size + sizeof(size_t));
	assert(msgqueue->head <= msgqueue->queue_size);

	if (msgqueue->head == msgqueue->queue_size) {
		msgqueue->head = 0U;
	}

	// If buffer was previously above the not-full threshold, we must let
	// the sender side know that it can send more messages.
	if (msgqueue->count == msgqueue->notfull_thd) {
		// We wake up the sender side by asserting the sender virq
		// source.
		(void)virq_assert(&msgqueue->send_source, false);
	}

	// Report whether any messages remain queued.
	if (msgqueue->count == 0U) {
		ret.r.notempty = false;
	}

out:
	spinlock_release(&msgqueue->lock);

	return ret;
}

void
msgqueue_flush_queue(msgqueue_t *msgqueue)
{
	assert(msgqueue != NULL);
	assert(msgqueue->buf != NULL);

	spinlock_acquire(&msgqueue->lock);

	// The queue is about to be emptied: wake the sender side and
	// de-assert any pending bound receive interrupt.
	if (msgqueue->count != 0U) {
		(void)virq_assert(&msgqueue->send_source, false);
		(void)virq_clear(&msgqueue->rcv_source);
	}

	(void)memset_s(msgqueue->buf, msgqueue->queue_size, 0,
		       msgqueue->queue_size);
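	// Return the ring to its initial empty state.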
	msgqueue->count = 0U;
	msgqueue->head	= 0U;
	msgqueue->tail	= 0U;

	spinlock_release(&msgqueue->lock);
}

error_t
msgqueue_bind(msgqueue_t *msgqueue, vic_t *vic, virq_t virq,
	      virq_source_t *source, virq_trigger_t trigger)
{
	assert(msgqueue != NULL);
	assert(vic != NULL);
	assert(source != NULL);

	error_t ret = vic_bind_shared(source, vic, virq, trigger);

	return ret;
}

void
msgqueue_unbind(virq_source_t *source)
{
	assert(source != NULL);

	vic_unbind_sync(source);
}

bool
msgqueue_rx_handle_virq_check_pending(virq_source_t *source, bool reasserted)
{
	bool ret;

	assert(source != NULL);

	msgqueue_t *msgqueue = msgqueue_container_of_rcv_source(source);

	if (reasserted) {
		// Previous VIRQ wasn't delivered yet. If we return false in
		// this case, we can't be sure that we won't race with a
		// msgqueue_send_msg() on another CPU.
		ret = true;
	} else {
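		// Leave the VIRQ pending only while the queue still meets
		// the not-empty threshold.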
		ret = (msgqueue->count >= msgqueue->notempty_thd);
	}

	return ret;
}

bool
msgqueue_tx_handle_virq_check_pending(virq_source_t *source, bool reasserted)
{
	bool ret;

	assert(source != NULL);

	msgqueue_t *msgqueue = msgqueue_container_of_send_source(source);

	if (reasserted) {
		// Previous VIRQ wasn't delivered yet. If we return false in
		// this case, we can't be sure that we won't race with a
		// msgqueue_receive_msg() on another CPU.
		ret = true;
	} else {
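		// Leave the VIRQ pending only while the queue is still at or
		// below the not-full threshold.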
		ret = (msgqueue->count <= msgqueue->notfull_thd);
	}

	return ret;
}