/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

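/*
 * Stub implementations of the arch and platform FF-A hooks: each function
 * below is a no-op or returns a conservative default.
 */
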
#include "hf/ffa.h"

#include "hf/arch/plat/ffa.h"

#include "hf/ffa_internal.h"
#include "hf/vcpu.h"
#include "hf/vm.h"

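/* Arch-specific feature queries are not supported for any function ID. */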
struct ffa_value arch_ffa_features(uint32_t function_id)
{
	(void)function_id;
	return ffa_error(FFA_NOT_SUPPORTED);
}

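/* Report the statically configured SPMC ID. */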
ffa_id_t arch_ffa_spmc_id_get(void)
{
	return HF_SPMC_VM_ID;
}

void plat_ffa_log_init(void)
{
}

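/*
 * Memory send operations are always considered valid: no sender/receiver
 * restrictions are enforced here.
 */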
bool plat_ffa_is_memory_send_valid(ffa_id_t receiver, ffa_id_t sender,
				   uint32_t share_func, bool multiple_borrower)
{
	(void)share_func;
	(void)receiver;
	(void)sender;
	(void)multiple_borrower;

	return true;
}

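/*
 * Direct message requests and responses are always treated as valid;
 * forwarding them to another world is not supported.
 */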
bool plat_ffa_is_direct_request_valid(struct vcpu *current,
				      ffa_id_t sender_vm_id,
				      ffa_id_t receiver_vm_id)
{
	(void)current;
	(void)sender_vm_id;
	(void)receiver_vm_id;

	return true;
}

bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
					  struct vm *receiver_vm, uint32_t func)
{
	(void)sender_vm;
	(void)receiver_vm;
	(void)func;

	return false;
}

bool plat_ffa_is_direct_response_valid(struct vcpu *current,
				       ffa_id_t sender_vm_id,
				       ffa_id_t receiver_vm_id)
{
	(void)current;
	(void)sender_vm_id;
	(void)receiver_vm_id;

	return true;
}

bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			  struct ffa_value *ret)
{
	(void)vm_id;
	(void)vcpu_idx;
	(void)ret;

	return false;
}

void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
{
	(void)to_destroy_locked;
}

void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
{
	(void)vm_locked;
}

bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
				     struct ffa_value args,
				     struct ffa_value *ret)
{
	(void)receiver_vm_id;
	(void)args;
	(void)ret;
	return false;
}

bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
				 struct ffa_value *ret)
{
	(void)vm_locked;
	(void)ret;

	return false;
}

bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
				  struct ffa_value *ret)
{
	(void)to_locked;
	(void)ret;

	return false;
}

bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
					struct vm_locked receiver_locked)
{
	(void)sender_locked;
	(void)receiver_locked;

	return false;
}

bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
				struct ffa_value *ret)
{
	(void)receiver_vm_id;
	(void)sender_vm_id;
	(void)ret;

	return false;
}

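/* A memory handle is the allocator index, used verbatim. */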
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index)
{
	return index;
}

bool plat_ffa_memory_handle_allocated_by_current_world(
	ffa_memory_handle_t handle)
{
	(void)handle;
	return false;
}

uint32_t plat_ffa_other_world_mode(void)
{
	return 0U;
}

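/*
 * Notifications are not supported: binding, setting and getting them all
 * fail in the stubs below.
 */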
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
					  ffa_id_t sender_id,
					  ffa_id_t receiver_id)
{
	(void)current;
	(void)sender_id;
	(void)receiver_id;
	return false;
}

bool plat_ffa_notifications_update_bindings_forward(
	ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
{
	(void)receiver_id;
	(void)sender_id;
	(void)flags;
	(void)bitmap;
	(void)is_bind;
	(void)ret;

	return false;
}

void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
{
	(void)vm_locked;
}

ffa_partition_properties_t plat_ffa_partition_properties(
	ffa_id_t vm_id, const struct vm *target)
{
	(void)vm_id;
	(void)target;
	return 0;
}

bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
{
	(void)vm;
	return false;
}

/**
 * Check the validity of the calls:
 * FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
 */
struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
	struct vcpu *current, ffa_id_t vm_id)
{
	/*
	 * The call should only be used by the Hypervisor, so any attempt at
	 * invocation from NWd FF-A endpoints should fail.
	 */
	(void)current;
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}

bool plat_ffa_is_notification_set_valid(struct vcpu *current,
					ffa_id_t sender_id,
					ffa_id_t receiver_id)
{
	(void)current;
	(void)sender_id;
	(void)receiver_id;
	return false;
}

bool plat_ffa_is_notification_get_valid(struct vcpu *current,
					ffa_id_t receiver_id, uint32_t flags)
{
	(void)flags;
	(void)current;
	(void)receiver_id;
	return false;
}

bool plat_ffa_notifications_get_from_sp(
	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
	ffa_notifications_bitmap_t *from_sp,  // NOLINT
	struct ffa_value *ret)		      // NOLINT
{
	(void)receiver_locked;
	(void)vcpu_id;
	(void)from_sp;
	(void)ret;

	return false;
}

bool plat_ffa_notifications_get_framework_notifications(
	struct vm_locked receiver_locked,
	ffa_notifications_bitmap_t *from_fwk,  // NOLINT
	uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret)
{
	(void)receiver_locked;
	(void)from_fwk;
	(void)flags;
	(void)vcpu_id;
	(void)ret;

	return false;
}

bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
				       ffa_id_t receiver_vm_id, uint32_t flags,
				       ffa_notifications_bitmap_t bitmap,
				       struct ffa_value *ret)
{
	(void)sender_vm_id;
	(void)receiver_vm_id;
	(void)flags;
	(void)bitmap;
	(void)ret;

	return false;
}

struct ffa_value plat_ffa_notifications_bitmap_create(
	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
	(void)vm_id;
	(void)vcpu_count;

	return ffa_error(FFA_NOT_SUPPORTED);
}

struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}

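/* VM lookups are not supported here; both getters yield a NULL vm. */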
struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
{
	(void)vm_id;
	return (struct vm_locked){.vm = NULL};
}

struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
{
	(void)vm_id;
	return (struct vm_locked){.vm = NULL};
}

bool plat_ffa_vm_notifications_info_get(     // NOLINTNEXTLINE
	uint16_t *ids, uint32_t *ids_count,  // NOLINTNEXTLINE
	uint32_t *lists_sizes,		     // NOLINTNEXTLINE
	uint32_t *lists_count, const uint32_t ids_count_max)
{
	(void)ids;
	(void)ids_count;
	(void)lists_sizes;
	(void)lists_count;
	(void)ids_count_max;

	return false;
}

bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
{
	(void)current;
	return false;
}

bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
{
	(void)current;
	return false;
}

/**
 * Check if the current VM can resume the target VM/SP using the FFA_RUN ABI.
 */
bool plat_ffa_run_checks(struct vcpu_locked current_locked,
			 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
			 struct ffa_value *run_ret, struct vcpu **next)
{
	(void)current_locked;
	(void)target_vm_id;
	(void)run_ret;
	(void)next;
	(void)vcpu_idx;
	return true;
}

void plat_ffa_notification_info_get_forward(  // NOLINTNEXTLINE
	uint16_t *ids, uint32_t *ids_count,   // NOLINTNEXTLINE
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_count_max)
{
	(void)ids;
	(void)ids_count;
	(void)lists_sizes;
	(void)lists_count;
	(void)ids_count_max;
}

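/* Schedule Receiver Interrupt (SRI) handling is a no-op here. */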
void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
{
	(void)state;
}

void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
{
	(void)cpu;
}

void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
{
	(void)cpu;
}

bool plat_ffa_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
	struct vm_locked receiver_locked)
{
	(void)target_locked;
	(void)current_locked;
	(void)receiver_locked;

	return false;
}

bool plat_ffa_partition_info_get_regs_forward(	// NOLINTNEXTLINE
	const struct ffa_uuid *uuid,
	const uint16_t start_index,  // NOLINTNEXTLINE
	const uint16_t tag,
	struct ffa_partition_info *partitions,	// NOLINTNEXTLINE
	uint16_t partitions_len, ffa_vm_count_t *ret_count)
{
	(void)uuid;
	(void)start_index;
	(void)tag;
	(void)partitions;
	(void)partitions_len;
	(void)ret_count;
	return true;
}

void plat_ffa_partition_info_get_forward(  // NOLINTNEXTLINE
	const struct ffa_uuid *uuid,	   // NOLINTNEXTLINE
	const uint32_t flags,		   // NOLINTNEXTLINE
	struct ffa_partition_info *partitions, ffa_vm_count_t *ret_count)
{
	(void)uuid;
	(void)flags;
	(void)partitions;
	(void)ret_count;
}

bool plat_ffa_is_secondary_ep_register_supported(void)
{
	return false;
}

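/* Nothing to prepare for FFA_MSG_WAIT; report FFA_INTERRUPT to the caller. */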
struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
					   struct vcpu **next)
{
	(void)current_locked;
	(void)next;

	return (struct ffa_value){.func = FFA_INTERRUPT_32};
}

bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
					     ffa_id_t vm_id,
					     ffa_id_t receiver_vm_id,
					     struct vcpu_locked receiver_locked,
					     uint32_t func,  // NOLINTNEXTLINE
					     enum vcpu_state *next_state)
{
	/* Perform state transition checks only for Secure Partitions. */
	(void)current_locked;
	(void)vm_id;
	(void)receiver_vm_id;
	(void)receiver_locked;
	(void)func;
	(void)next_state;

	return true;
}

void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
					 struct vcpu_locked target_locked)
{
	/* Scheduling mode not supported in the Hypervisor/VMs. */
	(void)current_locked;
	(void)target_locked;
}

void plat_ffa_wind_call_chain_ffa_direct_req(
	struct vcpu_locked current_locked,
	struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
	/* Call chains not supported in the Hypervisor/VMs. */
	(void)current_locked;
	(void)receiver_vcpu_locked;
	(void)sender_vm_id;
}

void plat_ffa_unwind_call_chain_ffa_direct_resp(
	struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
	/* Call chains not supported in the Hypervisor/VMs. */
	(void)current_locked;
	(void)next_locked;
}

bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
{
	(void)vm_id;
	return false;
}

bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked,
					struct vcpu **next,
					struct ffa_value to_ret,
					struct ffa_value *signal_interrupt)
{
	/*
	 * Only applicable to the SPMC, which signals virtual secure
	 * interrupts to S-EL0 partitions.
	 */
	(void)current_locked;
	(void)next;
	(void)to_ret;
	(void)signal_interrupt;

	return false;
}

void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
					struct vm_locked vm_locked)
{
	(void)current_locked;
	(void)vm_locked;
}

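/*
 * Memory management calls are not forwarded to another world; reclaim and
 * fragmented sends fail with FFA_INVALID_PARAMETERS.
 */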
struct ffa_value plat_ffa_other_world_mem_send(
	struct vm *from, uint32_t share_func,
	struct ffa_memory_region **memory_region, uint32_t length,
	uint32_t fragment_length, struct mpool *page_pool)
{
	(void)from;
	(void)memory_region;
	(void)length;
	(void)fragment_length;
	(void)page_pool;
	(void)share_func;

	return (struct ffa_value){0};
}

struct ffa_value plat_ffa_other_world_mem_reclaim(
	struct vm *to, ffa_memory_handle_t handle,
	ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
	(void)handle;
	(void)flags;
	(void)page_pool;
	(void)to;

	return ffa_error(FFA_INVALID_PARAMETERS);
}

struct ffa_value plat_ffa_other_world_mem_send_continue(
	struct vm *from, void *fragment, uint32_t fragment_length,
	ffa_memory_handle_t handle, struct mpool *page_pool)
{
	(void)from;
	(void)fragment;
	(void)fragment_length;
	(void)handle;
	(void)page_pool;

	return ffa_error(FFA_INVALID_PARAMETERS);
}

struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
				   ffa_id_t receiver_vm_id, uint32_t size,
				   struct vcpu *current, struct vcpu **next)
{
	(void)sender_vm_id;
	(void)receiver_vm_id;
	(void)size;
	(void)current;
	(void)next;

	return ffa_error(FFA_NOT_SUPPORTED);
}

struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
					struct vcpu **next,
					uint32_t timeout_low,
					uint32_t timeout_high)
{
	(void)current_locked;
	(void)next;
	(void)timeout_low;
	(void)timeout_high;

	return ffa_error(FFA_NOT_SUPPORTED);
}

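/*
 * Arch-specific VM memory management hooks: there is nothing extra to map
 * or tear down, so these succeed trivially without touching page tables.
 */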
bool arch_vm_init_mm(struct vm *vm, struct mpool *ppool)
{
	(void)vm;
	(void)ppool;

	return true;
}

bool arch_vm_iommu_init_mm(struct vm *vm, struct mpool *ppool)
{
	(void)vm;
	(void)ppool;

	return true;
}

bool arch_vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin,
			      paddr_t end, uint32_t mode, struct mpool *ppool)
{
	(void)vm_locked;
	(void)begin;
	(void)end;
	(void)mode;
	(void)ppool;

	return true;
}

void arch_vm_identity_commit(struct vm_locked vm_locked, paddr_t begin,
			     paddr_t end, uint32_t mode, struct mpool *ppool,
			     ipaddr_t *ipa)
{
	(void)vm_locked;
	(void)begin;
	(void)end;
	(void)mode;
	(void)ppool;
	(void)ipa;
}

bool arch_vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
		   struct mpool *ppool)
{
	(void)vm_locked;
	(void)begin;
	(void)end;
	(void)ppool;

	return true;
}

void arch_vm_ptable_defrag(struct vm_locked vm_locked, struct mpool *ppool)
{
	(void)vm_locked;
	(void)ppool;
}

bool arch_vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin,
			  ipaddr_t end, uint32_t *mode)	 // NOLINT
{
	(void)vm_locked;
	(void)begin;
	(void)end;
	(void)mode;

	return true;
}

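/* Return the memory attributes unchanged: no security state is derived. */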
ffa_memory_attributes_t plat_ffa_memory_security_mode(
	ffa_memory_attributes_t attributes, uint32_t mode)
{
	(void)mode;

	return attributes;
}

struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,
				   enum ffa_error error_code)
{
	(void)current;
	(void)next;
	(void)error_code;

	return ffa_error(FFA_NOT_SUPPORTED);
}

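/* Mailbox waiter lists are not maintained; both getters fail with -1. */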
int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current)
{
	(void)vm_id;
	(void)current;

	return -1;
}

int64_t plat_ffa_mailbox_writable_get(const struct vcpu *current)
{
	(void)current;

	return -1;
}

bool plat_ffa_partition_info_get_regs_forward_allowed(void)
{
	return false;
}

void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
{
	(void)vm_locked;
}

bool arch_vm_iommu_mm_identity_map(struct vm_locked vm_locked, paddr_t begin,
				   paddr_t end, uint32_t mode,
				   struct mpool *ppool, ipaddr_t *ipa,
				   struct dma_device_properties *dma_prop)
{
	(void)vm_locked;
	(void)begin;
	(void)end;
	(void)mode;
	(void)ppool;
	(void)ipa;
	(void)dma_prop;

	return true;
}