// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>
#include <stdatomic.h>
#include <string.h>

#include <hypcontainers.h>
#include <hyprights.h>

#include <atomic.h>
#include <bitmap.h>
#include <compiler.h>
#include <cspace.h>
#include <list.h>
#include <object.h>
#include <panic.h>
#include <partition.h>
#include <prng.h>
#include <rcu.h>
#include <refcount.h>
#include <spinlock.h>
#include <thread.h>
#include <util.h>

#include "cspace_object.h"
#include "event_handlers.h"

#define CSPACE_MAX_CAP_COUNT_SUPPORTED \
	(CAP_TABLE_NUM_CAP_SLOTS * CSPACE_NUM_CAP_TABLES)

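// A cspace is a two-level table: a top-level array of up to
// CSPACE_NUM_CAP_TABLES pointers to dynamically allocated cap tables, each
// containing CAP_TABLE_NUM_CAP_SLOTS cap slots. A cap ID decodes to an
// (upper, lower) index pair that selects the table and the slot within it.
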
static_assert(sizeof(cap_data_t) == 16U, "Cap data must be 16 bytes");
static_assert(atomic_is_lock_free((cap_data_t *)NULL),
	      "Cap data is not lock free");
static_assert(sizeof(cap_t) == 32U, "Cap must be 32 bytes");
static_assert(sizeof(cap_table_t) == CAP_TABLE_ALLOC_SIZE,
	      "Cap table not sized correctly");
static_assert(_Alignof(cap_table_t) == CAP_TABLE_ALLOC_SIZE,
	      "Cap table not aligned correctly");
static_assert(sizeof(cspace_t) == CSPACE_ALLOC_SIZE,
	      "Cspace not sized correctly");

cspace_t *
cspace_get_self(void)
{
	return thread_get_self()->cspace_cspace;
}

static cap_table_t *
cspace_get_cap_table(const cap_t *cap)
{
	return (cap_table_t *)util_balign_down((uintptr_t)cap,
					       sizeof(cap_table_t));
}

static index_t
cspace_get_cap_slot_index(const cap_table_t *table, const cap_t *cap)
{
	ptrdiff_t index = cap - &table->cap_slots[0];

	assert((index >= 0U) && (index < (ptrdiff_t)CAP_TABLE_NUM_CAP_SLOTS));
	assert(cap == &table->cap_slots[index]);

	return (index_t)index;
}

// VM-visible cap IDs are randomized. The encode and decode operations take an
// internal linear cspace index and apply a random base offset and a random
// index multiplier. This ensures that for each cspace the cap IDs are unique
// and are re-randomized on each boot.
//
// Currently only a 16-bit random multiplier is used. A larger 64-bit
// multiplier would be better; however, that would require 128-bit multiplies
// and a more complex algorithm to find the inverse.
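//
// As a worked example (values chosen for illustration only): with
// id_mult = 0x0003 and id_rand_base = 0x1000, index 5 encodes to
// (5 * 3) ^ 0x1000 = 0x100f. Decoding strips the XOR to recover r = 0xf,
// then multiplies by the precomputed inverse id_inv = (2^32 / 3) + 1 =
// 0x55555556, so (0xf * 0x55555556) >> 32 = 5. The error term discarded by
// the shift is always below 2^32 for 16-bit indices and multipliers, so the
// round trip is exact.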

static error_t
cspace_init_id_encoder(cspace_t *cspace)
{
	error_t err;
#if !defined(DISABLE_CSPACE_RAND) || !DISABLE_CSPACE_RAND
	uint64_result_t rand_base, rand_mult;

	// Generate randomized ID space.

	// We need to preserve the cap-ID space 0xffffffff.xxxxxxxx for
	// special capability values; the invalid cap ID is -1, for example.
	// Pick a rand_base that cannot generate IDs in that range.
	do {
		rand_base = prng_get64();
		err = rand_base.e;
		if (err != OK) {
			goto out;
		}
	} while ((rand_base.r >> 32) >= 0xffffff00U);

	rand_mult = prng_get64();
	err = rand_mult.e;
	if (err == OK) {
		cspace->id_rand_base = rand_base.r;
		// Pick a non-zero random 16-bit number
		while ((rand_mult.r & 0xffffU) == 0U) {
			rand_mult.r = ((uint64_t)0x5555U << 48U) |
				      (rand_mult.r >> 16U);
		}
		// Calculate a 16-bit random multiplier and its inverse
		cspace->id_mult = rand_mult.r & 0xffffU;
		cspace->id_inv = (util_bit(32) / cspace->id_mult) + 1U;
	}

out:
#else
	cspace->id_rand_base = 0U;
	cspace->id_mult = 1U;
	cspace->id_inv = util_bit(32U) + 1U;
	err = OK;
#endif
	return err;
}

static cap_id_t
cspace_encode_cap_id(const cspace_t *cspace, cap_value_t val)
{
	uint64_t v = (uint64_t)cap_value_raw(val);

	return (cap_id_t)((v * cspace->id_mult) ^ cspace->id_rand_base);
}

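// Decoding uses the precomputed fixed-point inverse: id_mult * id_inv equals
// 2^32 plus an error term no larger than id_mult, so for any 16-bit value v,
// ((v * id_mult) * id_inv) >> 32 recovers v exactly. The checks below reject
// IDs whose XOR-stripped value does not fit in 32 bits or whose decoded index
// does not fit in 16 bits.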
static cap_value_result_t
cspace_decode_cap_id(const cspace_t *cspace, cap_id_t id)
{
	cap_value_result_t ret;
	uint64_t r = id ^ cspace->id_rand_base;
	uint64_t v = (r * cspace->id_inv) >> 32U;

	if (compiler_expected((r == (uint64_t)(uint32_t)r) &&
			      (v == (uint64_t)(uint16_t)v))) {
		ret = cap_value_result_ok(cap_value_cast((uint16_t)v));
	} else {
		ret = cap_value_result_error(ERROR_ARGUMENT_INVALID);
	}

	return ret;
}

static error_t
cspace_cap_id_to_indices(const cspace_t *cspace, cap_id_t cap_id,
			 index_t *upper, index_t *lower)
{
	error_t err;
	cap_value_result_t ret = cspace_decode_cap_id(cspace, cap_id);

	if (compiler_expected(ret.e == OK)) {
		*lower = cap_value_get_lower_index(&ret.r);
		*upper = cap_value_get_upper_index(&ret.r);
		if (compiler_expected((*upper < CSPACE_NUM_CAP_TABLES) &&
				      (*lower < CAP_TABLE_NUM_CAP_SLOTS))) {
			err = OK;
		} else {
			err = ERROR_ARGUMENT_INVALID;
		}
	} else {
		err = ret.e;
	}

	return err;
}

static cap_id_t
cspace_indices_to_cap_id(const cspace_t *cspace, index_t upper, index_t lower)
{
	cap_value_t val = cap_value_default();

	cap_value_set_lower_index(&val, lower);
	cap_value_set_upper_index(&val, upper);

	return cspace_encode_cap_id(cspace, val);
}

static error_t
cspace_check_cap_data(cap_data_t data, object_type_t type, cap_rights_t rights)
{
	error_t err;
	object_type_t obj_type = cap_info_get_type(&data.info);
	cap_state_t state = cap_info_get_state(&data.info);
	cap_rights_t masked_rights = data.rights & rights;

	if (compiler_expected(state == CAP_STATE_VALID)) {
		if (compiler_expected(obj_type == type) ||
		    (type == OBJECT_TYPE_ANY)) {
			err = OK;
		} else {
			err = ERROR_CSPACE_WRONG_OBJECT_TYPE;
			goto out;
		}
	} else if (state == CAP_STATE_NULL) {
		err = ERROR_CSPACE_CAP_NULL;
		goto out;
	} else if (state == CAP_STATE_REVOKED) {
		err = ERROR_CSPACE_CAP_REVOKED;
		goto out;
	} else {
		panic("invalid cap state");
	}

	if (compiler_unexpected(masked_rights != rights)) {
		err = ERROR_CSPACE_INSUFFICIENT_RIGHTS;
	}
out:
	return err;
}

// Update the cap data for the given cap. This will only succeed if the cap
// hasn't been modified since it was last read; as such, the function can also
// be used to check that a cap is unchanged after a previous read.
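//
// In particular, a no-op update with new_data equal to *expected_data
// atomically confirms that the cap is unchanged without modifying it;
// cspace_copy_cap() and cspace_revoke_caps() below rely on this.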
static error_t
cspace_update_cap_slot(cap_t *cap, cap_data_t *expected_data,
		       cap_data_t new_data)
{
	bool success = atomic_compare_exchange_strong_explicit(
		&cap->data, expected_data, new_data, memory_order_relaxed,
		memory_order_relaxed);

	return success ? OK : ERROR_BUSY;
}

static error_t
cspace_lookup_cap_slot(const cspace_t *cspace, cap_id_t cap_id, cap_t **cap)
{
	error_t err;
	index_t upper_index, lower_index;
	cap_table_t *table;

	err = cspace_cap_id_to_indices(cspace, cap_id, &upper_index,
				       &lower_index);
	if (compiler_expected(err == OK)) {
		table = atomic_load_consume(&cspace->tables[upper_index]);
		if (compiler_expected(table != NULL)) {
			*cap = &table->cap_slots[lower_index];
			err = OK;
		} else {
			err = ERROR_CSPACE_CAP_NULL;
		}
	}

	return err;
}

static error_t
cspace_allocate_cap_table(cspace_t *cspace, cap_table_t **table,
			  index_t *upper_index)
{
	error_t err;
	void_ptr_result_t ret;
	index_t index;
	cap_table_t *new_table;
	partition_t *partition = cspace->header.partition;

	do {
		if (!bitmap_atomic_ffc(cspace->allocated_tables,
				       CSPACE_NUM_CAP_TABLES, &index)) {
			err = ERROR_CSPACE_FULL;
			goto allocate_cap_table_error;
		}
		// Loop until we successfully change bit state.
	} while (bitmap_atomic_test_and_set(cspace->allocated_tables, index,
					    memory_order_relaxed));

	ret = partition_alloc(partition, sizeof(cap_table_t),
			      alignof(cap_table_t));
	if (ret.e != OK) {
		(void)bitmap_atomic_test_and_clear(cspace->allocated_tables,
						   index, memory_order_relaxed);
		err = ERROR_NOMEM;
		goto allocate_cap_table_error;
	}

	new_table = (cap_table_t *)ret.r;
	(void)memset_s(new_table, sizeof(*new_table), 0, sizeof(*new_table));

	new_table->partition = object_get_partition_additional(partition);
	new_table->cspace = cspace;
	new_table->index = index;

	*table = new_table;
	*upper_index = index;
	err = OK;

allocate_cap_table_error:
	return err;
}

rcu_update_status_t
cspace_destroy_cap_table(rcu_entry_t *entry)
{
	index_t index;
	cap_table_t *table = cap_table_container_of_rcu_entry(entry);
	partition_t *partition = table->partition;
	rcu_update_status_t ret = rcu_update_status_default();

	// If called via cspace destroy, there may still
	// be valid caps which also require destruction.
	for (; table->cap_count > 0U; table->cap_count--) {
		cap_t *cap;
		cap_data_t data;
		object_type_t type;
		object_header_t *header;
		bool cap_list_empty;

		if (compiler_unexpected(!bitmap_atomic_ffs(
			    table->used_slots, CAP_TABLE_NUM_CAP_SLOTS,
			    &index))) {
			panic("cap table has incorrect cap_count on delete");
		}

		cap = &table->cap_slots[index];
		data = atomic_load_relaxed(&cap->data);

		bitmap_atomic_clear(table->used_slots, index,
				    memory_order_relaxed);

		if (cap_info_get_state(&data.info) != CAP_STATE_VALID) {
			continue;
		}

		type = cap_info_get_type(&data.info);
		header = object_get_header(type, data.object);
		spinlock_acquire(&header->cap_list_lock);
		(void)list_delete_node(&header->cap_list, &cap->cap_list_node);
		cap_list_empty = list_is_empty(&header->cap_list);
		spinlock_release(&header->cap_list_lock);

		if (cap_list_empty) {
			object_put(type, data.object);
		}
	}

	(void)partition_free(partition, table, sizeof(cap_table_t));
	object_put_partition(partition);

	return ret;
}

static error_t
cspace_allocate_cap_slot(cspace_t *cspace, cap_t **cap, cap_id_t *cap_id)
	REQUIRE_RCU_READ
{
	error_t err;
	cap_table_t *table;
	index_t upper_index, lower_index;

	spinlock_acquire(&cspace->cap_allocation_lock);

	if (cspace->cap_count == cspace->max_caps) {
		spinlock_release(&cspace->cap_allocation_lock);
		err = ERROR_CSPACE_FULL;
		goto allocate_cap_slot_error;
	}

	if (bitmap_ffs(cspace->available_tables, CSPACE_NUM_CAP_TABLES,
		       &upper_index)) {
		table = atomic_load_relaxed(&cspace->tables[upper_index]);
	} else {
		// Allocation may require preemption, so release the lock.
		spinlock_release(&cspace->cap_allocation_lock);
		rcu_read_finish();
		err = cspace_allocate_cap_table(cspace, &table, &upper_index);
		rcu_read_start();
		if (err != OK) {
			goto allocate_cap_slot_error;
		}
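		// Dropping the lock is safe: the allocated_tables bitmap
		// reservation taken in cspace_allocate_cap_table() prevents
		// any other thread from claiming upper_index in the interim.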
		// Re-acquire lock and attach table.
		spinlock_acquire(&cspace->cap_allocation_lock);
		// Store with release, as table initialisation
		// must be ordered before table attachment.
		atomic_store_release(&cspace->tables[upper_index], table);
		bitmap_set(cspace->available_tables, upper_index);
	}

	table->cap_count++;
	cspace->cap_count++;

	if (table->cap_count == CAP_TABLE_NUM_CAP_SLOTS) {
		bitmap_clear(cspace->available_tables, upper_index);
	}

	spinlock_release(&cspace->cap_allocation_lock);

	do {
		if (compiler_unexpected(!bitmap_atomic_ffc(
			    table->used_slots, CAP_TABLE_NUM_CAP_SLOTS,
			    &lower_index))) {
			panic("cap table has incorrect cap_count on allocate");
		}
		// Loop until we successfully change bit state.
	} while (bitmap_atomic_test_and_set(table->used_slots, lower_index,
					    memory_order_relaxed));

	*cap = &table->cap_slots[lower_index];
	*cap_id = cspace_indices_to_cap_id(cspace, upper_index, lower_index);
	err = OK;

allocate_cap_slot_error:
	return err;
}

// Assumes cap data is already set to null
static void
cspace_free_cap_slot(cspace_t *cspace, cap_t *cap)
{
	cap_table_t *table;
	index_t upper_index, lower_index;

	table = cspace_get_cap_table(cap);
	lower_index = cspace_get_cap_slot_index(table, cap);
	upper_index = table->index;

	(void)bitmap_atomic_test_and_clear(table->used_slots, lower_index,
					   memory_order_relaxed);

	spinlock_acquire(&cspace->cap_allocation_lock);

	if (table->cap_count == CAP_TABLE_NUM_CAP_SLOTS) {
		bitmap_set(cspace->available_tables, upper_index);
	}

	table->cap_count--;
	cspace->cap_count--;

	if (table->cap_count == 0U) {
		(void)bitmap_atomic_test_and_clear(cspace->allocated_tables,
						   upper_index,
						   memory_order_relaxed);
		bitmap_clear(cspace->available_tables, upper_index);
		atomic_store_relaxed(&cspace->tables[upper_index], NULL);
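		// RCU readers may still hold pointers into this table, so
		// defer freeing it until a grace period has elapsed.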
		rcu_enqueue(&table->rcu_entry,
			    RCU_UPDATE_CLASS_CSPACE_RELEASE_LEVEL);
	}

	spinlock_release(&cspace->cap_allocation_lock);
}

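// On success, cspace_lookup_object() returns the object with an additional
// reference held, which the caller must drop with object_put() when done.
// A minimal caller sketch (the object type and rights here are illustrative
// assumptions, not taken from this file):
//
//	object_ptr_result_t r = cspace_lookup_object(cspace, cap_id,
//						     OBJECT_TYPE_THREAD,
//						     rights, true);
//	if (r.e == OK) {
//		/* ... use r.r ... */
//		object_put(OBJECT_TYPE_THREAD, r.r);
//	}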
object_ptr_result_t
cspace_lookup_object(cspace_t *cspace, cap_id_t cap_id, object_type_t type,
		     cap_rights_t rights, bool active_only)
{
	error_t err;
	cap_t *cap;
	cap_data_t cap_data;
	object_ptr_result_t ret;

	assert(type != OBJECT_TYPE_ANY);

	rcu_read_start();

	err = cspace_lookup_cap_slot(cspace, cap_id, &cap);
	if (compiler_unexpected(err != OK)) {
		ret = object_ptr_result_error(err);
		goto lookup_object_error;
	}

	cap_data = atomic_load_consume(&cap->data);
	err = cspace_check_cap_data(cap_data, type, rights);
	if (compiler_unexpected(err != OK)) {
		ret = object_ptr_result_error(err);
		goto lookup_object_error;
	}
	if (active_only) {
		object_state_t obj_state = atomic_load_acquire(
			&object_get_header(type, cap_data.object)->state);
		if (compiler_unexpected(obj_state != OBJECT_STATE_ACTIVE)) {
			ret = object_ptr_result_error(ERROR_OBJECT_STATE);
			goto lookup_object_error;
		}
	}
	if (compiler_unexpected(!object_get_safe(type, cap_data.object))) {
		ret = object_ptr_result_error(ERROR_CSPACE_CAP_NULL);
		goto lookup_object_error;
	}
	ret = object_ptr_result_ok(cap_data.object);

lookup_object_error:
	rcu_read_finish();

	return ret;
}

object_ptr_result_t
cspace_lookup_object_any(cspace_t *cspace, cap_id_t cap_id,
			 cap_rights_generic_t rights, object_type_t *type)
{
	error_t err;
	cap_t *cap;
	cap_data_t cap_data;
	object_ptr_result_t ret;
	object_type_t obj_type = OBJECT_TYPE_ANY;

	assert(type != NULL);
	// Only valid generic object rights may be specified
	assert((~cap_rights_generic_raw(CAP_RIGHTS_GENERIC_ALL) &
		cap_rights_generic_raw(rights)) == 0U);

	rcu_read_start();

	err = cspace_lookup_cap_slot(cspace, cap_id, &cap);
	if (compiler_unexpected(err != OK)) {
		ret = object_ptr_result_error(err);
		goto lookup_object_error;
	}

	cap_data = atomic_load_consume(&cap->data);
	obj_type = cap_info_get_type(&cap_data.info);
	err = cspace_check_cap_data(cap_data, OBJECT_TYPE_ANY,
				    cap_rights_generic_raw(rights));
	if (compiler_unexpected(err != OK)) {
		ret = object_ptr_result_error(err);
		goto lookup_object_error;
	}
	if (compiler_unexpected(!object_get_safe(obj_type, cap_data.object))) {
		ret = object_ptr_result_error(ERROR_CSPACE_CAP_NULL);
		goto lookup_object_error;
	}
	ret = object_ptr_result_ok(cap_data.object);

lookup_object_error:
	*type = obj_type;
	rcu_read_finish();

	return ret;
}

error_t
cspace_twolevel_handle_object_create_cspace(cspace_create_t cspace_create)
{
	error_t err;
	cspace_t *cspace = cspace_create.cspace;

	// The cspace has been zeroed on allocation,
	// so just initialise non-zero fields.
	spinlock_init(&cspace->cap_allocation_lock);
	spinlock_init(&cspace->revoked_cap_list_lock);
	list_init(&cspace->revoked_cap_list);
	err = cspace_init_id_encoder(cspace);

	return err;
}

error_t
cspace_configure(cspace_t *cspace, count_t max_caps)
{
	error_t err;

	assert(atomic_load_relaxed(&cspace->header.state) == OBJECT_STATE_INIT);

	if (max_caps <= CSPACE_MAX_CAP_COUNT_SUPPORTED) {
		cspace->max_caps = max_caps;
		err = OK;
	} else {
		err = ERROR_ARGUMENT_INVALID;
	}

	return err;
}

error_t
cspace_twolevel_handle_object_activate_cspace(cspace_t *cspace)
{
	if (cspace->max_caps != 0U) {
		return OK;
	} else {
		return ERROR_OBJECT_CONFIG;
	}
}

void
cspace_twolevel_handle_object_cleanup_cspace(cspace_t *cspace)
{
	// Ensure all lower levels destroyed
	for (index_t i = 0U; i < CSPACE_NUM_CAP_TABLES; i++) {
		cap_table_t *table = atomic_load_relaxed(&cspace->tables[i]);
		if (table != NULL) {
			(void)cspace_destroy_cap_table(&table->rcu_entry);
		}
	}
}

cap_id_result_t
cspace_create_master_cap(cspace_t *cspace, object_ptr_t object,
			 object_type_t type)
{
	error_t err;
	cap_t *new_cap;
	cap_data_t cap_data;
	cap_id_t new_cap_id;
	cap_id_result_t ret;

	assert(type != OBJECT_TYPE_ANY);

	// Objects are initialized with a refcount of 1 which is for the master
	// cap reference here.
	cap_data.object = object;
	cap_data.rights = cspace_get_rights_all(type);

	cap_info_init(&cap_data.info);
	cap_info_set_master_cap(&cap_data.info, true);
	cap_info_set_type(&cap_data.info, type);
	cap_info_set_state(&cap_data.info, CAP_STATE_VALID);

	rcu_read_start();

	err = cspace_allocate_cap_slot(cspace, &new_cap, &new_cap_id);
	if (err == OK) {
		object_header_t *header = object_get_header(type, object);
		// No need to hold cap list lock prior to cap being available
		// in the cspace. Instead, store cap data with release to
		// ensure object & cap list initialisation is ordered-before.
		list_insert_at_head(&header->cap_list, &new_cap->cap_list_node);
		atomic_store_release(&new_cap->data, cap_data);
		ret = cap_id_result_ok(new_cap_id);
	} else {
		ret = cap_id_result_error(err);
	}

	rcu_read_finish();

	return ret;
}

cap_id_result_t
cspace_copy_cap(cspace_t *target_cspace, cspace_t *parent_cspace,
		cap_id_t parent_id, cap_rights_t rights_mask)
{
	error_t err;
	cap_t *new_cap, *parent_cap;
	cap_data_t cap_data;
	cap_id_t new_cap_id;
	object_header_t *header;
	cap_id_result_t ret;

	rcu_read_start();

	// We try to allocate the cap slot first, as we may need to
	// be preempted if allocating a cap table is required.
	err = cspace_allocate_cap_slot(target_cspace, &new_cap, &new_cap_id);
	if (err != OK) {
		ret = cap_id_result_error(err);
		goto copy_cap_error;
	}

	err = cspace_lookup_cap_slot(parent_cspace, parent_id, &parent_cap);
	if (err != OK) {
		cspace_free_cap_slot(target_cspace, new_cap);
		ret = cap_id_result_error(err);
		goto copy_cap_error;
	}

	cap_data = atomic_load_consume(&parent_cap->data);

	err = cspace_check_cap_data(cap_data, OBJECT_TYPE_ANY, 0U);
	if (err != OK) {
		cspace_free_cap_slot(target_cspace, new_cap);
		ret = cap_id_result_error(err);
		goto copy_cap_error;
	}
	cap_rights_t masked_rights = cap_data.rights & rights_mask;
	if (masked_rights == 0U) {
		cspace_free_cap_slot(target_cspace, new_cap);
		ret = cap_id_result_error(ERROR_CSPACE_INSUFFICIENT_RIGHTS);
		goto copy_cap_error;
	}

	header = object_get_header(cap_info_get_type(&cap_data.info),
				   cap_data.object);
	spinlock_acquire(&header->cap_list_lock);

	// Perform a no-op update on the parent cap to ensure its data
	// has not changed since it was read.
	err = cspace_update_cap_slot(parent_cap, &cap_data, cap_data);
	if (err == OK) {
		// Reuse parent cap data with updated rights.
		cap_data.rights = masked_rights;
		// Ensure this is not created as the master cap
		cap_info_set_master_cap(&cap_data.info, false);
		atomic_store_relaxed(&new_cap->data, cap_data);
		list_insert_after_node(&header->cap_list,
				       &parent_cap->cap_list_node,
				       &new_cap->cap_list_node);
	}

	spinlock_release(&header->cap_list_lock);

	if (err == OK) {
		ret = cap_id_result_ok(new_cap_id);
	} else {
		cspace_free_cap_slot(target_cspace, new_cap);
		ret = cap_id_result_error(err);
	}

copy_cap_error:
	rcu_read_finish();
	return ret;
}

error_t
cspace_delete_cap(cspace_t *cspace, cap_id_t cap_id)
{
	error_t err;
	cap_t *cap;
	cap_data_t cap_data, null_cap_data = { 0 };
	cap_state_t state;
	object_type_t type;
	object_ptr_t object;
	bool cap_list_empty = false;

	rcu_read_start();

	err = cspace_lookup_cap_slot(cspace, cap_id, &cap);
	if (err != OK) {
		goto delete_cap_error;
	}

	cap_data = atomic_load_consume(&cap->data);
	state = cap_info_get_state(&cap_data.info);
	type = cap_info_get_type(&cap_data.info);
	object = cap_data.object;

	if (state == CAP_STATE_VALID) {
		object_header_t *header = object_get_header(type, object);
		spinlock_acquire(&header->cap_list_lock);

		err = cspace_update_cap_slot(cap, &cap_data, null_cap_data);
		if (err == OK) {
			(void)list_delete_node(&header->cap_list,
					       &cap->cap_list_node);
			cap_list_empty = list_is_empty(&header->cap_list);
		}

		spinlock_release(&header->cap_list_lock);
	} else if (state == CAP_STATE_REVOKED) {
		spinlock_acquire(&cspace->revoked_cap_list_lock);

		err = cspace_update_cap_slot(cap, &cap_data, null_cap_data);
		if (err == OK) {
			(void)list_delete_node(&cspace->revoked_cap_list,
					       &cap->cap_list_node);
		}

		spinlock_release(&cspace->revoked_cap_list_lock);
	} else {
		err = ERROR_CSPACE_CAP_NULL;
	}

	if (err == OK) {
		cspace_free_cap_slot(cspace, cap);
		if (cap_list_empty) {
			object_put(type, object);
		}
	}

delete_cap_error:
	rcu_read_finish();
	return err;
}

error_t
cspace_revoke_caps(cspace_t *cspace, cap_id_t master_cap_id)
{
	error_t err;
	cap_t *master_cap;
	cap_data_t master_cap_data;
	object_header_t *header;

	rcu_read_start();

	err = cspace_lookup_cap_slot(cspace, master_cap_id, &master_cap);
	if (err != OK) {
		goto revoke_caps_error;
	}

	master_cap_data = atomic_load_consume(&master_cap->data);
	err = cspace_check_cap_data(master_cap_data, OBJECT_TYPE_ANY, 0U);
	if (err != OK) {
		goto revoke_caps_error;
	}
	if (!cap_info_get_master_cap(&master_cap_data.info)) {
		err = ERROR_CSPACE_INSUFFICIENT_RIGHTS;
		goto revoke_caps_error;
	}

	header = object_get_header(cap_info_get_type(&master_cap_data.info),
				   master_cap_data.object);
	spinlock_acquire(&header->cap_list_lock);

	// Perform a no-op update on the master cap. If this fails,
	// the master cap data has changed.
	err = cspace_update_cap_slot(master_cap, &master_cap_data,
				     master_cap_data);
	if (err != OK) {
		spinlock_release(&header->cap_list_lock);
		goto revoke_caps_error;
	}

	// Child caps are always inserted after the parent, so the
	// master cap will be at the head of the object cap list.
	list_t *list = &header->cap_list;
	assert(list_get_head(list) == &master_cap->cap_list_node);

	// FIXME:
	cap_t *curr_cap = NULL;

	list_foreach_container_maydelete (curr_cap, list, cap, cap_list_node) {
		if (curr_cap == master_cap) {
			continue;
		}

		cap_data_t curr_cap_data = atomic_load_relaxed(&curr_cap->data);

		cap_info_set_state(&curr_cap_data.info, CAP_STATE_REVOKED);

		// Clear the object this cap points to, since the object could
		// be deleted by deleting the last valid cap; revoked caps
		// pointing at freed memory would make debugging confusing.
		curr_cap_data.object = (object_ptr_t){ 0 };

		// It is safe to get the child cap's cspace, as the child
		// cap must be destroyed before the cspace can be, and
		// this cannot happen while we hold the cap list lock.
		cspace_t *curr_cspace = cspace_get_cap_table(curr_cap)->cspace;
		spinlock_acquire_nopreempt(&curr_cspace->revoked_cap_list_lock);

		// Child cap data won't change while we hold the locks,
		// so just atomically store the invalid data.
		atomic_store_relaxed(&curr_cap->data, curr_cap_data);
		(void)list_delete_node(&header->cap_list,
				       &curr_cap->cap_list_node);
		list_insert_at_head(&curr_cspace->revoked_cap_list,
				    &curr_cap->cap_list_node);
		spinlock_release_nopreempt(&curr_cspace->revoked_cap_list_lock);
	}

	spinlock_release(&header->cap_list_lock);

revoke_caps_error:
	rcu_read_finish();
	return err;
}

error_t
cspace_attach_thread(cspace_t *cspace, thread_t *thread)
{
	assert(thread != NULL);
	assert(cspace != NULL);
	assert(atomic_load_relaxed(&cspace->header.state) ==
	       OBJECT_STATE_ACTIVE);
	assert(atomic_load_relaxed(&thread->header.state) == OBJECT_STATE_INIT);

	if (thread->cspace_cspace != NULL) {
		object_put_cspace(thread->cspace_cspace);
	}

	thread->cspace_cspace = object_get_cspace_additional(cspace);

	return OK;
}

void
cspace_twolevel_handle_object_deactivate_thread(thread_t *thread)
{
	assert(thread != NULL);

	cspace_t *cspace = thread->cspace_cspace;

	if (cspace != NULL) {
		object_put_cspace(thread->cspace_cspace);
		thread->cspace_cspace = NULL;
	}
}