1 /*
2 * Copyright 2018 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9 #include "hf/cpu.h"
10
11 #include <stdalign.h>
12
13 #include "hf/arch/cache.h"
14
15 #include "hf/api.h"
16 #include "hf/check.h"
17 #include "hf/dlog.h"
18
19 #include "vmapi/hf/call.h"
20
21 #include "system/sys/cdefs.h"
22
/**
 * The stacks to be used by the CPUs.
 *
 * Align to page boundaries to ensure that cache lines are not shared between a
 * CPU's stack and data that can be accessed from other CPUs. If this did
 * happen, there may be coherency problems when the stack is being used before
 * caching is enabled.
 */
alignas(PAGE_SIZE) static char callstacks[MAX_CPUS][STACK_SIZE] __section(
	.stacks);

/* NOLINTNEXTLINE(misc-redundant-expression) */
static_assert((STACK_SIZE % PAGE_SIZE) == 0, "Keep each stack page aligned.");
static_assert((PAGE_SIZE % STACK_ALIGN) == 0,
	      "Page alignment is too weak for the stack.");
38
/**
 * Internal buffer used to store FF-A messages from a VM Tx. Its usage prevents
 * TOCTOU issues while Hafnium performs actions on information that would
 * otherwise be re-writable by the VM.
 *
 * Each buffer is owned by a single CPU. The buffer can only be used for
 * ffa_msg_send. The information stored in the buffer is only valid while the
 * ffa_msg_send request is being performed.
 */
alignas(PAGE_SIZE) static uint8_t cpu_message_buffer[MAX_CPUS][PAGE_SIZE];
49
50 uint8_t *cpu_get_buffer(struct cpu *c)
51 {
52 size_t cpu_indx = cpu_index(c);
53
54 CHECK(cpu_indx < MAX_CPUS);
55
56 return cpu_message_buffer[cpu_indx];
57 }
58
cpu_get_buffer_size(struct cpu * c)59 uint32_t cpu_get_buffer_size(struct cpu *c)
60 {
61 size_t cpu_indx = cpu_index(c);
62
63 CHECK(cpu_indx < MAX_CPUS);
64
65 return sizeof(cpu_message_buffer[cpu_indx]);
66 }
67
/* State of all supported CPUs. The stack of the first one is initialized. */
struct cpu cpus[MAX_CPUS] = {
	{
		.is_on = 1,
		.stack_bottom = &callstacks[0][STACK_SIZE],
	},
};

/* Number of valid entries in cpus; stays 1 until cpu_module_init() runs. */
uint32_t cpu_count = 1;
77
/**
 * Initializes the cpus array from the list of CPU IDs in the platform
 * configuration and publishes the result so secondary cores can read it
 * before enabling their caches.
 *
 * @param cpu_ids Array of `count` CPU IDs, one per physical core.
 * @param count Number of entries in `cpu_ids`; becomes the new cpu_count.
 */
void cpu_module_init(const cpu_id_t *cpu_ids, size_t count)
{
	uint32_t i;
	uint32_t j;
	/* cpus[0] was statically initialized as the boot CPU; save its ID. */
	cpu_id_t boot_cpu_id = cpus[0].id;
	bool found_boot_cpu = false;

	/*
	 * NOTE(review): count is size_t but cpu_count is uint32_t — this
	 * assumes count <= UINT32_MAX; confirm against callers.
	 */
	cpu_count = count;

	/*
	 * Initialize CPUs with the IDs from the configuration passed in. The
	 * CPUs after the boot CPU are initialized in reverse order. The boot
	 * CPU is initialized when it is found or in place of the last CPU if it
	 * is not found.
	 */
	j = cpu_count;
	for (i = 0; i < cpu_count; ++i) {
		struct cpu *c;
		cpu_id_t id = cpu_ids[i];

		if (found_boot_cpu || id != boot_cpu_id) {
			/* Fill secondary CPUs from the top of the array down. */
			--j;
			c = &cpus[j];
			c->stack_bottom = &callstacks[j][STACK_SIZE];
		} else {
			found_boot_cpu = true;
			c = &cpus[0];
			/* Boot CPU's stack was set statically; sanity check. */
			CHECK(c->stack_bottom == &callstacks[0][STACK_SIZE]);
		}

		sl_init(&c->lock);
		c->id = id;
	}

	if (!found_boot_cpu) {
		/* Boot CPU was initialized but with wrong ID. */
		dlog_warning("Boot CPU's ID not found in config.\n");
		cpus[0].id = boot_cpu_id;
	}

	/*
	 * Clean the cache for the cpus array such that secondary cores
	 * hitting the entry point can read the cpus array consistently
	 * with MMU off (hence data cache off).
	 */
	arch_cache_data_clean_range(va_from_ptr(cpus), sizeof(cpus));

	arch_cache_data_clean_range(va_from_ptr(&cpu_count), sizeof(cpu_count));
}
127
cpu_index(struct cpu * c)128 size_t cpu_index(struct cpu *c)
129 {
130 return c - cpus;
131 }
132
133 /*
134 * Return cpu with the given index.
135 */
cpu_find_index(size_t index)136 struct cpu *cpu_find_index(size_t index)
137 {
138 return (index < MAX_CPUS) ? &cpus[index] : NULL;
139 }
140
141 /**
142 * Turns CPU on and returns the previous state.
143 */
cpu_on(struct cpu * c,ipaddr_t entry,uintreg_t arg)144 bool cpu_on(struct cpu *c, ipaddr_t entry, uintreg_t arg)
145 {
146 bool prev;
147
148 sl_lock(&c->lock);
149 prev = c->is_on;
150 c->is_on = true;
151 sl_unlock(&c->lock);
152
153 if (!prev) {
154 /* This returns the first booted VM (e.g. primary in the NWd) */
155 struct vm *vm = vm_get_first_boot();
156 struct vcpu *vcpu = vm_get_vcpu(vm, cpu_index(c));
157 struct vcpu_locked vcpu_locked;
158
159 vcpu_locked = vcpu_lock(vcpu);
160 vcpu_on(vcpu_locked, entry, arg);
161 vcpu_unlock(&vcpu_locked);
162 }
163
164 return prev;
165 }
166
167 /**
168 * Prepares the CPU for turning itself off.
169 */
cpu_off(struct cpu * c)170 void cpu_off(struct cpu *c)
171 {
172 sl_lock(&c->lock);
173 c->is_on = false;
174 sl_unlock(&c->lock);
175 }
176
177 /**
178 * Searches for a CPU based on its ID.
179 */
cpu_find(cpu_id_t id)180 struct cpu *cpu_find(cpu_id_t id)
181 {
182 size_t i;
183
184 for (i = 0; i < cpu_count; i++) {
185 if (cpus[i].id == id) {
186 return &cpus[i];
187 }
188 }
189
190 return NULL;
191 }
192