/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/cpu.h"

#include "hf/arch/cache.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/dlog.h"
#include "hf/list.h"
#include "hf/types.h"

#include "vmapi/hf/call.h"

/**
 * The stacks to be used by the CPUs.
 *
 * Defined in assembly for aarch64 in "src/arch/aarch64/stacks.S".
 * Defined for host-based unit tests in "src/cpu_test.cc".
 */
extern char callstacks[MAX_CPUS][STACK_SIZE];

/* NOLINTNEXTLINE(misc-redundant-expression) */
static_assert((STACK_SIZE % PAGE_SIZE) == 0, "Keep each stack page aligned.");
static_assert((PAGE_SIZE % STACK_ALIGN) == 0,
	      "Page alignment is too weak for the stack.");
/**
 * Internal buffer used to store FF-A messages copied from a VM's TX buffer.
 * Using it prevents TOCTOU issues: Hafnium acts on a private copy of
 * information that would otherwise remain rewritable by the VM.
 *
 * Each buffer is owned by a single CPU and can be used when handling FF-A
 * messages to and from the SPMC, e.g. FF-A memory sharing, indirect
 * messaging and partition info get.
 */
alignas(PAGE_SIZE) static uint8_t cpu_message_buffer[MAX_CPUS][HF_MAILBOX_SIZE];

uint8_t *cpu_get_buffer(struct cpu *c)
{
	size_t cpu_indx = cpu_index(c);

	CHECK(cpu_indx < MAX_CPUS);

	return cpu_message_buffer[cpu_indx];
}

uint32_t cpu_get_buffer_size(struct cpu *c)
{
	size_t cpu_indx = cpu_index(c);

	CHECK(cpu_indx < MAX_CPUS);

	return sizeof(cpu_message_buffer[cpu_indx]);
}
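
/*
 * Illustrative sketch, not part of the original file: a message handler
 * would typically snapshot the sender's TX page into the per-CPU buffer
 * before validating it, so the VM cannot change the data mid-check. The
 * function name and `tx_page` parameter are hypothetical; memcpy_s is
 * assumed to come from "hf/std.h".
 *
 *	static void example_snapshot_tx(struct cpu *c, const void *tx_page)
 *	{
 *		uint8_t *buf = cpu_get_buffer(c);
 *		uint32_t size = cpu_get_buffer_size(c);
 *
 *		memcpy_s(buf, size, tx_page, HF_MAILBOX_SIZE);
 *	}
 *
 * After the copy, validation can proceed on `buf` without rereading the
 * VM-writable page.
 */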

/* State of all supported CPUs. The stack of the first one is initialized. */
struct cpu cpus[MAX_CPUS] = {
	{
		.is_on = 1,
		.stack_bottom = &callstacks[0][STACK_SIZE],
	},
};

uint32_t cpu_count = 1;

void cpu_module_init(const cpu_id_t *cpu_ids, size_t count)
{
	uint32_t i;
	uint32_t j;
	cpu_id_t boot_cpu_id = cpus[0].id;
	bool found_boot_cpu = false;

	cpu_count = count;

	/*
	 * Initialize CPUs with the IDs from the configuration passed in. The
	 * CPUs after the boot CPU are initialized in reverse order. The boot
	 * CPU is initialized when it is found, or in place of the last CPU
	 * if it is not found.
	 */
	j = cpu_count;
	for (i = 0; i < cpu_count; ++i) {
		struct cpu *c;
		struct timer_pending_vcpu_list *timer_list;
		cpu_id_t id = cpu_ids[i];

		if (found_boot_cpu || id != boot_cpu_id) {
			--j;
			c = &cpus[j];
			c->stack_bottom = &callstacks[j][STACK_SIZE];
		} else {
			found_boot_cpu = true;
			c = &cpus[0];
			CHECK(c->stack_bottom == &callstacks[0][STACK_SIZE]);
		}

		sl_init(&c->lock);
		c->id = id;

		timer_list = &c->pending_timer_vcpus_list;

		/*
		 * Initialize the list of vCPUs with a pending arch timer for
		 * each CPU. The root entry is configured such that its `prev`
		 * and `next` fields point to itself.
		 */
		list_init(&(timer_list->root_entry));

		/*
		 * Initialize the list of vCPUs with pending IPIs for each
		 * CPU. The root entry is configured such that its `prev` and
		 * `next` fields point to itself.
		 */
		list_init(&c->pending_ipis);
	}

	if (!found_boot_cpu) {
		/* The boot CPU was initialized, but with the wrong ID. */
		dlog_warning("Boot CPU's ID not found in config.\n");
		cpus[0].id = boot_cpu_id;
	}

	/*
	 * Clean the cache for the cpus array so that secondary cores hitting
	 * the entry point can read it consistently with the MMU off (and
	 * hence the data cache off).
	 */
	arch_cache_data_clean_range(va_from_ptr(cpus), sizeof(cpus));

	arch_cache_data_clean_range(va_from_ptr(&cpu_count), sizeof(cpu_count));
}
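
/*
 * Illustrative sketch, not part of the original file: the boot path calls
 * this once with the CPU IDs discovered from the FDT. The `params` names
 * below follow Hafnium's boot-parameter struct but are quoted from memory,
 * so treat them as an assumption:
 *
 *	cpu_module_init(params->cpu_ids, params->cpu_count);
 */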

/** Returns the index of the given CPU within the `cpus` array. */
size_t cpu_index(struct cpu *c)
{
	return c - cpus;
}

/**
 * Returns the CPU with the given index, or NULL if the index is out of
 * range.
 */
struct cpu *cpu_find_index(size_t index)
{
	return (index < MAX_CPUS) ? &cpus[index] : NULL;
}
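
/*
 * Illustrative note, not part of the original file: because cpu_index() is
 * plain pointer subtraction against `cpus`, it is the inverse of
 * cpu_find_index() for any in-range index:
 *
 *	struct cpu *c = cpu_find_index(1);
 *
 *	CHECK(c != NULL && cpu_index(c) == 1);
 */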

/**
 * Turns the CPU on and returns the previous power state.
 */
bool cpu_on(struct cpu *c)
{
	bool prev;

	sl_lock(&c->lock);
	prev = c->is_on;
	c->is_on = true;
	sl_unlock(&c->lock);

	return prev;
}
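
/*
 * Illustrative sketch, not part of the original file: the returned previous
 * state lets a caller detect a first power-on. For example, a hypothetical
 * PSCI CPU_ON handler might do:
 *
 *	if (!cpu_on(c)) {
 *		// The CPU was previously off: perform first-boot setup
 *		// (e.g. preparing its boot vCPU) before it starts running.
 *	}
 */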

/**
 * Prepares the CPU for turning itself off.
 */
void cpu_off(struct cpu *c)
{
	sl_lock(&c->lock);
	c->is_on = false;
	c->last_sp_initialized = false;
	sl_unlock(&c->lock);
}

/**
 * Searches for a CPU based on its ID.
 */
struct cpu *cpu_find(cpu_id_t id)
{
	size_t i;

	for (i = 0; i < cpu_count; i++) {
		if (cpus[i].id == id) {
			return &cpus[i];
		}
	}

	return NULL;
}
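
/*
 * Illustrative sketch, not part of the original file: secondary-core entry
 * code can map its hardware ID back to its `struct cpu`. The helper name
 * `arch_cpu_id_from_mpidr` is hypothetical:
 *
 *	struct cpu *c = cpu_find(arch_cpu_id_from_mpidr());
 *
 *	CHECK(c != NULL);
 *
 * A NULL result means the core's ID was not in the boot configuration.
 */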