/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch_features.h>
#include <assert.h>
#include <buffer.h>
#include <debug.h>
#include <feature.h>
#include <granule.h>
#include <measurement.h>
#include <realm.h>
#include <s2tt.h>
#include <simd.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stddef.h>
#include <string.h>
#include <vmid.h>

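/*
 * Minimum IPA width that can be requested for a Realm, corresponding
 * to a 32-bit physical address range.
 */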
#define RMI_FEATURE_MIN_IPA_SIZE	PARANGE_WIDTH_32BITS

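/*
 * Handle RMI_REALM_ACTIVATE: transition the Realm described by the RD
 * granule at 'rd_addr' from the NEW to the ACTIVE state.
 *
 * Returns RMI_ERROR_INPUT if 'rd_addr' does not refer to an RD granule,
 * RMI_ERROR_REALM if the Realm is not in the NEW state, and RMI_SUCCESS
 * otherwise.
 */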
unsigned long smc_realm_activate(unsigned long rd_addr)
{
	struct rd *rd;
	struct granule *g_rd;
	unsigned long ret;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		return RMI_ERROR_INPUT;
	}

	rd = buffer_granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (get_rd_state_locked(rd) == REALM_NEW) {
		set_rd_state(rd, REALM_ACTIVE);
		ret = RMI_SUCCESS;
	} else {
		ret = RMI_ERROR_REALM;
	}
	buffer_unmap(rd);

	granule_unlock(g_rd);

	return ret;
}

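/*
 * Copy the Realm parameters from the NS granule at 'realm_params_addr'
 * into 'realm_params'.
 *
 * Returns false if the address does not refer to an NS granule or if
 * the NS buffer read fails.
 */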
static bool get_realm_params(struct rmi_realm_params *realm_params,
				unsigned long realm_params_addr)
{
	bool ns_access_ok;
	struct granule *g_realm_params;

	g_realm_params = find_granule(realm_params_addr);
	if ((g_realm_params == NULL) ||
		(granule_unlocked_state(g_realm_params) != GRANULE_STATE_NS)) {
		return false;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_realm_params, 0U,
				      sizeof(*realm_params), realm_params);

	return ns_access_ok;
}

static bool is_lpa2_requested(struct rmi_realm_params *p)
{
	return (EXTRACT(RMI_REALM_FLAGS0_LPA2, p->flags0) == RMI_FEATURE_TRUE);
}

/*
 * See the library pseudocode
 * aarch64/translation/vmsa_faults/AArch64.S2InconsistentSL on which this is
 * modeled.
 */
static bool s2_inconsistent_sl(unsigned int ipa_bits, int sl, bool lpa2)
{
	unsigned int levels = (unsigned int)(S2TT_PAGE_LEVEL - sl);
	unsigned int sl_min_ipa_bits, sl_max_ipa_bits;

	sl_min_ipa_bits = (levels * S2TTE_STRIDE) + GRANULE_SHIFT + 1U;

	/*
	 * The stride for level -1 is only four bits, and we cannot have
	 * concatenated tables at this level, so adjust sl_max_ipa_bits
	 * accordingly.
	 */
	if ((sl == S2TT_MIN_STARTING_LEVEL_LPA2) && (lpa2 == true)) {
		sl_max_ipa_bits = sl_min_ipa_bits + (S2TTE_STRIDE_LM1 - 1U);
	} else {
		sl_max_ipa_bits = sl_min_ipa_bits + (S2TTE_STRIDE - 1U);
	}

	/*
	 * Up to 16 tables can be concatenated at the starting level, so
	 * add 4 bits to 'sl_max_ipa_bits' for sl > 0, or for sl == 0
	 * when FEAT_LPA2 is enabled.
	 */
	if ((sl > 0) || ((sl == 0) && (lpa2 == true))) {
		sl_max_ipa_bits += 4U;
	}

	return ((ipa_bits < sl_min_ipa_bits) || (ipa_bits > sl_max_ipa_bits));
}

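/*
 * Check that the requested IPA size and starting level are valid for
 * this platform and consistent with each other.
 */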
static bool validate_ipa_bits_and_sl(unsigned int ipa_bits, long sl, bool lpa2)
{
	long min_starting_level;
	unsigned int max_ipa_bits;

	max_ipa_bits = (lpa2 == true) ?
				S2TT_MAX_IPA_BITS_LPA2 : S2TT_MAX_IPA_BITS;

	/* cppcheck-suppress misra-c2012-10.6 */
	min_starting_level = (lpa2 == true) ?
				S2TT_MIN_STARTING_LEVEL_LPA2 : S2TT_MIN_STARTING_LEVEL;

	if ((ipa_bits < S2TT_MIN_IPA_BITS) || (ipa_bits > max_ipa_bits)) {
		return false;
	}

	if ((sl < min_starting_level) || (sl > S2TT_PAGE_LEVEL)) {
		return false;
	}

	/*
	 * We assume that ARMv8.4-TTST is supported with RME, so the only SL
	 * configuration we need to check with 4K granules is SL == 0,
	 * following the library pseudocode
	 * aarch64/translation/vmsa_faults/AArch64.S2InvalidSL.
	 *
	 * Note that this only checks invalid SL values against the properties
	 * of the hardware platform; other misconfigurations between the IPA
	 * size and SL are checked in s2_inconsistent_sl().
	 */
	if ((sl == 0L) && (arch_feat_get_pa_width() < 44U)) {
		return false;
	}

	return !s2_inconsistent_sl(ipa_bits, (int)sl, lpa2);
}

/*
 * Calculate the number of s2 root translation tables needed given the
 * starting level and the IPA size in bits. This function assumes that
 * 'sl' and 'ipa_bits' are consistent with each other and 'ipa_bits'
 * is within architectural boundaries.
 */
static unsigned int s2_num_root_rtts(unsigned int ipa_bits, int sl)
{
	unsigned int levels = (unsigned int)(S2TT_PAGE_LEVEL - sl);
	unsigned int sl_ipa_bits;

	/* First calculate how many bits can be resolved without concatenation */
	sl_ipa_bits = (levels * S2TTE_STRIDE) /* Bits resolved by table walk without SL */
		      + GRANULE_SHIFT	      /* Bits directly mapped to OA */
		      + S2TTE_STRIDE;	      /* Bits resolved by single SL */

	/*
	 * If 'sl' were < 0, 'sl_ipa_bits' would already be >= 'ipa_bits',
	 * as the latter is assumed to be within boundary limits. The check
	 * below would then pass and return 1U as the number of s2 root
	 * tables, which is the only valid value for a starting level < 0.
	 */
	if (sl_ipa_bits >= ipa_bits) {
		return U(1);
	}

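	/*
	 * Illustrative example, assuming 4K granules (GRANULE_SHIFT == 12,
	 * S2TTE_STRIDE == 9): for ipa_bits == 40 and sl == 1, levels == 2,
	 * so sl_ipa_bits == (2 * 9) + 12 + 9 == 39 and (1 << (40 - 39)) == 2
	 * concatenated root tables are needed.
	 */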
	return (U(1) << (ipa_bits - sl_ipa_bits));
}

/*
 * Initialize the starting level of stage 2 translation tables.
 *
 * The protected half of the IPA space is initialized with s2ttes of
 * unassigned_empty type, and the unprotected half with s2ttes of
 * unassigned_ns type. The remaining entries are not initialized.
 */
static void init_s2_starting_level(struct rd *rd)
{
	unsigned long current_ipa = 0U;
	struct granule *g_rtt = rd->s2_ctx.g_rtt;
	unsigned int num_root_rtts;
	unsigned int s2ttes_per_s2tt = (unsigned int)(
		(rd->s2_ctx.s2_starting_level == S2TT_MIN_STARTING_LEVEL_LPA2) ?
			S2TTES_PER_S2TT_LM1 : S2TTES_PER_S2TT);
	unsigned int levels = (unsigned int)(S2TT_PAGE_LEVEL -
						rd->s2_ctx.s2_starting_level);
	/*
	 * The size of the IPA space that is covered by one S2TTE at
	 * the starting level.
	 */
	unsigned long sl_entry_map_size =
			(UL(1)) << U(U(levels * S2TTE_STRIDE) + U(GRANULE_SHIFT));

	num_root_rtts = rd->s2_ctx.num_root_rtts;
	for (unsigned int rtt = 0U; rtt < num_root_rtts; rtt++) {
		unsigned long *s2tt = buffer_granule_map_zeroed(g_rtt, SLOT_RTT);

		assert(s2tt != NULL);

		for (unsigned int rtte = 0U; rtte < s2ttes_per_s2tt; rtte++) {
			if (addr_in_par(rd, current_ipa)) {
				s2tt[rtte] = s2tte_create_unassigned_empty(
								&(rd->s2_ctx));
			} else {
				s2tt[rtte] = s2tte_create_unassigned_ns(
								&(rd->s2_ctx));
			}

			current_ipa += sl_entry_map_size;
			if (current_ipa == realm_ipa_size(rd)) {
				buffer_unmap(s2tt);
				return;
			}
		}
		buffer_unmap(s2tt);
		g_rtt++;
	}

	/*
	 * We have reached the end of the starting level s2tts but have not
	 * covered the whole IPA size.
	 */
	assert(false);
}

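/*
 * Validate the Realm parameters against the features advertised in RMI
 * feature register 0. On success, the VMID requested for the Realm has
 * been reserved and must be freed by the caller on any later error path.
 */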
static bool validate_realm_params(struct rmi_realm_params *p)
{
	unsigned long feat_reg0 = get_feature_register_0();

	/* Validate LPA2 flag */
	if (is_lpa2_requested(p) &&
	    (EXTRACT(RMI_FEATURE_REGISTER_0_LPA2, feat_reg0) ==
							RMI_FEATURE_FALSE)) {
		return false;
	}

	/* Validate S2SZ field */
	if ((p->s2sz < RMI_FEATURE_MIN_IPA_SIZE) ||
	    (p->s2sz > EXTRACT(RMI_FEATURE_REGISTER_0_S2SZ, feat_reg0))) {
		return false;
	}

	/*
	 * Validate the number of breakpoints and watchpoints.
	 * The value 0 is reserved for both fields.
	 */
	if ((p->num_bps == 0U) || (p->num_bps >
		EXTRACT(RMI_FEATURE_REGISTER_0_NUM_BPS, feat_reg0)) ||
		(p->num_wps == 0U) || (p->num_wps >
		EXTRACT(RMI_FEATURE_REGISTER_0_NUM_WPS, feat_reg0))) {
		return false;
	}

	/* Validate RMI_REALM_FLAGS0_SVE flag */
	if (EXTRACT(RMI_REALM_FLAGS0_SVE, p->flags0) == RMI_FEATURE_TRUE) {
		if (EXTRACT(RMI_FEATURE_REGISTER_0_SVE_EN, feat_reg0) ==
						      RMI_FEATURE_FALSE) {
			return false;
		}

		/* Validate SVE_VL value */
		if (p->sve_vl >
			EXTRACT(RMI_FEATURE_REGISTER_0_SVE_VL, feat_reg0)) {
			return false;
		}
	}

	/*
	 * Skip validation of RMI_REALM_FLAGS0_PMU flag
	 * as RMM always assumes that PMUv3p7+ is present.
	 */

	/* Validate number of PMU counters if PMUv3 is enabled */
	if (EXTRACT(RMI_REALM_FLAGS0_PMU, p->flags0) == RMI_FEATURE_TRUE) {
		if (p->pmu_num_ctrs >
		    EXTRACT(RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS, feat_reg0)) {
			return false;
		}

		/*
		 * A value of 0 for the number of PMU counters is only
		 * valid if FEAT_HPMN0 is implemented.
		 */
		if ((p->pmu_num_ctrs == 0U) && !is_feat_hpmn0_present()) {
			return false;
		}
	}

	if (!validate_ipa_bits_and_sl(p->s2sz, p->rtt_level_start,
						is_lpa2_requested(p))) {
		return false;
	}

	if (s2_num_root_rtts(p->s2sz, (int)p->rtt_level_start) !=
						p->rtt_num_start) {
		return false;
	}

	/*
	 * TODO: Check the VMSA configuration, which is either static for the
	 * RMM or per Realm with the supplied parameters. Store the
	 * configuration in the RD so that it can potentially be copied into
	 * RECs later.
	 */

	switch (p->algorithm) {
	case RMI_HASH_SHA_256:
	case RMI_HASH_SHA_512:
		break;
	default:
		return false;
	}

	/* Check VMID collision and reserve it atomically if available */
	return vmid_reserve((unsigned int)p->vmid);
}

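/*
 * Return the starting level RTT granules of a Realm to the DELEGATED
 * state.
 */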
static void free_sl_rtts(struct granule *g_rtt, unsigned int num_rtts)
{
	for (unsigned int i = 0U; i < num_rtts; i++) {
		struct granule *g = (struct granule *)((uintptr_t)g_rtt +
						(i * sizeof(struct granule)));

		granule_lock(g, GRANULE_STATE_RTT);
		granule_unlock_transition_to_delegated(g);
	}
}

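/*
 * Find and lock the RD granule and the starting level RTT granules,
 * taking the locks in ascending address order so that concurrent
 * invocations cannot deadlock. All granules must be in the DELEGATED
 * state. On failure, any granules locked so far are unlocked again.
 */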
static bool find_lock_rd_granules(unsigned long rd_addr,
				  struct granule **p_g_rd,
				  unsigned long rtt_base_addr,
				  unsigned int num_rtts,
				  struct granule **p_g_rtt_base)
{
	struct granule *g_rd = NULL, *g_rtt_base = NULL;
	unsigned int i = 0U;

	if (rd_addr < rtt_base_addr) {
		g_rd = find_lock_granule(rd_addr, GRANULE_STATE_DELEGATED);
		if (g_rd == NULL) {
			goto out_err;
		}
	}

	for (; i < num_rtts; i++) {
		unsigned long rtt_addr = rtt_base_addr + (i * GRANULE_SIZE);
		struct granule *g_rtt;

		g_rtt = find_lock_granule(rtt_addr, GRANULE_STATE_DELEGATED);
		if (g_rtt == NULL) {
			goto out_err;
		}

		if (i == 0U) {
			g_rtt_base = g_rtt;
		}
	}

	if (g_rd == NULL) {
		g_rd = find_lock_granule(rd_addr, GRANULE_STATE_DELEGATED);
		if (g_rd == NULL) {
			goto out_err;
		}
	}

	*p_g_rd = g_rd;
	*p_g_rtt_base = g_rtt_base;

	return true;

out_err:
	while (i != 0U) {
		granule_unlock((struct granule *)((uintptr_t)g_rtt_base +
						(--i * sizeof(struct granule))));
	}

	if (g_rd != NULL) {
		granule_unlock(g_rd);
	}

	return false;
}

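/*
 * Handle RMI_REALM_CREATE: create a new Realm using the parameters
 * referenced by 'realm_params_addr'. The RD granule and the starting
 * level RTT granules must all be in the DELEGATED state.
 */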
unsigned long smc_realm_create(unsigned long rd_addr,
			       unsigned long realm_params_addr)
{
	struct granule *g_rd, *g_rtt_base;
	struct rd *rd;
	struct rmi_realm_params p;

	if (!get_realm_params(&p, realm_params_addr)) {
		return RMI_ERROR_INPUT;
	}

	/* coverity[uninit_use_in_call:SUPPRESS] */
	if (!validate_realm_params(&p)) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * At this point the VMID is reserved for the Realm.
	 *
	 * Check for aliasing between rd_addr and the starting level
	 * RTT address(es).
	 */
	if (addr_is_contained(p.rtt_base,
			      p.rtt_base + (p.rtt_num_start * GRANULE_SIZE),
			      rd_addr)) {

		/* Free reserved VMID before returning */
		vmid_free((unsigned int)p.vmid);
		return RMI_ERROR_INPUT;
	}

	if (!find_lock_rd_granules(rd_addr, &g_rd, p.rtt_base,
				   p.rtt_num_start, &g_rtt_base)) {
		/* Free reserved VMID */
		vmid_free((unsigned int)p.vmid);
		return RMI_ERROR_INPUT;
	}

	rd = buffer_granule_map_zeroed(g_rd, SLOT_RD);
	assert(rd != NULL);

	set_rd_state(rd, REALM_NEW);
	set_rd_rec_count(rd, 0UL);
	rd->s2_ctx.g_rtt = find_granule(p.rtt_base);
	rd->s2_ctx.ipa_bits = p.s2sz;
	rd->s2_ctx.s2_starting_level = (int)p.rtt_level_start;
	rd->s2_ctx.num_root_rtts = p.rtt_num_start;
	rd->s2_ctx.enable_lpa2 = is_lpa2_requested(&p);
	(void)memcpy(&rd->rpv[0], &p.rpv[0], RPV_SIZE);

	rd->s2_ctx.vmid = (unsigned int)p.vmid;

	rd->num_rec_aux = MAX_REC_AUX_GRANULES;

	rd->simd_cfg.sve_en = EXTRACT(RMI_REALM_FLAGS0_SVE, p.flags0) != 0UL;
	if (rd->simd_cfg.sve_en) {
		rd->simd_cfg.sve_vq = (uint32_t)p.sve_vl;
	}

	if (p.algorithm == RMI_HASH_SHA_256) {
		rd->algorithm = HASH_SHA_256;
	} else {
		rd->algorithm = HASH_SHA_512;
	}

	rd->pmu_enabled = EXTRACT(RMI_REALM_FLAGS0_PMU, p.flags0) != 0UL;
	rd->pmu_num_ctrs = p.pmu_num_ctrs;

	init_s2_starting_level(rd);

	measurement_realm_params_measure(rd->measurement[RIM_MEASUREMENT_SLOT],
					 rd->algorithm,
					 &p);
	buffer_unmap(rd);

	granule_unlock_transition(g_rd, GRANULE_STATE_RD);

	for (unsigned int i = 0U; i < p.rtt_num_start; i++) {
		granule_unlock_transition(
			(struct granule *)((uintptr_t)g_rtt_base +
			(i * sizeof(struct granule))), GRANULE_STATE_RTT);
	}

	return RMI_SUCCESS;
}

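/*
 * Return the sum of the refcounts of the starting level RTT granules.
 * A non-zero total means that the translation tables are still in use.
 */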
static unsigned long total_root_rtt_refcount(struct granule *g_rtt,
					     unsigned int num_rtts)
{
	unsigned long refcount = 0UL;

	for (unsigned int i = 0U; i < num_rtts; i++) {
		struct granule *g = (struct granule *)((uintptr_t)g_rtt +
					(i * sizeof(struct granule)));

		/*
		 * Lock starting from the RTT root.
		 * Enforcing the RD->RTT locking order is enough to
		 * guarantee deadlock-free locking.
		 */
		granule_lock(g, GRANULE_STATE_RTT);
		refcount += (unsigned long)granule_refcount_read(g);
		granule_unlock(g);
	}

	return refcount;
}

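/*
 * Handle RMI_REALM_DESTROY: destroy the Realm described by the RD
 * granule at 'rd_addr'. The RD and all starting level RTTs must be
 * unused (zero refcount); on success they are returned to the
 * DELEGATED state and the Realm's VMID is released.
 */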
unsigned long smc_realm_destroy(unsigned long rd_addr)
{
	struct granule *g_rd;
	struct granule *g_rtt;
	struct rd *rd;
	unsigned int num_rtts;
	int res;

	/* RD should not be destroyed if refcount != 0. */
	res = find_lock_unused_granule(rd_addr, GRANULE_STATE_RD, &g_rd);
	if (res != 0) {
		switch (res) {
		case -EINVAL:
			return RMI_ERROR_INPUT;
		default:
			assert(res == -EBUSY);
			return RMI_ERROR_REALM;
		}
	}

	rd = buffer_granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	g_rtt = rd->s2_ctx.g_rtt;
	num_rtts = rd->s2_ctx.num_root_rtts;

	/* Check if granules are unused */
	if (total_root_rtt_refcount(g_rtt, num_rtts) != 0UL) {
		buffer_unmap(rd);
		granule_unlock(g_rd);
		return RMI_ERROR_REALM;
	}

	/*
	 * All the mappings in the Realm have been removed and the TLB caches
	 * are invalidated. Therefore, there are no TLB entries tagged with
	 * this Realm's VMID (in this security state).
	 * Just release the VMID value so it can be used in another Realm.
	 */
	vmid_free(rd->s2_ctx.vmid);

	free_sl_rtts(g_rtt, num_rtts);

	buffer_unmap(rd);

	/*
	 * The measurement data in the RD will be destroyed eventually, when
	 * the granule is reclaimed for another Realm or by the NS Host.
	 */
	granule_unlock_transition_to_delegated(g_rd);

	return RMI_SUCCESS;
}