/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_features.h>
#include <buffer.h>
#include <debug.h>
#include <gic.h>
#include <granule.h>
#include <measurement.h>
#include <psci.h>
#include <realm.h>
#include <rec.h>
#include <s2tt.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <xlat_high_va.h>

static void init_rec_sysregs(struct rec *rec, unsigned long rec_mpidr)
{
	/* Set non-zero values only */
	rec->sysregs.sctlr_el1 = SCTLR_EL1_FLAGS;
	rec->sysregs.mdscr_el1 = MDSCR_EL1_TDCC_BIT;
	rec->sysregs.vmpidr_el2 = rec_mpidr_to_mpidr(rec_mpidr) | VMPIDR_EL2_RES1;
	rec->sysregs.cnthctl_el2 = CNTHCTL_EL2_NO_TRAPS;
	rec->sysregs.cptr_el2 = CPTR_EL2_VHE_INIT;
}

/*
 * Map the starting level of the stage 2 translation lookup to its
 * VTCR_EL2.SL0[7:6] encoding.
 */
static const unsigned long sl0_val[] = {
	VTCR_SL0_4K_LM1,
	VTCR_SL0_4K_L0,
	VTCR_SL0_4K_L1,
	VTCR_SL0_4K_L2,
	VTCR_SL0_4K_L3
};

static unsigned long realm_vtcr_ps(unsigned int parange)
{
	switch (parange) {
	case PARANGE_WIDTH_36BITS:
		return VTCR_PS_36;
	case PARANGE_WIDTH_40BITS:
		return VTCR_PS_40;
	case PARANGE_WIDTH_42BITS:
		return VTCR_PS_42;
	case PARANGE_WIDTH_44BITS:
		return VTCR_PS_44;
	case PARANGE_WIDTH_48BITS:
		return VTCR_PS_48;
	case PARANGE_WIDTH_52BITS:
		return VTCR_PS_52;
	case PARANGE_WIDTH_32BITS:
	default:
		return VTCR_PS_32;
	}
}

static unsigned long realm_vtcr(struct rd *rd)
{
	unsigned long t0sz, sl0;
	unsigned long vtcr = is_feat_vmid16_present() ?
				(VTCR_FLAGS | VTCR_VS) : VTCR_FLAGS;
	unsigned int parange = arch_feat_get_pa_width();
	int s2_starting_level = realm_rtt_starting_level(rd);
	bool lpa2 = rd->s2_ctx.enable_lpa2;

	assert(((!lpa2) && (s2_starting_level >= S2TT_MIN_STARTING_LEVEL)) ||
	       ((lpa2) && (s2_starting_level >= S2TT_MIN_STARTING_LEVEL_LPA2)));
	assert(s2_starting_level <= S2TT_PAGE_LEVEL);

	/*
	 * s2_starting_level can be -1, so add an offset of one to compensate
	 * for that when indexing sl0_val.
	 */
	sl0 = sl0_val[s2_starting_level + 1];

	t0sz = 64UL - realm_ipa_bits(rd);
	t0sz &= MASK(VTCR_T0SZ);

	vtcr |= t0sz;
	vtcr |= sl0;
	vtcr |= realm_vtcr_ps(parange);

	if (lpa2 == true) {
		if (s2_starting_level == -1) {
			vtcr |= VTCR_SL2_4K_LM1;
		}
		vtcr |= VTCR_DS_52BIT;
	}

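	/*
	 * Illustrative example (the exact field encodings are defined in
	 * arch.h): a Realm with a 40-bit IPA space whose stage 2 walk starts
	 * at level 1 gets T0SZ = 64 - 40 = 24, SL0 = VTCR_SL0_4K_L1 and
	 * PS = VTCR_PS_40 OR-ed into the returned value.
	 */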
	return vtcr;
}

static void init_common_sysregs(struct rec *rec, struct rd *rd)
{
	unsigned long mdcr_el2_val = read_mdcr_el2();
	bool lpa2 = rd->s2_ctx.enable_lpa2;

	/* Set non-zero values only */
	rec->common_sysregs.hcr_el2 = HCR_REALM_FLAGS;
	rec->common_sysregs.vtcr_el2 = realm_vtcr(rd);
	rec->common_sysregs.vttbr_el2 = (granule_addr(rd->s2_ctx.g_rtt) &
					MASK(TTBRx_EL2_BADDR));
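
	/*
	 * With FEAT_LPA2, bits [51:48] of the root RTT address do not fit in
	 * the regular BADDR field and are instead programmed into the
	 * dedicated MSB field of VTTBR_EL2.
	 */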
	if (lpa2 == true) {
		rec->common_sysregs.vttbr_el2 &= ~MASK(TTBRx_EL2_BADDR_MSB_LPA2);
		rec->common_sysregs.vttbr_el2 |=
			INPLACE(TTBRx_EL2_BADDR_MSB_LPA2,
				EXTRACT(EL2_BADDR_MSB_LPA2,
					granule_addr(rd->s2_ctx.g_rtt)));
	}

	rec->common_sysregs.vttbr_el2 |= INPLACE(VTTBR_EL2_VMID, rd->s2_ctx.vmid);

	/* Control trapping of accesses to PMU registers */
	if (rd->pmu_enabled) {
		mdcr_el2_val &= ~(MDCR_EL2_TPM_BIT | MDCR_EL2_TPMCR_BIT);

		/*
		 * Set MDCR_EL2.HPMN to assign event counters into
		 * the first range
		 */
		mdcr_el2_val &= ~MASK(MDCR_EL2_HPMN);
		mdcr_el2_val |= INPLACE(MDCR_EL2_HPMN, rd->pmu_num_ctrs);
	} else {
		mdcr_el2_val |= (MDCR_EL2_TPM_BIT | MDCR_EL2_TPMCR_BIT);
	}

	rec->common_sysregs.mdcr_el2 = mdcr_el2_val;
}

static void init_rec_regs(struct rec *rec,
			  struct rmi_rec_params *rec_params,
			  struct rd *rd)
{
	unsigned int i;

	/*
	 * We only need to set non-zero values here because we're initializing
	 * data structures in the rec granule which was just converted from
	 * the DELEGATED state to the REC state, and we can rely on the RMM
	 * invariant that DELEGATED granules are always zero-filled.
	 */

	for (i = 0U; i < REC_CREATE_NR_GPRS; i++) {
		rec->regs[i] = rec_params->gprs[i];
	}

	rec->pc = rec_params->pc;
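
	/*
	 * The REC starts execution at EL1 (using SP_EL1) in AArch64 state,
	 * with the D, A, I and F exception mask bits set.
	 */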
	rec->pstate = SPSR_EL2_MODE_EL1h |
		      SPSR_EL2_nRW_AARCH64 |
		      SPSR_EL2_F_BIT |
		      SPSR_EL2_I_BIT |
		      SPSR_EL2_A_BIT |
		      SPSR_EL2_D_BIT;

	init_rec_sysregs(rec, rec_params->mpidr);
	init_common_sysregs(rec, rd);
}

/*
 * This function is only invoked when REC creation fails or when a REC is
 * being destroyed. Hence the REC will not be in use when this function is
 * called, and therefore no lock is acquired before its invocation.
 */
static void free_rec_aux_granules(struct granule *rec_aux[], unsigned int cnt)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_rec_aux = rec_aux[i];

		granule_lock(g_rec_aux, GRANULE_STATE_REC_AUX);
		granule_unlock_transition_to_delegated(g_rec_aux);
	}
}

/* Initialize rec SIMD state */
static void rec_simd_state_init(struct rec *r)
{
	int __unused retval;

	retval = simd_context_init(SIMD_OWNER_REL1, r->aux_data.simd_ctx,
				   &r->realm_info.simd_cfg);
	assert(retval == 0);
}

/* Initialize rec PMU state */
static void rec_pmu_state_init(struct rec *r)
{
	r->aux_data.pmu->pmcr_el0 = r->realm_info.pmu_enabled ?
					PMCR_EL0_INIT_RESET : PMCR_EL0_INIT;
}

/*
 * Initializes the granule pages that are used for the attestation heap, PMU
 * and SIMD state. As part of initialization, this function maps and unmaps
 * the rec aux granules.
 */
static void rec_aux_granules_init(struct rec *r)
{
	void *rec_aux;
	struct rec_aux_data *aux_data;
	size_t i;
	int ret;

	/* Map auxiliary granules */
	/* coverity[overrun-buffer-val:SUPPRESS] */
	rec_aux = buffer_rec_aux_granules_map_zeroed(r->g_aux, r->num_rec_aux);
	assert(rec_aux != NULL);

	/*
	 * Ensure we have enough aux granules for use by the REC:
	 * - REC_PMU_PAGES for PMU state
	 * - REC_SIMD_PAGES for SIMD state
	 * - REC_ATTEST_PAGES for the 'rec_attest_data' structure
	 * - REC_ATTEST_BUFFER_PAGES for the attestation buffer
	 */
	assert(r->num_rec_aux >= REC_NUM_PAGES);

	/*
	 * Assign the base addresses for the attestation heap, PMU, SIMD, and
	 * attestation data and buffer.
	 */
	aux_data = &r->aux_data;
	aux_data->pmu = (struct pmu_state *)rec_aux;
	aux_data->simd_ctx = (struct simd_context *)
		((uintptr_t)aux_data->pmu + REC_PMU_SIZE);
	aux_data->attest_data = (struct rec_attest_data *)
		((uintptr_t)aux_data->simd_ctx + REC_SIMD_SIZE);
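
	/*
	 * Resulting layout of the mapped aux area (the REC_*_SIZE values are
	 * platform-configuration dependent, so the split is indicative):
	 *
	 * rec_aux --> +-----------------+
	 *             | pmu_state       | REC_PMU_SIZE
	 *             +-----------------+
	 *             | simd_context    | REC_SIMD_SIZE
	 *             +-----------------+
	 *             | rec_attest_data | REC_ATTEST_SIZE
	 *             +-----------------+
	 *             | app pages       | remaining aux granules
	 *             +-----------------+
	 */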

	size_t used_aux_pages =
		((uintptr_t)aux_data->attest_data + REC_ATTEST_SIZE -
			(uintptr_t)rec_aux) / GRANULE_SIZE;

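	/* The strict '<' guarantees at least one aux page is left for the app */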
	assert(used_aux_pages < r->num_rec_aux);

	rec_simd_state_init(r);
	rec_pmu_state_init(r);

	/* Use the rest of the aux pages for the app */
	uintptr_t granule_pas[MAX_REC_AUX_GRANULES];
	size_t granule_pa_count = r->num_rec_aux - used_aux_pages;

	for (i = 0; i < granule_pa_count; ++i) {
		granule_pas[i] = granule_addr(r->g_aux[used_aux_pages + i]);
	}

	ret = attest_app_init(&r->attest_app_data,
		granule_pas,
		granule_pa_count,
		(void *)(SLOT_VIRT +
			(((unsigned long)SLOT_REC_AUX0 + used_aux_pages) * GRANULE_SIZE)));
	if (ret != 0) {
		panic();
	}

	/* Unmap auxiliary granules */
	buffer_rec_aux_unmap(rec_aux, r->num_rec_aux);
}

unsigned long smc_rec_create(unsigned long rd_addr,
			     unsigned long rec_addr,
			     unsigned long rec_params_addr)
{
	struct granule *g_rd;
	struct granule *g_rec;
	struct granule *rec_aux_granules[MAX_REC_AUX_GRANULES];
	struct granule *g_rec_params;
	struct rec *rec;
	struct rd *rd;
	struct rmi_rec_params rec_params;
	unsigned long rec_idx;
	unsigned long ret;
	bool ns_access_ok;
	unsigned int num_rec_aux;

	g_rec_params = find_granule(rec_params_addr);
	if ((g_rec_params == NULL) ||
		(granule_unlocked_state(g_rec_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_rec_params, 0U,
				      sizeof(rec_params), &rec_params);

	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/* coverity[uninit_use:SUPPRESS] */
	num_rec_aux = (unsigned int)rec_params.num_aux;
	if (num_rec_aux > MAX_REC_AUX_GRANULES) {
		return RMI_ERROR_INPUT;
	}

	/* Loop through rec_aux_granules and transition them */
	for (unsigned int i = 0U; i < num_rec_aux; i++) {
		struct granule *g_rec_aux = find_lock_granule(
						rec_params.aux[i],
						GRANULE_STATE_DELEGATED);
		if (g_rec_aux == NULL) {
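			/* Roll back the granules already transitioned to REC_AUX */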
			free_rec_aux_granules(rec_aux_granules, i);
			return RMI_ERROR_INPUT;
		}

		granule_unlock_transition(g_rec_aux, GRANULE_STATE_REC_AUX);
		rec_aux_granules[i] = g_rec_aux;
	}

	if (!find_lock_two_granules(rec_addr,
				GRANULE_STATE_DELEGATED,
				&g_rec,
				rd_addr,
				GRANULE_STATE_RD,
				&g_rd)) {
		ret = RMI_ERROR_INPUT;
		goto out_free_aux;
	}

	/*
	 * Check if the maximum supported number of granules
	 * was already reached
	 */
	if (granule_refcount_read(g_rd) == REFCOUNT_MAX) {
		ret = RMI_ERROR_REALM;
		goto out_unlock;
	}

	rd = buffer_granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	if (get_rd_state_locked(rd) != REALM_NEW) {
		ret = RMI_ERROR_REALM;
		goto out_unmap;
	}

	rec_idx = get_rd_rec_count_locked(rd);
	if (!rec_mpidr_is_valid(rec_params.mpidr) ||
	   (rec_idx != rec_mpidr_to_idx(rec_params.mpidr))) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap;
	}

	/* Verify the auxiliary granule count with rd lock held */
	if (num_rec_aux != rd->num_rec_aux) {
		ret = RMI_ERROR_INPUT;
		goto out_unmap;
	}

	rec = buffer_granule_map_zeroed(g_rec, SLOT_REC);
	assert(rec != NULL);

	rec->g_rec = g_rec;
	rec->rec_idx = rec_idx;
	rec->num_rec_aux = num_rec_aux;
	rec->realm_info.s2_ctx = rd->s2_ctx;
	rec->realm_info.g_rd = g_rd;
	rec->realm_info.pmu_enabled = rd->pmu_enabled;
	rec->realm_info.pmu_num_ctrs = rd->pmu_num_ctrs;
	rec->realm_info.algorithm = rd->algorithm;
	rec->realm_info.simd_cfg = rd->simd_cfg;

	init_rec_regs(rec, &rec_params, rd);
	gic_cpu_state_init(&rec->sysregs.gicstate);

	/* Copy addresses of auxiliary granules */
	(void)memcpy((void *)rec->g_aux, (const void *)rec_aux_granules,
			num_rec_aux * sizeof(struct granule *));

	rec->runnable = (rec_params.flags & REC_PARAMS_FLAG_RUNNABLE) != 0UL;
	if (rec->runnable) {
		measurement_rec_params_measure(rd->measurement[RIM_MEASUREMENT_SLOT],
					       rd->algorithm,
					       &rec_params);
	}

	/*
	 * The RD is accessed lock-free from RMI_REC_DESTROY, hence increment
	 * its refcount atomically.
	 */
	atomic_granule_get(g_rd);

	/*
	 * Map the REC aux granules, initialize the aux data and unmap the
	 * REC aux granules.
	 */
	rec_aux_granules_init(rec);

	set_rd_rec_count(rd, rec_idx + 1U);

	buffer_unmap(rec);

	ret = RMI_SUCCESS;

out_unmap:
	buffer_unmap(rd);

out_unlock:
	granule_unlock(g_rd);
	if (ret == RMI_SUCCESS) {
		granule_unlock_transition(g_rec, GRANULE_STATE_REC);
	} else {
		granule_unlock(g_rec);
	}

out_free_aux:
	if (ret != RMI_SUCCESS) {
		free_rec_aux_granules(rec_aux_granules, num_rec_aux);
	}
	return ret;
}

unsigned long smc_rec_destroy(unsigned long rec_addr)
{
	struct granule *g_rec;
	struct granule *g_rd;
	struct rec *rec;
	int res;

	/* The REC should not be destroyed if refcount != 0 */
	res = find_lock_unused_granule(rec_addr, GRANULE_STATE_REC, &g_rec);
	if (res != 0) {
		switch (res) {
		case -EINVAL:
			return RMI_ERROR_INPUT;
		default:
			assert(res == -EBUSY);
			return RMI_ERROR_REC;
		}
	}

	rec = buffer_granule_map(g_rec, SLOT_REC);
	assert(rec != NULL);

	g_rd = rec->realm_info.g_rd;

	/* Free and scrub the auxiliary granules */
	free_rec_aux_granules(rec->g_aux, rec->num_rec_aux);
	buffer_unmap(rec);

	granule_unlock_transition_to_delegated(g_rec);

	/*
	 * Decrement the refcount. The refcount should be balanced before
	 * RMI_REC_DESTROY returns, and until this occurs a transient
	 * over-estimate of the refcount (in-between the unlock and decreasing
	 * the refcount) is legitimate.
	 * We use release semantics here to match the acquire semantics for
	 * the refcount in RMI_REALM_DESTROY.
	 */
	atomic_granule_put_release(g_rd);

	return RMI_SUCCESS;
}

void smc_rec_aux_count(unsigned long rd_addr, struct smc_result *res)
{
	unsigned int num_rec_aux;
	struct granule *g_rd;
	struct rd *rd;

	g_rd = find_lock_granule(rd_addr, GRANULE_STATE_RD);
	if (g_rd == NULL) {
		res->x[0] = RMI_ERROR_INPUT;
		return;
	}

	rd = buffer_granule_map(g_rd, SLOT_RD);
	assert(rd != NULL);

	num_rec_aux = rd->num_rec_aux;
	buffer_unmap(rd);
	granule_unlock(g_rd);

	res->x[0] = RMI_SUCCESS;
	res->x[1] = (unsigned long)num_rec_aux;
}

unsigned long smc_psci_complete(unsigned long calling_rec_addr,
				unsigned long target_rec_addr,
				unsigned long status)
{
	struct granule *g_calling_rec, *g_target_rec;
	struct rec *calling_rec, *target_rec;
	unsigned long ret;

	if (!GRANULE_ALIGNED(calling_rec_addr)) {
		return RMI_ERROR_INPUT;
	}

	if (!GRANULE_ALIGNED(target_rec_addr)) {
		return RMI_ERROR_INPUT;
	}

	if (!find_lock_two_granules(calling_rec_addr,
					GRANULE_STATE_REC,
					&g_calling_rec,
					target_rec_addr,
					GRANULE_STATE_REC,
					&g_target_rec)) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * The access to a REC from RMI_REC_ENTER is only protected by the
	 * reference counter. Here, we may access the volatile (non-constant)
	 * members of the REC structure (such as rec->running) only if the
	 * counter is zero.
	 */
	if (granule_refcount_read_acquire(g_calling_rec) != 0U) {
		/*
		 * The `calling` REC is running on another PE and therefore it
		 * may not have a pending PSCI request.
		 */
		ret = RMI_ERROR_INPUT;
		goto out_unlock;
	}

	calling_rec = buffer_granule_map(g_calling_rec, SLOT_REC);
	assert(calling_rec != NULL);

	target_rec = buffer_granule_map(g_target_rec, SLOT_REC2);
	assert(target_rec != NULL);

	ret = psci_complete_request(calling_rec, target_rec, status);

	buffer_unmap(target_rec);
	buffer_unmap(calling_rec);
out_unlock:
	granule_unlock(g_calling_rec);
	granule_unlock(g_target_rec);

	return ret;
}