1 /*
2  * Copyright 2021 The Hafnium Authors.
3  *
4  * Use of this source code is governed by a BSD-style
5  * license that can be found in the LICENSE file or at
6  * https://opensource.org/licenses/BSD-3-Clause.
7  */
8 
9 #include "hf/check.h"
10 #include "hf/cpu.h"
11 #include "hf/dlog.h"
12 #include "hf/interrupt_desc.h"
13 #include "hf/io.h"
14 #include "hf/panic.h"
15 #include "hf/plat/interrupts.h"
16 #include "hf/static_assert.h"
17 #include "hf/types.h"
18 
19 #include "gicv3_helpers.h"
20 #include "msr.h"
21 
22 #define GICD_SIZE (0x10000)
23 
24 /**
25  * In GICv3, each Redistributor has two 64KB frames:
26  * 1. RD_base
27  * 2. SGI_base
28  */
29 #define GICV3_REDIST_SIZE_PER_PE (0x20000) /* 128 KB */
30 
31 /**
32  * In GICv4, each Redistributor has two additional 64KB frames:
33  * 3. VLPI_base
34  * 4. Reserved
35  */
36 #define GICV4_REDIST_SIZE_PER_PE (0x40000) /* 256 KB */
37 
38 #if GIC_VERSION == 3
39 #define GIC_REDIST_SIZE_PER_PE GICV3_REDIST_SIZE_PER_PE
40 #elif GIC_VERSION == 4
41 #define GIC_REDIST_SIZE_PER_PE GICV4_REDIST_SIZE_PER_PE
42 #endif
43 
44 #define GIC_REDIST_FRAMES_OFFSET GIC_REDIST_SIZE_PER_PE
45 #define REDIST_LAST_FRAME_MASK (1 << 4)
46 
/**
 * Internal state of the GICv3 driver: base addresses of the register
 * frames mapped into Hafnium's stage-1 address space by gicv3_driver_init.
 */
struct gicv3_driver {
	/* Base address of the GIC Distributor (GICD) register frame. */
	uintptr_t dist_base;
	/* Address of the first Redistributor frame mapped. */
	uintptr_t base_redist_frame;
	/* Per-core Redistributor frame addresses, indexed by core id. */
	uintptr_t all_redist_frames[MAX_CPUS];
	/* Serializes read-modify-write accesses to Distributor registers. */
	struct spinlock lock;
};
53 
/* Singleton driver instance; zero-initialized, filled by gicv3_driver_init. */
static struct gicv3_driver plat_gicv3_driver;
55 
affinity_to_core_id(uint64_t reg)56 static uint32_t affinity_to_core_id(uint64_t reg)
57 {
58 	struct cpu *this_cpu;
59 	uint32_t core_id;
60 
61 	this_cpu = cpu_find(reg & MPIDR_AFFINITY_MASK);
62 
63 	if (this_cpu == NULL) {
64 		/*
65 		 * There might be holes in all redistributor frames (some CPUs
66 		 * don't exist). For these CPUs, return MAX_CPUS, so that the
67 		 * caller has a chance to recover.
68 		 */
69 		core_id = MAX_CPUS;
70 	} else {
71 		core_id = cpu_index(this_cpu);
72 	}
73 
74 	return core_id;
75 }
76 
77 /**
78  * This function checks the interrupt ID and returns true for SGIs and (E)PPIs
79  * and false for (E)SPIs IDs.
80  */
is_sgi_ppi(uint32_t id)81 static bool is_sgi_ppi(uint32_t id)
82 {
83 	/* SGIs: 0-15, PPIs: 16-31, EPPIs: 1056-1119. */
84 	if (IS_SGI_PPI(id)) {
85 		return true;
86 	}
87 
88 	/* SPIs: 32-1019, ESPIs: 4096-5119. */
89 	if (IS_SPI(id)) {
90 		return false;
91 	}
92 
93 	CHECK(false);
94 	return false;
95 }
96 
97 /**
98  * This function returns the id of the highest priority pending interrupt at
99  * the GIC cpu interface.
100  */
gicv3_get_pending_interrupt_id(void)101 uint32_t gicv3_get_pending_interrupt_id(void)
102 {
103 	return (uint32_t)read_msr(ICC_IAR1_EL1) & IAR1_EL1_INTID_MASK;
104 }
105 
106 /**
107  * This function returns the type of the interrupt id depending on the group
108  * this interrupt has been configured under by the interrupt controller i.e.
109  * group0 or group1 Secure / Non Secure. The return value can be one of the
110  * following :
111  *    INTR_GROUP0  : The interrupt type is a Secure Group 0 interrupt
112  *    INTR_GROUP1S : The interrupt type is a Secure Group 1 secure interrupt.
113  *    INTR_GROUP1NS: The interrupt type is a Secure Group 1 non secure
114  *                   interrupt.
115  */
uint32_t gicv3_get_interrupt_type(uint32_t id, uint32_t proc_num)
{
	uint32_t group_bit;
	uint32_t grpmod_bit;

	/* Ensure the parameters are valid. */
	CHECK((id < PENDING_G1S_INTID) || (id >= MIN_LPI_ID));
	CHECK(proc_num < MAX_CPUS);

	/* Every LPI is Group 1 non secure by architecture. */
	if (id >= MIN_LPI_ID) {
		return INTR_GROUP1NS;
	}

	if (is_sgi_ppi(id)) {
		/* SGIs: 0-15, PPIs: 16-31, EPPIs: 1056-1119 (GICR). */
		uintptr_t rdist =
			plat_gicv3_driver.all_redist_frames[proc_num];

		group_bit = gicr_get_igroupr(rdist, id);
		grpmod_bit = gicr_get_igrpmodr(rdist, id);
	} else {
		/* SPIs: 32-1019, ESPIs: 4096-5119 (GICD). */
		group_bit = gicd_get_igroupr(plat_gicv3_driver.dist_base, id);
		grpmod_bit =
			gicd_get_igrpmodr(plat_gicv3_driver.dist_base, id);
	}

	/* IGROUP set: Group 1 Non secure interrupt. */
	if (group_bit != 0U) {
		return INTR_GROUP1NS;
	}

	/* GRPMOD set: Group 1 Secure interrupt. */
	if (grpmod_bit != 0U) {
		return INTR_GROUP1S;
	}

	/* Group 0 Secure is not expected here; treat as fatal. */
	CHECK(false);
	return INTR_GROUP0;
}
161 
162 /**
163  * This function enables the interrupt identified by id. The proc_num
164  * is used if the interrupt is SGI or PPI, and programs the corresponding
165  * Redistributor interface.
166  */
void gicv3_enable_interrupt(uint32_t id, uint32_t proc_num)
{
	CHECK(plat_gicv3_driver.dist_base != 0U);
	CHECK(plat_gicv3_driver.base_redist_frame != 0U);
	CHECK(proc_num < MAX_CPUS);

	/*
	 * Make prior shared-variable updates visible before the interrupt
	 * is enabled and can observe them out of band.
	 */
	dsb(ish);

	if (is_sgi_ppi(id)) {
		/* SGIs: 0-15, PPIs: 16-31, EPPIs: 1056-1119 (GICR). */
		uintptr_t rdist =
			plat_gicv3_driver.all_redist_frames[proc_num];

		gicr_set_isenabler(rdist, id);
	} else {
		/* SPIs: 32-1019, ESPIs: 4096-5119 (GICD). */
		gicd_set_isenabler(plat_gicv3_driver.dist_base, id);
	}
}
189 
190 /**
191  * This function disables the interrupt identified by id. The proc_num
192  * is used if the interrupt is SGI or PPI, and programs the corresponding
193  * Redistributor interface.
194  */
void gicv3_disable_interrupt(uint32_t id, uint32_t proc_num)
{
	CHECK(plat_gicv3_driver.dist_base != 0U);
	CHECK(plat_gicv3_driver.base_redist_frame != 0U);
	CHECK(proc_num < MAX_CPUS);

	/*
	 * Disable the interrupt, then ensure subsequent shared-variable
	 * updates cannot be observed by a late out-of-band trigger.
	 */
	if (is_sgi_ppi(id)) {
		/* SGIs: 0-15, PPIs: 16-31, EPPIs: 1056-1119 (GICR). */
		uintptr_t rdist =
			plat_gicv3_driver.all_redist_frames[proc_num];

		gicr_set_icenabler(rdist, id);

		/* Clearing the enable requires waiting for pending writes. */
		gicr_wait_for_pending_write(rdist);
	} else {
		/* SPIs: 32-1019, ESPIs: 4096-5119 (GICD). */
		gicd_set_icenabler(plat_gicv3_driver.dist_base, id);

		/* Clearing the enable requires waiting for pending writes. */
		gicd_wait_for_pending_write(plat_gicv3_driver.dist_base);
	}

	dsb(ish);
}
225 
226 /**
227  * This function sets the interrupt priority as supplied for the given interrupt
228  * id.
229  */
void gicv3_set_interrupt_priority(uint32_t id, uint32_t core_pos,
				  uint32_t priority)
{
	/* Core index cannot exceed maximum core count. */
	CHECK(core_pos < MAX_CPUS);

	if (is_sgi_ppi(id)) {
		/* SGIs/(E)PPIs: program the Redistributor for this core. */
		gicr_set_ipriorityr(
			plat_gicv3_driver.all_redist_frames[core_pos], id,
			priority);
	} else {
		/* (E)SPIs: program the Distributor. */
		gicd_set_ipriorityr(plat_gicv3_driver.dist_base, id, priority);
	}
}
248 
249 /**
250  * This function assigns group for the interrupt identified by id. The proc_num
251  * is used if the interrupt is SGI or (E)PPI, and programs the corresponding
252  * Redistributor interface. The group can be any of GICV3_INTR_GROUP*.
253  */
void gicv3_set_interrupt_type(uint32_t id, uint32_t proc_num, uint32_t type)
{
	bool group1_ns = false;
	bool grpmod = false;

	CHECK(plat_gicv3_driver.dist_base != 0U);
	CHECK(proc_num < MAX_CPUS);

	/* Translate the requested group into IGROUP/GRPMOD bit values. */
	switch (type) {
	case INTR_GROUP1S:
		group1_ns = false;
		grpmod = true;
		break;
	case INTR_GROUP1NS:
		group1_ns = true;
		grpmod = false;
		break;
	default:
		/* Group 0 and unknown types are not supported here. */
		CHECK(false);
		break;
	}

	if (is_sgi_ppi(id)) {
		/* SGIs: 0-15, PPIs: 16-31, EPPIs: 1056-1119 (GICR). */
		uintptr_t rdist =
			plat_gicv3_driver.all_redist_frames[proc_num];

		if (group1_ns) {
			gicr_set_igroupr(rdist, id);
		} else {
			gicr_clr_igroupr(rdist, id);
		}

		if (grpmod) {
			gicr_set_igrpmodr(rdist, id);
		} else {
			gicr_clr_igrpmodr(rdist, id);
		}
	} else {
		/* SPIs: 32-1019, ESPIs: 4096-5119 (GICD). */
		uintptr_t gicd = plat_gicv3_driver.dist_base;

		/* Serialize read-modify-write to Distributor registers. */
		sl_lock(&plat_gicv3_driver.lock);

		if (group1_ns) {
			gicd_set_igroupr(gicd, id);
		} else {
			gicd_clr_igroupr(gicd, id);
		}

		if (grpmod) {
			gicd_set_igrpmodr(gicd, id);
		} else {
			gicd_clr_igrpmodr(gicd, id);
		}

		sl_unlock(&plat_gicv3_driver.lock);
	}
}
300 
/**
 * Signals End Of Interrupt for `id` at the CPU interface by writing
 * ICC_EOIR1_EL1, after a barrier ordering prior device writes.
 */
void gicv3_end_of_interrupt(uint32_t id)
{
	/*
	 * Interrupt request deassertion from peripheral to GIC happens
	 * by clearing interrupt condition by a write to the peripheral
	 * register. It is desired that the write transfer is complete
	 * before the core tries to change GIC state from 'AP/Active' to
	 * a new state on seeing 'EOI write'.
	 * Since ICC interface writes are not ordered against Device
	 * memory writes, a barrier is required to ensure the ordering.
	 * The dsb will also ensure *completion* of previous writes with
	 * DEVICE nGnRnE attribute.
	 */
	dsb(ish);
	write_msr(ICC_EOIR1_EL1, id);
}
317 
/** Reads the 64-bit GICR_TYPER register of the given Redistributor frame. */
uint64_t read_gicr_typer_reg(uintptr_t gicr_frame_addr)
{
	return io_read64(IO64_C(gicr_frame_addr + GICR_TYPER));
}
322 
/** Reads the 32-bit GICD_TYPER register (zero-extended to 64 bits). */
uint64_t read_gicd_typer_reg(uintptr_t base)
{
	return io_read32(IO32_C(base + GICD_TYPER));
}
327 
328 /*
329  * This function calculates the core position from the affinity values
330  * provided by the GICR_TYPER register. This function may return MAX_CORES
331  * if typer_reg doesn't match a known core.
332  */
gicr_affinity_to_core_pos(uint64_t typer_reg)333 static inline uint32_t gicr_affinity_to_core_pos(uint64_t typer_reg)
334 {
335 	uint64_t aff3;
336 	uint64_t aff2;
337 	uint64_t aff1;
338 	uint64_t aff0;
339 	uint64_t reg;
340 
341 	aff3 = (typer_reg >> RDIST_AFF3_SHIFT) & (0xff);
342 	aff2 = (typer_reg >> RDIST_AFF2_SHIFT) & (0xff);
343 	aff1 = (typer_reg >> RDIST_AFF1_SHIFT) & (0xff);
344 	aff0 = (typer_reg >> RDIST_AFF0_SHIFT) & (0xff);
345 
346 	/* Construct mpidr based on above affinities. */
347 	reg = (aff3 << MPIDR_AFF3_SHIFT) | (aff2 << MPIDR_AFF2_SHIFT) |
348 	      (aff1 << MPIDR_AFF1_SHIFT) | (aff0 << MPIDR_AFF0_SHIFT);
349 
350 	return affinity_to_core_id(reg);
351 }
352 
populate_redist_base_addrs(void)353 static inline void populate_redist_base_addrs(void)
354 {
355 	uintptr_t current_rdist_frame;
356 	uint64_t typer_reg;
357 	uint32_t core_idx;
358 
359 	current_rdist_frame = plat_gicv3_driver.base_redist_frame;
360 
361 	while (true) {
362 		typer_reg = read_gicr_typer_reg(current_rdist_frame);
363 		core_idx = gicr_affinity_to_core_pos(typer_reg);
364 
365 		/*
366 		 * If the PE in redistributor does not exist, core_idx
367 		 * will be MAX_CPUS, then do not fill up frame entry
368 		 * and just move to next frame.
369 		 */
370 		if (core_idx < MAX_CPUS) {
371 			plat_gicv3_driver.all_redist_frames[core_idx] =
372 				current_rdist_frame;
373 		}
374 
375 		/* Check if this is the last frame. */
376 		if (typer_reg & REDIST_LAST_FRAME_MASK) {
377 			return;
378 		}
379 
380 		current_rdist_frame += GIC_REDIST_FRAMES_OFFSET;
381 	}
382 }
383 
find_core_pos(void)384 static uint32_t find_core_pos(void)
385 {
386 	uint64_t mpidr_reg;
387 	uint32_t core_id;
388 
389 	mpidr_reg = read_msr(MPIDR_EL1);
390 
391 	core_id = affinity_to_core_id(mpidr_reg);
392 	CHECK(core_id < MAX_CPUS);
393 	return core_id;
394 }
395 
396 /**
397  * Currently, TF-A has complete access to GIC driver and configures
398  * GIC Distributor, GIC Re-distributor and CPU interfaces as needed.
399  */
void gicv3_distif_init(void)
{
	/* TODO: Currently, we skip this; TF-A configures the Distributor. */
	return;

	/*
	 * Intentionally unreachable for now. Enables G1S and G1NS
	 * interrupts and affinity routing once Hafnium owns the GICD.
	 */
	gicd_write_ctlr(
		plat_gicv3_driver.dist_base,
		CTLR_ENABLE_G1NS_BIT | CTLR_ENABLE_G1S_BIT | CTLR_ARE_S_BIT);
}
410 
/** Redistributor init for the given core. No-op: TF-A handles this today. */
void gicv3_rdistif_init(uint32_t core_pos)
{
	/* TODO: Currently, we skip this. */
	(void)core_pos;
}
416 
/** CPU interface enable for the given core. No-op: TF-A handles this today. */
void gicv3_cpuif_enable(uint32_t core_pos)
{
	/* TODO: Currently, we skip this. */
	(void)core_pos;
}
422 
/**
 * Generates an SGI. With send_to_all, the SGI is broadcast (IRM = 1);
 * otherwise it is routed to the PE identified by mpidr_target.
 * to_this_security_state selects ICC_SGI1R_EL1 vs ICC_ASGI1R_EL1.
 */
void gicv3_send_sgi(uint32_t sgi_id, bool send_to_all, uint64_t mpidr_target,
		    bool to_this_security_state)
{
	uint64_t sgir = (sgi_id & SGIR_INTID_MASK) << SGIR_INTID_SHIFT;
	uint64_t routing_mode;

	CHECK(is_sgi_ppi(sgi_id));

	if (send_to_all) {
		/* IRM = 1: route to all PEs (broadcast). */
		routing_mode = 1;
	} else {
		/*
		 * IRM = 0: route by affinity. Extract the affinity path of
		 * the PE for which the SGI will be generated.
		 */
		uint64_t aff3 = (mpidr_target >> MPIDR_AFF3_SHIFT) & 0xff;
		uint64_t aff2 = (mpidr_target >> MPIDR_AFF2_SHIFT) & 0xff;
		uint64_t aff1 = (mpidr_target >> MPIDR_AFF1_SHIFT) & 0xff;
		uint64_t aff0 = (mpidr_target >> MPIDR_AFF0_SHIFT) & 0xff;

		routing_mode = 0;

		/* Populate the Aff3/Aff2/Aff1 fields of the SGI register. */
		sgir |= ((aff3 & SGIR_AFF_MASK) << SGIR_AFF3_SHIFT) |
			((aff2 & SGIR_AFF_MASK) << SGIR_AFF2_SHIFT) |
			((aff1 & SGIR_AFF_MASK) << SGIR_AFF1_SHIFT);

		/*
		 * Target List is a one-hot encoding of the cores to deliver
		 * the interrupt to; the mask limits it to the supported
		 * range. NOTE(review): assumes Aff0 fits the target-list
		 * width (values above it yield an empty list) — confirm
		 * callers never pass larger Aff0.
		 */
		sgir |= ((1U << aff0) & SGIR_TGT_MASK) << SGIR_TGT_SHIFT;
	}

	/* Populate the Interrupt Routing Mode field. */
	sgir |= (routing_mode & SGIR_IRM_MASK) << SGIR_IRM_SHIFT;

	if (to_this_security_state) {
		write_msr(ICC_SGI1R_EL1, sgir);
	} else {
		write_msr(ICC_ASGI1R_EL1, sgir);
	}

	isb();
}
479 
480 #if GIC_EXT_INTID
481 /*******************************************************************************
482  * Helper function to get the maximum ESPI INTID + 1.
483  ******************************************************************************/
unsigned int gicv3_get_espi_limit(uintptr_t gicd_base)
{
	unsigned int typer = read_gicd_typer_reg(gicd_base);
	unsigned int espi_range;

	/* Zero means the extended SPI range is not implemented. */
	if ((typer & TYPER_ESPI) == 0U) {
		return 0U;
	}

	espi_range =
		(typer >> TYPER_ESPI_RANGE_SHIFT) & TYPER_ESPI_RANGE_MASK;

	/* Maximum ESPI INTID + 1 = 32 * (GICD_TYPER.ESPI_range + 1) + 4096. */
	return ((espi_range + 1U) << 5) + MIN_ESPI_ID;
}
503 #endif /* GIC_EXT_INTID */
504 
/**
 * Maps the GIC Distributor and Redistributor frames into Hafnium's stage-1
 * page tables, sanity-checks the reported GIC architecture revision and
 * records the mapped base addresses in the driver state.
 *
 * Returns false if either identity mapping fails; CHECKs (panics) on a
 * GIC revision mismatch or a missing expected extended-INTID range.
 */
bool gicv3_driver_init(struct mm_stage1_locked stage1_locked,
		       struct mpool *ppool)
{
	void *base_addr;
	uint32_t gic_version;
	uint32_t reg_pidr;

	/* Map the Distributor frame as device memory (read/write). */
	base_addr = mm_identity_map(stage1_locked, pa_init(GICD_BASE),
				    pa_init(GICD_BASE + GICD_SIZE),
				    MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);
	if (base_addr == NULL) {
		dlog_error("Could not map GICv3 into Hafnium memory map\n");
		return false;
	}

	plat_gicv3_driver.dist_base = (uintptr_t)base_addr;

	/* Map all Redistributor frames (per-PE size depends on GIC v3/v4). */
	base_addr = mm_identity_map(
		stage1_locked, pa_init(GICR_BASE),
		pa_init(GICR_BASE + GICR_FRAMES * GIC_REDIST_SIZE_PER_PE),
		MM_MODE_R | MM_MODE_W | MM_MODE_D, ppool);

	if (base_addr == NULL) {
		dlog_error("Could not map GICv3 into Hafnium memory map\n");
		return false;
	}

	plat_gicv3_driver.base_redist_frame = (uintptr_t)base_addr;

	/* Check GIC version reported by the Peripheral ID register. */
	reg_pidr = gicd_read_pidr2(plat_gicv3_driver.dist_base);
	gic_version = (reg_pidr >> PIDR2_ARCH_REV_SHIFT) & PIDR2_ARCH_REV_MASK;

#if GIC_VERSION == 3
	CHECK(gic_version == ARCH_REV_GICV3);
#elif GIC_VERSION == 4
	CHECK(gic_version == ARCH_REV_GICV4);
#endif
	/* Record each core's Redistributor frame address. */
	populate_redist_base_addrs();

#if GIC_EXT_INTID
	/* Build expects extended INTIDs; the hardware must implement them. */
	CHECK((read_gicd_typer_reg(plat_gicv3_driver.dist_base) & TYPER_ESPI) ==
	      TYPER_ESPI);
	CHECK(gicv3_get_espi_limit(plat_gicv3_driver.dist_base) != 0);
#endif
	return true;
}
552 
/**
 * Platform entry point: initializes the GICv3 driver and the Distributor
 * and Redistributor interfaces for the current core.
 * The FDT is unused; GIC base addresses come from build-time constants.
 */
bool plat_interrupts_controller_driver_init(
	const struct fdt *fdt, struct mm_stage1_locked stage1_locked,
	struct mpool *ppool)
{
	(void)fdt;

	if (!gicv3_driver_init(stage1_locked, ppool)) {
		dlog_error("Failed to initialize GICv3 driver\n");
		return false;
	}

	gicv3_distif_init();
	gicv3_rdistif_init(find_core_pos());

	return true;
}
569 
/** Per-CPU hardware init hook: enables the GIC CPU interface for this core. */
void plat_interrupts_controller_hw_init(struct cpu *c)
{
	(void)c;
	gicv3_cpuif_enable(find_core_pos());
}
575 
/** Sets ICC_PMR_EL1: only interrupts of higher priority than this fire. */
void plat_interrupts_set_priority_mask(uint8_t min_priority)
{
	write_msr(ICC_PMR_EL1, min_priority);
}
580 
/** Returns the current priority mask from ICC_PMR_EL1. */
uint8_t plat_interrupts_get_priority_mask(void)
{
	return read_msr(ICC_PMR_EL1);
}
585 
/** Thin wrapper over gicv3_set_interrupt_priority. */
void plat_interrupts_set_priority(uint32_t id, uint32_t core_pos,
				  uint32_t priority)
{
	gicv3_set_interrupt_priority(id, core_pos, priority);
}
591 
/** Thin wrapper over gicv3_enable_interrupt. */
void plat_interrupts_enable(uint32_t id, uint32_t core_pos)
{
	gicv3_enable_interrupt(id, core_pos);
}
596 
/** Thin wrapper over gicv3_disable_interrupt. */
void plat_interrupts_disable(uint32_t id, uint32_t core_pos)
{
	gicv3_disable_interrupt(id, core_pos);
}
601 
/** Sets the interrupt group of `id`, using the calling core's position. */
void plat_interrupts_set_type(uint32_t id, uint32_t type)
{
	gicv3_set_interrupt_type(id, find_core_pos(), type);
}
606 
/** Returns the interrupt group of `id`, using the calling core's position. */
uint32_t plat_interrupts_get_type(uint32_t id)
{
	return gicv3_get_interrupt_type(id, find_core_pos());
}
611 
/** Thin wrapper over gicv3_get_pending_interrupt_id. */
uint32_t plat_interrupts_get_pending_interrupt_id(void)
{
	return gicv3_get_pending_interrupt_id();
}
616 
/** Thin wrapper over gicv3_end_of_interrupt. */
void plat_interrupts_end_of_interrupt(uint32_t id)
{
	gicv3_end_of_interrupt(id);
}
621 
622 /**
623  * Configure Group, priority, edge/level of the interrupt and enable it.
624  */
plat_interrupts_configure_interrupt(struct interrupt_descriptor int_desc)625 void plat_interrupts_configure_interrupt(struct interrupt_descriptor int_desc)
626 {
627 	uint32_t core_idx = find_core_pos();
628 	uint32_t level_cfg = 0U;
629 	uint32_t intr_num = interrupt_desc_get_id(int_desc);
630 
631 	CHECK(core_idx < MAX_CPUS);
632 	CHECK(IS_SGI_PPI(intr_num) || IS_SPI(intr_num));
633 
634 	/* Configure the interrupt as either G1S or G1NS. */
635 	if (interrupt_desc_get_sec_state(int_desc) != 0) {
636 		gicv3_set_interrupt_type(intr_num, core_idx, INTR_GROUP1S);
637 	} else {
638 		gicv3_set_interrupt_type(intr_num, core_idx, INTR_GROUP1NS);
639 	}
640 
641 	/* Program the interrupt priority. */
642 	gicv3_set_interrupt_priority(intr_num, core_idx,
643 				     interrupt_desc_get_priority(int_desc));
644 
645 	if (interrupt_desc_get_config(int_desc) != 0) {
646 		level_cfg = 1U;
647 	}
648 
649 	/* Set interrupt configuration. */
650 	if (is_sgi_ppi(intr_num)) {
651 		/* GICR interface. */
652 		gicr_set_icfgr(plat_gicv3_driver.all_redist_frames[core_idx],
653 			       intr_num, level_cfg);
654 	} else {
655 		/* GICD interface. */
656 		gicd_set_icfgr(plat_gicv3_driver.dist_base, intr_num,
657 			       level_cfg);
658 	}
659 
660 	/* Target SPI to primary CPU using affinity routing. */
661 	if (IS_SPI(intr_num)) {
662 		uint64_t gic_affinity_val;
663 
664 		gic_affinity_val =
665 			gicd_irouter_val_from_mpidr(read_msr(MPIDR_EL1), 0U);
666 		gicd_write_irouter(plat_gicv3_driver.dist_base, intr_num,
667 				   gic_affinity_val);
668 	}
669 
670 	/* Enable the interrupt now. */
671 	gicv3_enable_interrupt(intr_num, core_idx);
672 }
673 
/** Sends SGI `id` to the single CPU `cpu` (no broadcast). */
void plat_interrupts_send_sgi(uint32_t id, struct cpu *cpu,
			      bool to_this_security_state)
{
	gicv3_send_sgi(id, false, cpu->id, to_this_security_state);
}
679