/*
 * Copyright 2021 BayLibre, SAS
 * Copyright 2025 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(intc_gicv3_its, LOG_LEVEL_ERR);

#include <zephyr/cache.h>
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/interrupt_controller/gicv3_its.h>
#include <zephyr/sys/barrier.h>

#include "intc_gic_common_priv.h"
#include "intc_gicv3_priv.h"

#define DT_DRV_COMPAT   arm_gic_v3_its

/*
 * The current ITS implementation only handles physical interrupt generation
 * (GICv3 ITS) and is designed with the PCIe MSI/MSI-X use case in mind.
 */

#define GITS_BASER_NR_REGS              8

/* Convenient access to all redistributor base addresses */
extern mem_addr_t gic_rdists[CONFIG_MP_MAX_NUM_CPUS];

/* Next free LPI INTID; LPIs start at INTID 8192 */
static atomic_t nlpi_intid = ATOMIC_INIT(8192);

#define SIZE_256                        256
#define SIZE_4K                         KB(4)
#define SIZE_16K                        KB(16)
#define SIZE_64K                        KB(64)

struct its_cmd_block {
	uint64_t raw_cmd[4];
};

#define ITS_CMD_QUEUE_SIZE              SIZE_64K
#define ITS_CMD_QUEUE_NR_ENTRIES        (ITS_CMD_QUEUE_SIZE / sizeof(struct its_cmd_block))

/* The level 1 entry size is a 64-bit pointer */
#define GITS_LVL1_ENTRY_SIZE (8UL)

struct gicv3_its_data {
	mm_reg_t base;
	struct its_cmd_block *cmd_base;
	struct its_cmd_block *cmd_write;
	bool dev_table_is_indirect;
	uint64_t *indirect_dev_lvl1_table;
	size_t indirect_dev_lvl1_width;
	size_t indirect_dev_lvl2_width;
	size_t indirect_dev_page_size;
};

struct gicv3_its_config {
	uintptr_t base_addr;
	size_t base_size;
	struct its_cmd_block *cmd_queue;
	size_t cmd_queue_size;
};

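/*
 * Find the last (most significant) set bit.
 * Returns its 1-based position, or 0 when x == 0, so for any non-zero x,
 * fls_z(x) - 1 is floor(log2(x)).
 */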
static inline int fls_z(unsigned int x)
{
	unsigned int bits = sizeof(x) * 8;
	unsigned int cmp = 1 << (bits - 1);

	while (bits) {
		if (x & cmp) {
			return bits;
		}
		cmp >>= 1;
		bits--;
	}

	return 0;
}

/* Wait up to 500 ms, polling every millisecond */
#define WAIT_QUIESCENT 500

static int its_force_quiescent(struct gicv3_its_data *data)
{
	unsigned int count = WAIT_QUIESCENT;
	uint32_t reg = sys_read32(data->base + GITS_CTLR);

	if (GITS_CTLR_ENABLED_GET(reg)) {
		/* Disable ITS */
		reg &= ~MASK(GITS_CTLR_ENABLED);
		sys_write32(reg, data->base + GITS_CTLR);
	}

	while (1) {
		if (GITS_CTLR_QUIESCENT_GET(reg)) {
			return 0;
		}

		count--;
		if (!count) {
			return -EBUSY;
		}

		k_msleep(1);
		reg = sys_read32(data->base + GITS_CTLR);
	}
}

static const char *const its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE] = "Devices",
	[GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
};

/* Probe the BASER(i) to get the largest supported page size */
static size_t its_probe_baser_page_size(struct gicv3_its_data *data, int i)
{
	uint64_t page_size = GITS_BASER_PAGE_SIZE_64K;

	while (page_size > GITS_BASER_PAGE_SIZE_4K) {
		uint64_t reg = sys_read64(data->base + GITS_BASER(i));

		reg &= ~MASK(GITS_BASER_PAGE_SIZE);
		reg |= MASK_SET(page_size, GITS_BASER_PAGE_SIZE);

		sys_write64(reg, data->base + GITS_BASER(i));

		reg = sys_read64(data->base + GITS_BASER(i));

		if (MASK_GET(reg, GITS_BASER_PAGE_SIZE) == page_size) {
			break;
		}

		switch (page_size) {
		case GITS_BASER_PAGE_SIZE_64K:
			page_size = GITS_BASER_PAGE_SIZE_16K;
			break;
		default:
			page_size = GITS_BASER_PAGE_SIZE_4K;
		}
	}

	switch (page_size) {
	case GITS_BASER_PAGE_SIZE_64K:
		return SIZE_64K;
	case GITS_BASER_PAGE_SIZE_16K:
		return SIZE_16K;
	default:
		return SIZE_4K;
	}
}

static int its_alloc_tables(struct gicv3_its_data *data)
{
	unsigned int device_ids = GITS_TYPER_DEVBITS_GET(sys_read64(data->base + GITS_TYPER)) + 1;
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; ++i) {
		uint64_t reg = sys_read64(data->base + GITS_BASER(i));
		unsigned int type = GITS_BASER_TYPE_GET(reg);
		size_t page_size, entry_size, page_cnt, lvl2_width = 0;
		bool indirect = false;
		void *alloc_addr;

		entry_size = GITS_BASER_ENTRY_SIZE_GET(reg) + 1;

		switch (GITS_BASER_PAGE_SIZE_GET(reg)) {
		case GITS_BASER_PAGE_SIZE_4K:
			page_size = SIZE_4K;
			break;
		case GITS_BASER_PAGE_SIZE_16K:
			page_size = SIZE_16K;
			break;
		case GITS_BASER_PAGE_SIZE_64K:
			page_size = SIZE_64K;
			break;
		default:
			page_size = SIZE_4K;
		}

		switch (type) {
		case GITS_BASER_TYPE_DEVICE:
			if (device_ids > 16) {
				/* Use the largest possible page size for indirect */
				page_size = its_probe_baser_page_size(data, i);

				/*
				 * Two-level table: each lvl2 page covers
				 * lvl2_width low-order DeviceID bits, so the
				 * lvl1 table only needs entries for the
				 * remaining high-order DeviceID bits, each
				 * entry being a 64-bit pointer to a lvl2 page.
				 */
				lvl2_width = fls_z(page_size / entry_size) - 1;
				device_ids -= lvl2_width + 1;

				entry_size = GITS_LVL1_ENTRY_SIZE;

				indirect = true;
			}

			page_cnt = ROUND_UP(entry_size << device_ids, page_size) / page_size;
			break;
		case GITS_BASER_TYPE_COLLECTION:
			page_cnt =
				ROUND_UP(entry_size * CONFIG_MP_MAX_NUM_CPUS, page_size) / page_size;
			break;
		default:
			continue;
		}

		LOG_INF("Allocating %s table of %ldx%ldK pages (%ld bytes entry)",
			its_base_type_string[type], page_cnt, page_size / 1024, entry_size);

		alloc_addr = k_aligned_alloc(page_size, page_size * page_cnt);
		if (!alloc_addr) {
			return -ENOMEM;
		}

		memset(alloc_addr, 0, page_size * page_cnt);

		switch (page_size) {
		case SIZE_4K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_4K, GITS_BASER_PAGE_SIZE);
			break;
		case SIZE_16K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_16K, GITS_BASER_PAGE_SIZE);
			break;
		case SIZE_64K:
			reg = MASK_SET(GITS_BASER_PAGE_SIZE_64K, GITS_BASER_PAGE_SIZE);
			break;
		}

		reg |= MASK_SET(page_cnt - 1, GITS_BASER_SIZE);
		reg |= MASK_SET((uintptr_t)alloc_addr >> GITS_BASER_ADDR_SHIFT, GITS_BASER_ADDR);
		reg |= MASK_SET(GIC_BASER_CACHE_INNERLIKE, GITS_BASER_OUTER_CACHE);
#ifdef CONFIG_GIC_V3_ITS_DMA_NONCOHERENT
		reg |= MASK_SET(GIC_BASER_SHARE_NO, GITS_BASER_SHAREABILITY);
		reg |= MASK_SET(GIC_BASER_CACHE_NCACHEABLE, GITS_BASER_INNER_CACHE);
#else
		reg |= MASK_SET(GIC_BASER_SHARE_INNER, GITS_BASER_SHAREABILITY);
		reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_BASER_INNER_CACHE);
#endif
		reg |= MASK_SET(indirect ? 1 : 0, GITS_BASER_INDIRECT);
		reg |= MASK_SET(1, GITS_BASER_VALID);

#ifdef CONFIG_GIC_V3_ITS_DMA_NONCOHERENT
		arch_dcache_flush_and_invd_range(alloc_addr, page_size * page_cnt);
#endif

		sys_write64(reg, data->base + GITS_BASER(i));

		/* TOFIX: check page size & SHAREABILITY validity after write */

		if (type == GITS_BASER_TYPE_DEVICE && indirect) {
			data->dev_table_is_indirect = indirect;
			data->indirect_dev_lvl1_table = alloc_addr;
			data->indirect_dev_lvl1_width = device_ids;
			data->indirect_dev_lvl2_width = lvl2_width;
			data->indirect_dev_page_size = page_size;
			LOG_DBG("%s table Indirection enabled", its_base_type_string[type]);
		}
	}

	return 0;
}

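/*
 * The command queue is a ring buffer: software owns the write index
 * (GITS_CWRITER), the ITS owns the read index (GITS_CREADR). The queue is
 * full when advancing the write index would make it collide with the read
 * index.
 */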
static bool its_queue_full(struct gicv3_its_data *data)
{
	int widx;
	int ridx;

	widx = data->cmd_write - data->cmd_base;
	ridx = sys_read32(data->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	return (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx);
}

static struct its_cmd_block *its_allocate_entry(struct gicv3_its_data *data)
{
	struct its_cmd_block *cmd;
	unsigned int count = 1000000;   /* 1s! */

	while (its_queue_full(data)) {
		count--;
		if (!count) {
			LOG_ERR("ITS queue not draining");
			return NULL;
		}
		k_usleep(1);
	}

	cmd = data->cmd_write++;

	/* Handle queue wrapping */
	if (data->cmd_write == (data->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) {
		data->cmd_write = data->cmd_base;
	}

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

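/*
 * Publish a command to the ITS: flush it to memory when the ITS is not
 * cache-coherent, then advance GITS_CWRITER and poll GITS_CREADR until the
 * ITS has consumed the command.
 */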
static int its_post_command(struct gicv3_its_data *data, struct its_cmd_block *cmd)
{
	uint64_t wr_idx, rd_idx, idx;
	unsigned int count = 1000000;   /* 1s! */

#ifdef CONFIG_GIC_V3_ITS_DMA_NONCOHERENT
	arch_dcache_flush_and_invd_range(cmd, sizeof(*cmd));
#endif

	wr_idx = (data->cmd_write - data->cmd_base) * sizeof(*cmd);
	rd_idx = sys_read32(data->base + GITS_CREADR);

	barrier_dsync_fence_full();

	sys_write32(wr_idx, data->base + GITS_CWRITER);

	while (1) {
		idx = sys_read32(data->base + GITS_CREADR);

		if (idx == wr_idx) {
			break;
		}

		count--;
		if (!count) {
			LOG_ERR("ITS queue timeout (rd %lld => %lld => wr %lld)",
				rd_idx, idx, wr_idx);
			return -ETIMEDOUT;
		}
		if (k_is_pre_kernel()) {
			k_busy_wait(1);
		} else {
			k_usleep(1);
		}
	}

	return 0;
}

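/*
 * SYNC: ensure all previous commands targeting the redistributor at rd_addr
 * have taken effect before any later command is processed.
 */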
static int its_send_sync_cmd(struct gicv3_its_data *data, uintptr_t rd_addr)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_SYNC, GITS_CMD_ID);
	cmd->raw_cmd[2] = MASK_SET(rd_addr, GITS_CMD_RDBASE);

	return its_post_command(data, cmd);
}

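/* MAPC: map an interrupt collection (ICID) to a target redistributor */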
static int its_send_mapc_cmd(struct gicv3_its_data *data, uint32_t icid,
			     uintptr_t rd_addr, bool valid)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPC, GITS_CMD_ID);
	cmd->raw_cmd[2] = MASK_SET(icid, GITS_CMD_ICID) | MASK_SET(rd_addr, GITS_CMD_RDBASE) |
			  MASK_SET(valid ? 1 : 0, GITS_CMD_VALID);

	return its_post_command(data, cmd);
}

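/*
 * MAPD: bind a DeviceID to its Interrupt Translation Table (ITT); the table
 * at itt_addr holds 2^(size + 1) event entries.
 */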
static int its_send_mapd_cmd(struct gicv3_its_data *data, uint32_t device_id,
			     uint32_t size, uintptr_t itt_addr, bool valid)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPD, GITS_CMD_ID) |
			  MASK_SET(device_id, GITS_CMD_DEVICEID);
	cmd->raw_cmd[1] = MASK_SET(size, GITS_CMD_SIZE);
	cmd->raw_cmd[2] = MASK_SET(itt_addr >> GITS_CMD_ITTADDR_ALIGN, GITS_CMD_ITTADDR) |
			  MASK_SET(valid ? 1 : 0, GITS_CMD_VALID);

	return its_post_command(data, cmd);
}

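/* MAPTI: translate (DeviceID, EventID) to a physical INTID routed via ICID */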
static int its_send_mapti_cmd(struct gicv3_its_data *data, uint32_t device_id,
			      uint32_t event_id, uint32_t intid, uint32_t icid)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_MAPTI, GITS_CMD_ID) |
			  MASK_SET(device_id, GITS_CMD_DEVICEID);
	cmd->raw_cmd[1] = MASK_SET(event_id, GITS_CMD_EVENTID) |
			  MASK_SET(intid, GITS_CMD_PINTID);
	cmd->raw_cmd[2] = MASK_SET(icid, GITS_CMD_ICID);

	return its_post_command(data, cmd);
}

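/* INT: generate the interrupt mapped to (DeviceID, EventID) from software */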
static int its_send_int_cmd(struct gicv3_its_data *data, uint32_t device_id,
			    uint32_t event_id)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_INT, GITS_CMD_ID) |
			  MASK_SET(device_id, GITS_CMD_DEVICEID);
	cmd->raw_cmd[1] = MASK_SET(event_id, GITS_CMD_EVENTID);

	return its_post_command(data, cmd);
}

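/* INVALL: make the ITS reload any cached LPI configuration for a collection */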
static int its_send_invall_cmd(struct gicv3_its_data *data, uint32_t icid)
{
	struct its_cmd_block *cmd = its_allocate_entry(data);

	if (!cmd) {
		return -EBUSY;
	}

	cmd->raw_cmd[0] = MASK_SET(GITS_CMD_ID_INVALL, GITS_CMD_ID);
	cmd->raw_cmd[2] = MASK_SET(icid, GITS_CMD_ICID);

	return its_post_command(data, cmd);
}

static int gicv3_its_send_int(const struct device *dev, uint32_t device_id, uint32_t event_id)
{
	struct gicv3_its_data *data = dev->data;
	/* TOFIX check device_id & event_id bounds */

	return its_send_int_cmd(data, device_id, event_id);
}

static void its_setup_cmd_queue(const struct device *dev)
{
	const struct gicv3_its_config *cfg = dev->config;
	struct gicv3_its_data *data = dev->data;
	uint64_t reg = 0, tmp;

	/* Zero out cmd table */
	memset(cfg->cmd_queue, 0, cfg->cmd_queue_size);

	reg |= MASK_SET(cfg->cmd_queue_size / SIZE_4K, GITS_CBASER_SIZE);
	reg |= MASK_SET(GIC_BASER_SHARE_INNER, GITS_CBASER_SHAREABILITY);
	reg |= MASK_SET((uintptr_t)cfg->cmd_queue >> GITS_CBASER_ADDR_SHIFT, GITS_CBASER_ADDR);
	reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_CBASER_OUTER_CACHE);
	reg |= MASK_SET(GIC_BASER_CACHE_RAWAWB, GITS_CBASER_INNER_CACHE);
	reg |= MASK_SET(1, GITS_CBASER_VALID);

	sys_write64(reg, data->base + GITS_CBASER);

#ifdef CONFIG_GIC_V3_ITS_DMA_NONCOHERENT
	reg &= ~(MASK(GITS_BASER_SHAREABILITY));
#endif
	/* Check whether the hardware supports shareability */
	tmp = sys_read64(data->base + GITS_CBASER);
	if (!(tmp & MASK(GITS_BASER_SHAREABILITY))) {
		reg &= ~(MASK(GITS_BASER_SHAREABILITY) | MASK(GITS_BASER_INNER_CACHE));
		reg |= MASK_SET(GIC_BASER_CACHE_NCACHEABLE, GITS_CBASER_INNER_CACHE);
		sys_write64(reg, data->base + GITS_CBASER);
	}

	data->cmd_base = (struct its_cmd_block *)cfg->cmd_queue;
	data->cmd_write = data->cmd_base;

	LOG_INF("Allocated %ld entries for command table", ITS_CMD_QUEUE_NR_ENTRIES);

	sys_write64(0, data->base + GITS_CWRITER);
}

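/*
 * Resolve the RDbase value for a CPU: with GITS_TYPER.PTA set, ITS commands
 * take the redistributor physical address (bits [51:16]); otherwise they
 * take the processor number from GICR_TYPER.
 */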
static uintptr_t gicv3_rdist_get_rdbase(const struct device *dev, unsigned int cpuid)
{
	struct gicv3_its_data *data = dev->data;
	uint64_t typer = sys_read64(data->base + GITS_TYPER);
	uintptr_t rdbase;

	if (GITS_TYPER_PTA_GET(typer)) {
		rdbase = gic_rdists[cpuid];
		/* RDbase must be 64KB aligned, only return bits[51:16] of the address */
		rdbase = rdbase >> GITS_CMD_RDBASE_ALIGN;
	} else {
		rdbase =
			GICR_TYPER_PROCESSOR_NUMBER_GET(sys_read64(gic_rdists[cpuid] + GICR_TYPER));
	}

	return rdbase;
}

static int gicv3_its_map_intid(const struct device *dev, uint32_t device_id, uint32_t event_id,
			       unsigned int intid)
{
	struct gicv3_its_data *data = dev->data;
	int ret;

	/* TOFIX check device_id, event_id & intid bounds */

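	/* LPI INTIDs start at 8192; anything below is not an LPI */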
	if (intid < 8192) {
		return -EINVAL;
	}

	/* The CPU id directly maps as ICID for the current CPU redistributor */
	ret = its_send_mapti_cmd(data, device_id, event_id, intid, arch_curr_cpu()->id);
	if (ret) {
		LOG_ERR("Failed to map eventid %d to intid %d for deviceid %x",
			event_id, intid, device_id);
		return ret;
	}

	return its_send_sync_cmd(data, gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id));
}

static int gicv3_its_init_device_id(const struct device *dev, uint32_t device_id,
				    unsigned int nites)
{
	struct gicv3_its_data *data = dev->data;
	size_t entry_size, alloc_size;
	int nr_ites;
	void *itt;
	int ret;

	/* TOFIX check device_id & nites bounds */

	entry_size = GITS_TYPER_ITT_ENTRY_SIZE_GET(sys_read64(data->base + GITS_TYPER)) + 1;

	if (data->dev_table_is_indirect) {
		size_t offset = device_id >> data->indirect_dev_lvl2_width;

		/* Check if DeviceID can fit in the Level 1 table */
		if (offset > (1 << data->indirect_dev_lvl1_width)) {
			return -EINVAL;
		}

		/* Check if a Level 2 table has already been allocated for the DeviceID */
		if (!data->indirect_dev_lvl1_table[offset]) {
			void *alloc_addr;

			LOG_INF("Allocating Level 2 Device %ldK table",
				data->indirect_dev_page_size / 1024);

			alloc_addr = k_aligned_alloc(data->indirect_dev_page_size,
						     data->indirect_dev_page_size);
			if (!alloc_addr) {
				return -ENOMEM;
			}

			memset(alloc_addr, 0, data->indirect_dev_page_size);

#ifdef CONFIG_GIC_V3_ITS_DMA_NONCOHERENT
			arch_dcache_flush_and_invd_range(alloc_addr, data->indirect_dev_page_size);
#endif

			data->indirect_dev_lvl1_table[offset] = (uintptr_t)alloc_addr |
								MASK_SET(1, GITS_BASER_VALID);

#ifdef CONFIG_GIC_V3_ITS_DMA_NONCOHERENT
			arch_dcache_flush_and_invd_range(data->indirect_dev_lvl1_table + offset,
							 GITS_LVL1_ENTRY_SIZE);
#endif

			barrier_dsync_fence_full();
		}
	}

	/* The number of ITT entries must be a power of 2, with a minimum of 2 */
	nr_ites = MAX(2, nites);
	alloc_size = ROUND_UP(nr_ites * entry_size, SIZE_256);

	LOG_INF("Allocating ITT for DeviceID %x and %d vectors (%ld bytes entry)",
		device_id, nr_ites, entry_size);

	itt = k_aligned_alloc(SIZE_256, alloc_size);
	if (!itt) {
		return -ENOMEM;
	}
	memset(itt, 0, alloc_size);
#ifdef CONFIG_GIC_V3_ITS_DMA_NONCOHERENT
	arch_dcache_flush_and_invd_range(itt, alloc_size);
#endif

	/* size is log2(ites) - 1, equivalent to (fls(ites) - 1) - 1 */
	ret = its_send_mapd_cmd(data, device_id, fls_z(nr_ites) - 2, (uintptr_t)itt, true);
	if (ret) {
		LOG_ERR("Failed to map device id %x ITT table", device_id);
		return ret;
	}

	return 0;
}

static unsigned int gicv3_its_alloc_intid(const struct device *dev)
{
	return atomic_inc(&nlpi_intid);
}

static uint32_t gicv3_its_get_msi_addr(const struct device *dev)
{
	const struct gicv3_its_config *cfg = (const struct gicv3_its_config *)dev->config;

	return cfg->base_addr + GITS_TRANSLATER;
}

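/*
 * Map the calling CPU's redistributor to a collection on each ITS instance;
 * intended to be run on every CPU, typically when a core is brought up.
 */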
#define ITS_RDIST_MAP(n)									  \
	{											  \
		const struct device *const dev = DEVICE_DT_INST_GET(n);			  \
		struct gicv3_its_data *data;							  \
		int ret;									  \
												  \
		if (dev) {									  \
			data = (struct gicv3_its_data *) dev->data;				  \
			ret = its_send_mapc_cmd(data, arch_curr_cpu()->id,			  \
						gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id), \
						true);						  \
			if (ret) {								  \
				LOG_ERR("Failed to map CPU%d redistributor",			  \
					arch_curr_cpu()->id);					  \
			}									  \
		}										  \
	}

void its_rdist_map(void)
{
	DT_INST_FOREACH_STATUS_OKAY(ITS_RDIST_MAP)
}

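/*
 * Invalidate the cached LPI configuration for the calling CPU's collection
 * on each ITS instance, typically after the LPI configuration table changed.
 */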
#define ITS_RDIST_INVALL(n)									\
	{											\
		const struct device *const dev = DEVICE_DT_INST_GET(n);			\
		struct gicv3_its_data *data;							\
		int ret;									\
												\
		if (dev) {									\
			data = (struct gicv3_its_data *) dev->data;				\
			ret = its_send_invall_cmd(data, arch_curr_cpu()->id);			\
			if (ret) {								\
				LOG_ERR("Failed to sync RDIST LPI cache for CPU%d",		\
					arch_curr_cpu()->id);					\
			}									\
												\
			its_send_sync_cmd(data,							\
					  gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id));	\
		}										\
	}

void its_rdist_invall(void)
{
	DT_INST_FOREACH_STATUS_OKAY(ITS_RDIST_INVALL)
}

static int gicv3_its_init(const struct device *dev)
{
	const struct gicv3_its_config *cfg = dev->config;
	struct gicv3_its_data *data = dev->data;
	uint32_t reg;
	int ret;

	device_map(&data->base, cfg->base_addr, cfg->base_size, K_MEM_CACHE_NONE);

	ret = its_force_quiescent(data);
	if (ret) {
		LOG_ERR("Failed to quiesce, giving up");
		return ret;
	}

	ret = its_alloc_tables(data);
	if (ret) {
		LOG_ERR("Failed to allocate tables, giving up");
		return ret;
	}

	its_setup_cmd_queue(dev);

	reg = sys_read32(data->base + GITS_CTLR);
	reg |= MASK_SET(1, GITS_CTLR_ENABLED);
	sys_write32(reg, data->base + GITS_CTLR);

	/* Map the boot CPU id to the CPU redistributor */
	ret = its_send_mapc_cmd(data, arch_curr_cpu()->id,
				gicv3_rdist_get_rdbase(dev, arch_curr_cpu()->id), true);
	if (ret) {
		LOG_ERR("Failed to map boot CPU redistributor");
		return ret;
	}

	return 0;
}

DEVICE_API(its, gicv3_its_api) = {
	.alloc_intid = gicv3_its_alloc_intid,
	.setup_deviceid = gicv3_its_init_device_id,
	.map_intid = gicv3_its_map_intid,
	.send_int = gicv3_its_send_int,
	.get_msi_addr = gicv3_its_get_msi_addr,
};

#define GICV3_ITS_INIT(n)						       \
	static struct its_cmd_block gicv3_its_cmd##n[ITS_CMD_QUEUE_NR_ENTRIES] \
	__aligned(ITS_CMD_QUEUE_SIZE);					       \
	static struct gicv3_its_data gicv3_its_data##n;			       \
	static const struct gicv3_its_config gicv3_its_config##n = {	       \
		.base_addr = DT_INST_REG_ADDR(n),			       \
		.base_size = DT_INST_REG_SIZE(n),			       \
		.cmd_queue = gicv3_its_cmd##n,				       \
		.cmd_queue_size = sizeof(gicv3_its_cmd##n),		       \
	};								       \
	DEVICE_DT_INST_DEFINE(n, &gicv3_its_init, NULL,			       \
			      &gicv3_its_data##n,			       \
			      &gicv3_its_config##n,			       \
			      PRE_KERNEL_1,				       \
			      CONFIG_INTC_INIT_PRIORITY,		       \
			      &gicv3_its_api);

DT_INST_FOREACH_STATUS_OKAY(GICV3_ITS_INIT)