/*
 * Arm SCP/MCP Software
 * Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <cmn600.h>

#include <internal/cmn600_ccix.h>
#include <internal/cmn600_ctx.h>

#include <mod_clock.h>
#include <mod_cmn600.h>
#include <mod_ppu_v1.h>
#include <mod_system_info.h>
#include <mod_timer.h>

#include <fwk_assert.h>
#include <fwk_event.h>
#include <fwk_id.h>
#include <fwk_log.h>
#include <fwk_mm.h>
#include <fwk_module.h>
#include <fwk_module_idx.h>
#include <fwk_notification.h>
#include <fwk_status.h>

#include <inttypes.h>
#include <stdbool.h>
#include <string.h>

#define MOD_NAME "[CMN600] "

#if FWK_LOG_LEVEL <= FWK_LOG_LEVEL_INFO
static const char *const mmap_type_name[] = {
    [MOD_CMN600_MEMORY_REGION_TYPE_IO] = "I/O",
    [MOD_CMN600_MEMORY_REGION_TYPE_SYSCACHE] = "System Cache",
    [MOD_CMN600_REGION_TYPE_SYSCACHE_SUB] = "Sub-System Cache",
    [MOD_CMN600_REGION_TYPE_CCIX] = "CCIX",
    [MOD_CMN600_REGION_TYPE_SYSCACHE_NONHASH] = "System Cache Non-hash",
};
#else
static const char *const mmap_type_name[] = { "" };
#endif

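/*
 * Number of SYS_CACHE_GRP_HN_NODEID group registers needed to hold
 * 'hnf_count' entries: a ceiling division by the number of entries that
 * fit in one group.
 */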
static inline size_t cmn600_hnf_cache_group_count(size_t hnf_count)
{
    return (hnf_count + CMN600_HNF_CACHE_GROUP_ENTRIES_PER_GROUP - 1) /
        CMN600_HNF_CACHE_GROUP_ENTRIES_PER_GROUP;
}

struct cmn600_ctx *ctx;

/* Chip information API */
struct mod_system_info_get_info_api *system_info_api;

static void process_node_hnf(struct cmn600_hnf_reg *hnf)
{
    unsigned int logical_id;
    unsigned int node_id;
    unsigned int group;
    unsigned int bit_pos;
    unsigned int region_idx;
    unsigned int region_sub_count = 0;
    const struct mod_cmn600_memory_region_map *region;
    const struct mod_cmn600_config *config = ctx->config;
    static unsigned int cal_mode_factor = 1;
    uint64_t base_offset;

    logical_id = get_node_logical_id(hnf);
    node_id = get_node_id(hnf);

    /*
     * If CAL mode is set, only even-numbered HN-F nodes should be added to
     * the sys_cache_grp_hn_nodeid registers and hnf_count should be
     * incremented only for the even-numbered HN-F nodes.
     */
    if (config->hnf_cal_mode == true && (node_id % 2 == 1) &&
        is_cal_mode_supported(ctx->root)) {

        /* Factor to manipulate the group and bit_pos */
        cal_mode_factor = 2;

        /*
         * Reduce the hnf_count as the current HN-F node is not included in
         * the sys_cache_grp_hn_nodeid register
         */
        ctx->hnf_count--;
    }

    fwk_assert(logical_id < config->snf_count);

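    /*
     * 'group' selects which sys_cache_grp_hn_nodeid register receives this
     * HN-F entry and 'bit_pos' the field within it. With CAL mode in effect
     * (cal_mode_factor == 2) each field is half as wide, so twice as many
     * logical IDs map onto a single register.
     */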
    group = logical_id /
        (CMN600_HNF_CACHE_GROUP_ENTRIES_PER_GROUP * cal_mode_factor);
    bit_pos = (CMN600_HNF_CACHE_GROUP_ENTRY_BITS_WIDTH / cal_mode_factor) *
        (logical_id % (CMN600_HNF_CACHE_GROUP_ENTRIES_PER_GROUP *
            cal_mode_factor));

    /*
     * If CAL mode is set, add only even-numbered HN-F nodes to the
     * sys_cache_grp_hn_nodeid registers
     */
    if (config->hnf_cal_mode == true && is_cal_mode_supported(ctx->root)) {
        if (node_id % 2 == 0)
            ctx->hnf_cache_group[group] += ((uint64_t)get_node_id(hnf)) <<
                bit_pos;
    } else
        ctx->hnf_cache_group[group] += ((uint64_t)get_node_id(hnf)) << bit_pos;

    /* Set target node */
    hnf->SAM_CONTROL = config->snf_table[logical_id];

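    /*
     * On chips other than chip 0, the sub-region bases programmed below are
     * offset by one chip address space per chip ID.
     */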
    if (ctx->chip_id != 0) {
        base_offset = ((uint64_t)(ctx->config->chip_addr_space *
                                  ctx->chip_id));
    } else
        base_offset = 0;

    /*
     * Map sub-regions to this HN-F node
     */
    for (region_idx = 0; region_idx < config->mmap_count; region_idx++) {
        region = &config->mmap_table[region_idx];

        /* Skip non sub-regions */
        if (region->type != MOD_CMN600_REGION_TYPE_SYSCACHE_SUB)
            continue;

        /* Configure sub-region entry */
        hnf->SAM_MEMREGION[region_sub_count] = region->node_id |
            (sam_encode_region_size(region->size) <<
                CMN600_HNF_SAM_MEMREGION_SIZE_POS) |
            (((region->base + base_offset) / SAM_GRANULARITY) <<
                CMN600_HNF_SAM_MEMREGION_BASE_POS) |
            CMN600_HNF_SAM_MEMREGION_VALID;

        region_sub_count++;
    }

    /* Configure the system cache RAM PPU */
    hnf->PPU_PWPR = CMN600_PPU_PWPR_POLICY_ON |
                    CMN600_PPU_PWPR_OPMODE_FAM |
                    CMN600_PPU_PWPR_DYN_EN;
}

/*
 * Scan the CMN600 to find out:
 * - Number of external RN-SAM nodes
 * - Number of internal RN-SAM nodes
 * - Number of HN-F nodes (cache)
 */
static int cmn600_discovery(void)
{
    unsigned int xp_count;
    unsigned int xp_idx;
    unsigned int node_count;
    unsigned int node_idx;
    bool xp_port;
    struct cmn600_mxp_reg *xp;
    struct node_header *node;
    const struct mod_cmn600_config *config = ctx->config;

    FWK_LOG_INFO(
        MOD_NAME "CMN-600 revision: %s", get_cmn600_revision_name(ctx->root));
    FWK_LOG_INFO(MOD_NAME "Starting discovery...");

    fwk_assert(get_node_type(ctx->root) == NODE_TYPE_CFG);

    /* Traverse cross points (XP) */
    xp_count = get_node_child_count(ctx->root);
    for (xp_idx = 0; xp_idx < xp_count; xp_idx++) {

        xp = get_child_node(config->base, ctx->root, xp_idx);
        fwk_assert(get_node_type(xp) == NODE_TYPE_XP);

        FWK_LOG_INFO(MOD_NAME);

        FWK_LOG_INFO(
            MOD_NAME "XP (%d, %d) ID:%d, LID:%d",
            get_node_pos_x(xp),
            get_node_pos_y(xp),
            get_node_id(xp),
            get_node_logical_id(xp));

        /* Traverse nodes */
        node_count = get_node_child_count(xp);
        for (node_idx = 0; node_idx < node_count; node_idx++) {

            node = get_child_node(config->base, xp, node_idx);

            /* External nodes */
            if (is_child_external(xp, node_idx)) {
                xp_port = get_port_number(get_child_node_id(xp, node_idx));

                /*
                 * If the device type is CXRH, CXHA, or CXRA, then the external
                 * child node is CXLA as every CXRH, CXHA, or CXRA node has a
                 * corresponding external CXLA node.
                 */
                if ((get_device_type(xp, xp_port) == DEVICE_TYPE_CXRH) ||
                    (get_device_type(xp, xp_port) == DEVICE_TYPE_CXHA) ||
                    (get_device_type(xp, xp_port) == DEVICE_TYPE_CXRA)) {
                    ctx->cxla_reg = (void *)node;
                    FWK_LOG_INFO(
                        MOD_NAME " Found CXLA at node ID: %d",
                        get_child_node_id(xp, node_idx));
                } else { /* External RN-SAM Node */
                    ctx->external_rnsam_count++;
                    FWK_LOG_INFO(
                        MOD_NAME " Found external node ID: %d",
                        get_child_node_id(xp, node_idx));
                }
            } else { /* Internal nodes */
                switch (get_node_type(node)) {
                case NODE_TYPE_HN_F:
                    if (ctx->hnf_count >= MAX_HNF_COUNT) {
                        FWK_LOG_INFO(
                            MOD_NAME " hnf count %d >= max limit (%d)",
                            ctx->hnf_count,
                            MAX_HNF_COUNT);
                        return FWK_E_DATA;
                    }
                    ctx->hnf_offset[ctx->hnf_count++] = (uint32_t)node;
                    break;

                case NODE_TYPE_RN_SAM:
                    ctx->internal_rnsam_count++;
                    break;

                case NODE_TYPE_RN_D:
                    if (ctx->rnd_count >= MAX_RND_COUNT) {
                        FWK_LOG_INFO(
                            MOD_NAME " rnd count %d >= max limit (%d)",
                            ctx->rnd_count,
                            MAX_RND_COUNT);
                        return FWK_E_DATA;
                    }
                    ctx->rnd_ldid[ctx->rnd_count++] = get_node_logical_id(node);
                    break;

                case NODE_TYPE_RN_I:
                    if (ctx->rni_count >= MAX_RNI_COUNT) {
                        FWK_LOG_INFO(
                            MOD_NAME " rni count %d >= max limit (%d)",
                            ctx->rni_count,
                            MAX_RNI_COUNT);
                        return FWK_E_DATA;
                    }
                    ctx->rni_ldid[ctx->rni_count++] = get_node_logical_id(node);
                    break;

                case NODE_TYPE_CXRA:
                    ctx->cxg_ra_reg = (struct cmn600_cxg_ra_reg *)node;
                    break;

                case NODE_TYPE_CXHA:
                    ctx->cxg_ha_reg = (struct cmn600_cxg_ha_reg *)node;
                    ctx->ccix_host_info.host_ha_count++;
                    break;

                default:
                    /* Nothing to be done for other node types */
                    break;
                }

                FWK_LOG_INFO(
                    MOD_NAME " %s ID:%d, LID:%d",
                    get_node_type_name(get_node_type(node)),
                    get_node_id(node),
                    get_node_logical_id(node));
            }
        }
    }

    /*
     * RN-F nodes do not have a node type identifier and hence their count
     * cannot be determined during the discovery process. The RN-F count is
     * the total RN-SAM count minus the combined RN-D, RN-I and CXHA count.
     */
    ctx->rnf_count = ctx->internal_rnsam_count + ctx->external_rnsam_count -
        (ctx->rnd_count + ctx->rni_count + ctx->ccix_host_info.host_ha_count);

    if (ctx->rnf_count > MAX_RNF_COUNT) {
        FWK_LOG_ERR(
            MOD_NAME "rnf count %d > max limit (%d)",
            ctx->rnf_count,
            MAX_RNF_COUNT);
        return FWK_E_RANGE;
    }

    /* When CAL is present, the number of HN-Fs must be even. */
    if ((ctx->hnf_count % 2 != 0) && (config->hnf_cal_mode == true)) {
        FWK_LOG_ERR(
            MOD_NAME "hnf count: %d should be even when CAL mode is set",
            ctx->hnf_count);
        return FWK_E_DATA;
    }

    FWK_LOG_INFO(
        MOD_NAME "Total internal RN-SAM nodes: %d", ctx->internal_rnsam_count);
    FWK_LOG_INFO(
        MOD_NAME "Total external RN-SAM nodes: %d", ctx->external_rnsam_count);
    FWK_LOG_INFO(MOD_NAME "Total HN-F nodes: %d", ctx->hnf_count);
    FWK_LOG_INFO(MOD_NAME "Total RN-F nodes: %d", ctx->rnf_count);
    FWK_LOG_INFO(MOD_NAME "Total RN-D nodes: %d", ctx->rnd_count);
    FWK_LOG_INFO(MOD_NAME "Total RN-I nodes: %d", ctx->rni_count);

    if (ctx->cxla_reg != NULL) {
        FWK_LOG_INFO(MOD_NAME "CCIX CXLA node at: 0x%p", (void *)ctx->cxla_reg);
    }
    if (ctx->cxg_ra_reg != NULL) {
        FWK_LOG_INFO(
            MOD_NAME "CCIX CXRA node at: 0x%p", (void *)ctx->cxg_ra_reg);
    }
    if (ctx->cxg_ha_reg != NULL) {
        FWK_LOG_INFO(
            MOD_NAME "CCIX CXHA node at: 0x%p", (void *)ctx->cxg_ha_reg);
    }
    return FWK_SUCCESS;
}

static void cmn600_configure(void)
{
    unsigned int xp_count;
    unsigned int xp_idx;
    bool xp_port;
    unsigned int node_count;
    unsigned int node_idx;
    unsigned int xrnsam_entry;
    unsigned int irnsam_entry;
    struct cmn600_mxp_reg *xp;
    void *node;
    const struct mod_cmn600_config *config = ctx->config;

    fwk_assert(get_node_type(ctx->root) == NODE_TYPE_CFG);

    xrnsam_entry = 0;
    irnsam_entry = 0;

    /* Traverse cross points (XP) */
    xp_count = get_node_child_count(ctx->root);
    for (xp_idx = 0; xp_idx < xp_count; xp_idx++) {
        xp = get_child_node(config->base, ctx->root, xp_idx);
        fwk_assert(get_node_type(xp) == NODE_TYPE_XP);

        /* Traverse nodes */
        node_count = get_node_child_count(xp);
        for (node_idx = 0; node_idx < node_count; node_idx++) {
            node = get_child_node(config->base, xp, node_idx);

            if (is_child_external(xp, node_idx)) {
                unsigned int node_id = get_child_node_id(xp, node_idx);
                xp_port = get_port_number(get_child_node_id(xp, node_idx));

                /* Skip if the device type is CXG */
                if ((get_device_type(xp, xp_port) == DEVICE_TYPE_CXRH) ||
                    (get_device_type(xp, xp_port) == DEVICE_TYPE_CXHA) ||
                    (get_device_type(xp, xp_port) == DEVICE_TYPE_CXRA))
                    continue;

                fwk_assert(xrnsam_entry < ctx->external_rnsam_count);

                ctx->external_rnsam_table[xrnsam_entry].node_id = node_id;
                ctx->external_rnsam_table[xrnsam_entry].node = node;

                xrnsam_entry++;
            } else {
                enum node_type node_type = get_node_type(node);

                if (node_type == NODE_TYPE_RN_SAM) {
                    fwk_assert(irnsam_entry < ctx->internal_rnsam_count);

                    ctx->internal_rnsam_table[irnsam_entry] = node;

                    irnsam_entry++;
                } else if (node_type == NODE_TYPE_HN_F)
                    process_node_hnf(node);
            }
        }
    }
}

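/*
 * The number of non-hashed target node ID registers implemented in the
 * RN SAM depends on the CMN-600 revision, so derive it from the revision
 * rather than assuming a fixed count.
 */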
static int get_tgt_nodeid_reg_count(void)
{
    switch (get_cmn600_revision(ctx->root)) {
    case CMN600_PERIPH_ID_2_REV_R1_P0:
    case CMN600_PERIPH_ID_2_REV_R1_P1:
    case CMN600_PERIPH_ID_2_REV_R1_P3:
        return CMN600_RNSAM_NON_HASH_TGT_NODEID_REGS_2;

    case CMN600_PERIPH_ID_2_REV_R1_P2:
    case CMN600_PERIPH_ID_2_REV_R2_P0:
        return CMN600_RNSAM_NON_HASH_TGT_NODEID_REGS_3;

    case CMN600_PERIPH_ID_2_REV_R3_P0:
    case CMN600_PERIPH_ID_2_REV_R3_P1:
    case CMN600_PERIPH_ID_2_REV_R3_P2:
        return CMN600_RNSAM_NON_HASH_TGT_NODEID_REGS_5;

    default:
        fwk_unexpected();
        return FWK_E_DEVICE;
    }
}

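/*
 * Program one RN SAM node from the memory map given in the module
 * configuration: I/O and CCIX entries become non-hashed regions targeting
 * an HN-I or CXRA node, while system cache entries become hashed regions
 * spread across the HN-F nodes recorded during discovery.
 */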
int cmn600_setup_sam(struct cmn600_rnsam_reg *rnsam)
{
    unsigned int region_idx;
    unsigned int region_io_count = 0;
    unsigned int region_sys_count = 0;
    const struct mod_cmn600_memory_region_map *region;
    const struct mod_cmn600_config *config = ctx->config;
    unsigned int bit_pos;
    unsigned int group;
    unsigned int group_count;
    enum sam_node_type sam_node_type;
    uint64_t base;
    unsigned int scg_region = 0;
    unsigned int scg_regions_enabled[CMN600_MAX_NUM_SCG] = {0, 0, 0, 0};
    unsigned int tgt_nodeid_reg_count;
    volatile uint64_t *nodeid;
    int status;

    FWK_LOG_INFO(MOD_NAME "Configuring SAM for node %d", get_node_id(rnsam));

    status = get_tgt_nodeid_reg_count();
    if (status < 0)
        return FWK_E_DEVICE;
    tgt_nodeid_reg_count = (unsigned int)status;

    for (region_idx = 0; region_idx < config->mmap_count; region_idx++) {
        region = &config->mmap_table[region_idx];

        if (ctx->chip_id != 0) {
            if (region->type == MOD_CMN600_REGION_TYPE_CCIX)
                base = 0;
            else if (region->type == MOD_CMN600_MEMORY_REGION_TYPE_SYSCACHE)
                base = region->base;
            else {
                base = ((uint64_t)(ctx->config->chip_addr_space *
                                   ctx->chip_id) + region->base);
            }
        } else
            base = region->base;

        (void)mmap_type_name;
        FWK_LOG_INFO(
            MOD_NAME " [0x%08" PRIX32 "%08" PRIX32
            " - 0x%08" PRIX32 "%08" PRIX32 "] %s",
            (uint32_t)(base >> 32),
            (uint32_t)base,
            (uint32_t)((base + region->size - 1) >> 32),
            (uint32_t)(base + region->size - 1),
            mmap_type_name[region->type]);

        switch (region->type) {
        case MOD_CMN600_MEMORY_REGION_TYPE_IO:
        case MOD_CMN600_REGION_TYPE_CCIX:
            /*
             * Configure memory region
             */
            if (region_io_count > CMN600_RNSAM_MAX_NON_HASH_MEM_REGION_ENTRIES(
                    tgt_nodeid_reg_count)) {
                FWK_LOG_ERR(
                    MOD_NAME
                    "Non-Hashed Memory can have maximum of %d regions only",
                    CMN600_RNSAM_MAX_NON_HASH_MEM_REGION_ENTRIES(
                        tgt_nodeid_reg_count));
                return FWK_E_DATA;
            }

            group = region_io_count / CMN600_RNSAM_REGION_ENTRIES_PER_GROUP;
            bit_pos = (region_io_count %
                       CMN600_RNSAM_REGION_ENTRIES_PER_GROUP) *
                      CMN600_RNSAM_REGION_ENTRY_BITS_WIDTH;

            sam_node_type =
                (region->type == MOD_CMN600_MEMORY_REGION_TYPE_IO) ?
                SAM_NODE_TYPE_HN_I : SAM_NODE_TYPE_CXRA;
            configure_region(&rnsam->NON_HASH_MEM_REGION[group],
                bit_pos,
                base,
                region->size,
                sam_node_type);
            /*
             * Configure target node
             */
            group = region_io_count /
                CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRIES_PER_GROUP;
            if (group > tgt_nodeid_reg_count) {
                FWK_LOG_ERR(
                    MOD_NAME
                    "Non-Hashed Memory can have maximum of %d groups only",
                    tgt_nodeid_reg_count);
                return FWK_E_DATA;
            }

            bit_pos = CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRY_BITS_WIDTH *
                (region_io_count %
                 CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRIES_PER_GROUP);

            if (group < CMN600_RNSAM_NON_HASH_TGT_NODEID_REGS_3) {
                nodeid = &rnsam->NON_HASH_TGT_NODEID[group];
            } else if (group < CMN600_RNSAM_NON_HASH_TGT_NODEID_REGS_5) {
                /*
                 * CMN-600 revisions R3P0 and above have five
                 * NON_HASH_TGT_NODEID registers. The last two registers are
                 * implemented at a different offset address.
                 */
                nodeid = &rnsam->NON_HASH_TGT_NODEID_GRP2
                    [group - CMN600_RNSAM_NON_HASH_TGT_NODEID_REGS_3];
            } else {
                FWK_LOG_ERR(MOD_NAME "Invalid Non-Hashed target node ID group");
                return FWK_E_DATA;
            }

            *nodeid &=
                ~(CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRY_MASK << bit_pos);
            *nodeid |=
                (region->node_id & CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRY_MASK)
                << bit_pos;

            region_io_count++;
            break;

        case MOD_CMN600_MEMORY_REGION_TYPE_SYSCACHE:
            /*
             * Configure memory region
             */
            if (region_sys_count >= CMN600_RNSAM_MAX_HASH_MEM_REGION_ENTRIES) {
                FWK_LOG_ERR(
                    MOD_NAME
                    "Hashed Memory can have maximum of %d regions only",
                    CMN600_RNSAM_MAX_HASH_MEM_REGION_ENTRIES);
                return FWK_E_DATA;
            }

            group = region_sys_count / CMN600_RNSAM_REGION_ENTRIES_PER_GROUP;
            bit_pos = (region_sys_count %
                       CMN600_RNSAM_REGION_ENTRIES_PER_GROUP) *
                      CMN600_RNSAM_REGION_ENTRY_BITS_WIDTH;
            configure_region(&rnsam->SYS_CACHE_GRP_REGION[group],
                bit_pos,
                region->base,
                region->size,
                SAM_NODE_TYPE_HN_F);

            /* Mark corresponding region as enabled */
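            /*
             * The SCG index is derived from the register layout: each 64-bit
             * SYS_CACHE_GRP_REGION register carries two region descriptors,
             * hence (2 * group) + (bit_pos / 32).
             */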
            scg_region = (2 * group) + (bit_pos / 32);
            fwk_assert(scg_region < CMN600_MAX_NUM_SCG);
            scg_regions_enabled[scg_region] = 1;

            region_sys_count++;
            break;

        case MOD_CMN600_REGION_TYPE_SYSCACHE_NONHASH:
            group = region_sys_count / CMN600_RNSAM_REGION_ENTRIES_PER_GROUP;
            bit_pos = (region_sys_count %
                       CMN600_RNSAM_REGION_ENTRIES_PER_GROUP) *
                      CMN600_RNSAM_REGION_ENTRY_BITS_WIDTH;
            /*
             * Configure memory region
             */
            configure_region(&rnsam->SYS_CACHE_GRP_REGION[group],
                bit_pos,
                region->base,
                region->size,
                SAM_NODE_TYPE_HN_I);

            rnsam->SYS_CACHE_GRP_REGION[group] |= (UINT64_C(0x2) << bit_pos);
            bit_pos = CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRY_BITS_WIDTH *
                ((region_sys_count - 1) %
                 CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRIES_PER_GROUP);
            rnsam->SYS_CACHE_GRP_NOHASH_NODEID &=
                ~(CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRY_MASK << bit_pos);
            rnsam->SYS_CACHE_GRP_NOHASH_NODEID |= (region->node_id &
                CMN600_RNSAM_NON_HASH_TGT_NODEID_ENTRY_MASK) << bit_pos;

            region_sys_count++;
            break;

        case MOD_CMN600_REGION_TYPE_SYSCACHE_SUB:
            /* Do nothing. System cache sub-regions are handled by HN-Fs */
            break;

        default:
            fwk_unexpected();
            return FWK_E_DATA;
        }
    }

    group_count = cmn600_hnf_cache_group_count(ctx->hnf_count);
    for (group = 0; group < group_count; group++)
        rnsam->SYS_CACHE_GRP_HN_NODEID[group] = ctx->hnf_cache_group[group];

    /* Program the number of HNFs */
    rnsam->SYS_CACHE_GRP_HN_COUNT = ctx->hnf_count;

    /* Use CAL mode only if the CMN600 revision is r2p0 or above */
    if (is_cal_mode_supported(ctx->root) && config->hnf_cal_mode) {
        for (region_idx = 0; region_idx < CMN600_MAX_NUM_SCG; region_idx++)
            rnsam->SYS_CACHE_GRP_CAL_MODE |= scg_regions_enabled[region_idx] *
                (CMN600_RNSAM_SCG_HNF_CAL_MODE_EN <<
                 (region_idx * CMN600_RNSAM_SCG_HNF_CAL_MODE_SHIFT));
    }

    /* Enable RNSAM */
    rnsam->STATUS = CMN600_RNSAM_STATUS_UNSTALL;
    __sync_synchronize();

    return FWK_SUCCESS;
}

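/*
 * Top-level programming sequence: discover the mesh once, allocate the
 * tables sized by that discovery, then walk the mesh again to configure
 * the HN-F nodes and finally every internal RN SAM.
 */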
static int cmn600_setup(void)
{
    unsigned int rnsam_idx, i, ccix_mmap_idx;
    int status;

    if (!ctx->initialized) {
        status = cmn600_discovery();
        if (status != FWK_SUCCESS)
            return status;
        /*
         * Allocate resources based on the discovery
         */

        /* Pointers for the internal RN-SAM nodes */
        if (ctx->internal_rnsam_count != 0) {
            ctx->internal_rnsam_table = fwk_mm_calloc(
                ctx->internal_rnsam_count, sizeof(*ctx->internal_rnsam_table));
        }

        /* Tuples for the external RN-SAM nodes (including their node IDs) */
        if (ctx->external_rnsam_count != 0) {
            ctx->external_rnsam_table = fwk_mm_calloc(
                ctx->external_rnsam_count, sizeof(*ctx->external_rnsam_table));
        }

        /* Cache groups */
        if (ctx->hnf_count != 0) {
            /*
             * Allocate enough group descriptors to accommodate all expected
             * HN-F nodes in the system.
             */
            ctx->hnf_cache_group = fwk_mm_calloc(
                cmn600_hnf_cache_group_count(ctx->hnf_count),
                sizeof(*ctx->hnf_cache_group));
        }
    }

    cmn600_configure();

    /* Setup internal RN-SAM nodes */
    for (rnsam_idx = 0; rnsam_idx < ctx->internal_rnsam_count; rnsam_idx++)
        cmn600_setup_sam(ctx->internal_rnsam_table[rnsam_idx]);

    /* Capture CCIX Host Topology */
    for (i = 0; i < ctx->config->mmap_count; i++) {
        if (ctx->config->mmap_table[i].type == MOD_CMN600_REGION_TYPE_CCIX) {
            ccix_mmap_idx = ctx->ccix_host_info.ccix_host_mmap_count;
            if (ccix_mmap_idx >= MAX_HA_MMAP_ENTRIES)
                return FWK_E_DATA;

            ctx->ccix_host_info.ccix_host_mmap[ccix_mmap_idx].base =
                ctx->config->mmap_table[i].base;
            ctx->ccix_host_info.ccix_host_mmap[ccix_mmap_idx].size =
                ctx->config->mmap_table[i].size;
            ctx->ccix_host_info.ccix_host_mmap_count++;
        }
    }

    FWK_LOG_INFO(MOD_NAME "Done");

    ctx->initialized = true;

    return FWK_SUCCESS;
}

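/*
 * Configure the RN SAM of the external node with the given CMN-600 node ID,
 * for example when the corresponding power domain is turned on.
 */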
static int cmn600_setup_rnsam(unsigned int node_id)
{
    unsigned int node_idx;

    for (node_idx = 0; node_idx < ctx->external_rnsam_count; node_idx++) {
        if (ctx->external_rnsam_table[node_idx].node_id == node_id) {
            cmn600_setup_sam(ctx->external_rnsam_table[node_idx].node);
            return FWK_SUCCESS;
        }
    }

    return FWK_E_PARAM;
}

/*
 * PPUv1 State Observer API
 */

static void post_ppu_on(void *data)
{
    fwk_assert(data != NULL);
    cmn600_setup_rnsam(*(unsigned int *)data);
}

static const struct mod_ppu_v1_power_state_observer_api cmn600_observer_api = {
    .post_ppu_on = post_ppu_on,
};

/*
 * CCIX configuration APIs invoked by SCMI
 */

static int cmn600_ccix_config_get(
    struct mod_cmn600_ccix_host_node_config *config)
{
    if (ctx->internal_rnsam_count == 0)
        return FWK_E_DATA;

    ctx->ccix_host_info.host_ra_count =
        ctx->internal_rnsam_count + ctx->external_rnsam_count;
    ctx->ccix_host_info.host_sa_count = ctx->config->sa_count;

    ccix_capabilities_get(ctx);

    memcpy((void *)config, (void *)&ctx->ccix_host_info,
        sizeof(struct mod_cmn600_ccix_host_node_config));
    return FWK_SUCCESS;
}

static int cmn600_ccix_config_set(
    struct mod_cmn600_ccix_remote_node_config *config)
{
    unsigned int i;
    int status;

    status = ccix_setup(ctx, config);
    if (status != FWK_SUCCESS)
        return status;

    for (i = 0; i < ctx->config->mmap_count; i++) {
        if (ctx->config->mmap_table[i].type == MOD_CMN600_REGION_TYPE_CCIX)
            cmn600_setup_rnsam(ctx->config->mmap_table[i].node_id);
    }
    return FWK_SUCCESS;
}

static int cmn600_ccix_exchange_protocol_credit(uint8_t link_id)
{
    return ccix_exchange_protocol_credit(ctx, link_id);
}

static int cmn600_ccix_enter_system_coherency(uint8_t link_id)
{
    return ccix_enter_system_coherency(ctx, link_id);
}

static int cmn600_ccix_enter_dvm_domain(uint8_t link_id)
{
    return ccix_enter_dvm_domain(ctx, link_id);
}

static const struct mod_cmn600_ccix_config_api cmn600_ccix_config_api = {
    .get_config = cmn600_ccix_config_get,
    .set_config = cmn600_ccix_config_set,
    .exchange_protocol_credit = cmn600_ccix_exchange_protocol_credit,
    .enter_system_coherency = cmn600_ccix_enter_system_coherency,
    .enter_dvm_domain = cmn600_ccix_enter_dvm_domain,
};

/*
 * Framework handlers
 */

static int cmn600_init(fwk_id_t module_id, unsigned int element_count,
    const void *data)
{
    const struct mod_cmn600_config *config = data;

    /* No elements support */
    if (element_count > 0)
        return FWK_E_DATA;

    /* Allocate space for the context */
    ctx = fwk_mm_calloc(1, sizeof(*ctx));

    if (config->base == 0)
        return FWK_E_DATA;

    if ((config->mesh_size_x == 0) || (config->mesh_size_x > CMN600_MESH_X_MAX))
        return FWK_E_DATA;

    if ((config->mesh_size_y == 0) || (config->mesh_size_y > CMN600_MESH_Y_MAX))
        return FWK_E_DATA;

    if (config->snf_count > CMN600_HNF_CACHE_GROUP_ENTRIES_MAX)
        return FWK_E_DATA;

    ctx->root = get_root_node(config->base, config->hnd_node_id,
        config->mesh_size_x, config->mesh_size_y);

    ctx->config = config;

    return FWK_SUCCESS;
}

static int cmn600_bind(fwk_id_t id, unsigned int round)
{
    int status;

    /* Use second round only (round numbering is zero-indexed) */
    if (round == 1) {

        /* Bind to the timer component */
        status = fwk_module_bind(FWK_ID_ELEMENT(FWK_MODULE_IDX_TIMER, 0),
            FWK_ID_API(FWK_MODULE_IDX_TIMER, MOD_TIMER_API_IDX_TIMER),
            &ctx->timer_api);
        if (status != FWK_SUCCESS)
            return FWK_E_PANIC;

        /* Bind to system info module to obtain multi-chip info */
        status = fwk_module_bind(FWK_ID_MODULE(FWK_MODULE_IDX_SYSTEM_INFO),
            FWK_ID_API(FWK_MODULE_IDX_SYSTEM_INFO,
                MOD_SYSTEM_INFO_GET_API_IDX),
            &system_info_api);
        if (status != FWK_SUCCESS)
            return FWK_E_PANIC;
    }

    return FWK_SUCCESS;
}

static int cmn600_process_bind_request(fwk_id_t requester_id,
    fwk_id_t target_id, fwk_id_t api_id, const void **api)
{
    switch (fwk_id_get_api_idx(api_id)) {
    case MOD_CMN600_API_IDX_PPU_OBSERVER:
        *api = &cmn600_observer_api;
        break;

    case MOD_CMN600_API_IDX_CCIX_CONFIG:
        *api = &cmn600_ccix_config_api;
        break;
    }

    return FWK_SUCCESS;
}

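/*
 * If no clock dependency is configured, the mesh can be programmed
 * immediately; otherwise programming is deferred until the clock state
 * notification reports the clock as running.
 */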
int cmn600_start(fwk_id_t id)
{
    uint8_t chip_id = 0;
    bool mc_mode = false;
    int status;

    if (fwk_id_is_equal(ctx->config->clock_id, FWK_ID_NONE)) {
        cmn600_setup();
        return FWK_SUCCESS;
    }

    status = system_info_api->get_system_info(&ctx->system_info);
    if (status == FWK_SUCCESS) {
        chip_id = ctx->system_info->chip_id;
        mc_mode = ctx->system_info->multi_chip_mode;
    }

    ctx->chip_id = chip_id;

    (void)mc_mode;
    FWK_LOG_INFO(MOD_NAME "Multichip mode: %s", mc_mode ? "yes" : "no");
    FWK_LOG_INFO(MOD_NAME "Chip ID: %d", chip_id);

    /* Register the module for clock state notifications */
    return fwk_notification_subscribe(
        mod_clock_notification_id_state_changed,
        ctx->config->clock_id,
        id);
}

static int cmn600_process_notification(
    const struct fwk_event *event,
    struct fwk_event *resp_event)
{
    struct clock_notification_params *params;

    fwk_assert(
        fwk_id_is_equal(event->id, mod_clock_notification_id_state_changed));
    fwk_assert(fwk_id_is_type(event->target_id, FWK_ID_TYPE_MODULE));

    params = (struct clock_notification_params *)event->params;

    if (params->new_state == MOD_CLOCK_STATE_RUNNING)
        cmn600_setup();

    return FWK_SUCCESS;
}

const struct fwk_module module_cmn600 = {
    .type = FWK_MODULE_TYPE_DRIVER,
    .api_count = MOD_CMN600_API_COUNT,
    .init = cmn600_init,
    .bind = cmn600_bind,
    .start = cmn600_start,
    .process_bind_request = cmn600_process_bind_request,
    .process_notification = cmn600_process_notification,
};