1 /*
2 * Arm SCP/MCP Software
3 * Copyright (c) 2021-2024, Arm Limited and Contributors. All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8 #include <mpmm.h>
9
10 #include <mod_mpmm.h>
11 #include <mod_power_domain.h>
12 #include <mod_scmi_perf.h>
13
14 #include <interface_amu.h>
15
16 #include <fwk_assert.h>
17 #include <fwk_core.h>
18 #include <fwk_id.h>
19 #include <fwk_log.h>
20 #include <fwk_mm.h>
21 #include <fwk_module.h>
22 #include <fwk_notification.h>
23 #include <fwk_status.h>
24 #include <fwk_string.h>
25
/* Per-core context: MPMM register access and counter bookkeeping. */
struct mod_mpmm_core_ctx {
    /* Core Identifier */
    fwk_id_t core_id;

    /* MPMM registers */
    struct mpmm_reg *mpmm;

    /* The core is online */
    bool online;

    /* Current selected threshold (gear) programmed via MPMMCR */
    uint32_t threshold;

    /* Cached counters: last AMU counter values read, one per threshold */
    uint64_t *cached_counters;

    /* Thresholds delta: per-counter increase since the previous read */
    uint64_t *delta;

    /* Used to block the PD when transitioning from OFF to ON */
    bool pd_blocked;

    /* Cookie to un-block the PD transition from OFF to ON */
    uint32_t cookie;

    /* Identifier of the base AMU auxiliary counter */
    fwk_id_t base_aux_counter_id;
};
54
/* Per-domain context: aggregates the cores sharing one performance domain. */
struct mod_mpmm_domain_ctx {
    /* Context Domain ID */
    fwk_id_t domain_id;

    /* Number of cores to monitor */
    uint32_t num_cores;

    /* Number of cores online */
    uint32_t num_cores_online;

    /* Threshold map: per-core thresholds packed into one bitmap (see PCT) */
    uint32_t threshold_map;

    /* Latest perf level value as reported by the plugin handler */
    uint32_t current_perf_level;

    /* Latest perf limit value required by mpmm */
    uint32_t perf_limit;

    /* Wait for the report callback to confirm perf transition completion */
    bool wait_for_perf_transition;

    /* Core context */
    struct mod_mpmm_core_ctx core_ctx[MPMM_MAX_NUM_CORES_IN_DOMAIN];

    /* Domain configuration */
    const struct mod_mpmm_domain_config *domain_config;
};
83
/* Module-wide context (single instance). */
static struct mod_mpmm_ctx {
    /* Number of MPMM domains */
    uint32_t mpmm_domain_count;

    /* Domain context table */
    struct mod_mpmm_domain_ctx *domain_ctx;

    /* Perf plugin API */
    struct perf_plugins_handler_api *perf_plugins_handler_api;

    /* AMU driver API */
    struct amu_api *amu_driver_api;

    /* Identifier of the AMU driver API used to read auxiliary counters */
    fwk_id_t amu_driver_api_id;
} mpmm_ctx;
100
101 /*
102 * MPMM and AMU Registers access functions
103 */
104
105 /* Read the number of MPMM threshold levels. */
mpmm_core_get_number_of_thresholds(struct mod_mpmm_core_ctx * core_ctx,uint32_t * num_thresholds)106 static void mpmm_core_get_number_of_thresholds(
107 struct mod_mpmm_core_ctx *core_ctx,
108 uint32_t *num_thresholds)
109 {
110 *num_thresholds =
111 ((core_ctx->mpmm->PPMCR >> MPMM_PPMCR_NUM_GEARS_POS) &
112 MPMM_PPMCR_NUM_GEARS_MASK);
113 }
114
115 /* Check if the MPMM throttling and MPMM counters are enabled for a core. */
mpmm_core_check_enabled(struct mod_mpmm_core_ctx * core_ctx,bool * enabled)116 static void mpmm_core_check_enabled(
117 struct mod_mpmm_core_ctx *core_ctx,
118 bool *enabled)
119 {
120 *enabled =
121 (bool)((core_ctx->mpmm->MPMMCR >> MPMM_MPMMCR_EN_POS) & MPMM_MPMMCR_EN_MASK);
122 }
123
124 /* Set the MPMM threshold for a specific core. */
mpmm_core_set_threshold(struct mod_mpmm_core_ctx * core_ctx)125 static void mpmm_core_set_threshold(struct mod_mpmm_core_ctx *core_ctx)
126 {
127 core_ctx->mpmm->MPMMCR =
128 (core_ctx->mpmm->MPMMCR &
129 ~(MPMM_MPMMCR_GEAR_MASK << MPMM_MPMMCR_GEAR_POS)) |
130 ((core_ctx->threshold & MPMM_MPMMCR_GEAR_MASK) << MPMM_MPMMCR_GEAR_POS);
131 }
132
133 /*
134 * MPMM Module Helper Functions
135 */
get_domain_ctx(fwk_id_t domain_id)136 static struct mod_mpmm_domain_ctx *get_domain_ctx(fwk_id_t domain_id)
137 {
138 uint32_t idx = fwk_id_get_element_idx(domain_id);
139
140 if (idx < mpmm_ctx.mpmm_domain_count) {
141 return &mpmm_ctx.domain_ctx[idx];
142 } else {
143 return NULL;
144 }
145 }
146
mpmm_core_counters_delta(struct mod_mpmm_domain_ctx * domain_ctx,struct mod_mpmm_core_ctx * core_ctx)147 static void mpmm_core_counters_delta(
148 struct mod_mpmm_domain_ctx *domain_ctx,
149 struct mod_mpmm_core_ctx *core_ctx)
150 {
151 int status;
152 uint32_t th_count = domain_ctx->domain_config->num_threshold_counters;
153 uint32_t i;
154 static uint64_t counter_buff[MPMM_MAX_THRESHOLD_COUNT];
155
156 fwk_str_memset(counter_buff, 0, sizeof(counter_buff));
157
158 status = mpmm_ctx.amu_driver_api->get_counters(
159 core_ctx->base_aux_counter_id, counter_buff, th_count);
160 if (status != FWK_SUCCESS) {
161 FWK_LOG_DEBUG(
162 "[MPMM] %s @%d: AMU counter read fail, error=%d",
163 __func__,
164 __LINE__,
165 status);
166 return;
167 }
168
169 /*
170 * Each MPMM threshold has an associated counter. The counters are
171 * indexed in the same order as the MPMM thresholds for the platform.
172 */
173 for (i = 0; i < th_count; i++) {
174 /* Calculate the delta */
175 if (counter_buff[i] < core_ctx->cached_counters[i]) {
176 /* Counter wraparound case */
177 core_ctx->delta[i] = UINT64_MAX - core_ctx->cached_counters[i];
178 core_ctx->delta[i] += counter_buff[i];
179 } else {
180 core_ctx->delta[i] = counter_buff[i] - core_ctx->cached_counters[i];
181 }
182 /* Store the last value */
183 core_ctx->cached_counters[i] = counter_buff[i];
184 }
185 }
186
187 /*
188 * This function returns the selected threshold based on the btc value.
189 */
mpmm_core_threshold_policy(struct mod_mpmm_domain_ctx * domain_ctx,struct mod_mpmm_core_ctx * core_ctx)190 static uint32_t mpmm_core_threshold_policy(
191 struct mod_mpmm_domain_ctx *domain_ctx,
192 struct mod_mpmm_core_ctx *core_ctx)
193 {
194 uint32_t thr_idx;
195 uint32_t const highest_gear =
196 domain_ctx->domain_config->num_threshold_counters;
197
198 /*
199 * Select the highest gear whose counter delta is just below the btc value.
200 */
201 for (thr_idx = 0; thr_idx < highest_gear; thr_idx++) {
202 if (core_ctx->delta[thr_idx] <= domain_ctx->domain_config->btc) {
203 return thr_idx;
204 }
205 }
206
207 /*
208 * It is not expected that all counters will cross the BTC. If this scenario
209 * is encountered set throttling to a minimum.
210 */
211 return (highest_gear - 1);
212 }
213
214 /* set the threshold for all cores */
mpmm_domain_set_thresholds(struct mod_mpmm_domain_ctx * ctx)215 static void mpmm_domain_set_thresholds(struct mod_mpmm_domain_ctx *ctx)
216 {
217 uint32_t core_idx;
218 struct mod_mpmm_core_ctx *core_ctx;
219
220 for (core_idx = 0; core_idx < ctx->num_cores; core_idx++) {
221 core_ctx = &ctx->core_ctx[core_idx];
222 if (core_ctx->online) {
223 mpmm_core_set_threshold(core_ctx);
224 }
225 }
226 }
227
mpmm_core_evaluate_threshold(struct mod_mpmm_domain_ctx * domain_ctx,struct mod_mpmm_core_ctx * core_ctx)228 static void mpmm_core_evaluate_threshold(
229 struct mod_mpmm_domain_ctx *domain_ctx,
230 struct mod_mpmm_core_ctx *core_ctx)
231 {
232 bool enabled;
233
234 /* If counters are not enabled exit */
235 mpmm_core_check_enabled(core_ctx, &enabled);
236 if (!enabled) {
237 core_ctx->threshold = domain_ctx->domain_config->num_threshold_counters;
238 return;
239 }
240
241 /* Read counters */
242 mpmm_core_counters_delta(domain_ctx, core_ctx);
243
244 /* Threshold selection policy */
245 core_ctx->threshold = mpmm_core_threshold_policy(domain_ctx, core_ctx);
246
247 return;
248 }
249
find_perf_limit_from_pct(struct mod_mpmm_pct_table * pct_config,uint32_t threshold_map)250 static uint32_t find_perf_limit_from_pct(
251 struct mod_mpmm_pct_table *pct_config,
252 uint32_t threshold_map)
253 {
254 int j;
255
256 for (j = pct_config->num_perf_limits - 1; j >= 0; j--) {
257 if (threshold_map <= pct_config->threshold_perf[j].threshold_bitmap) {
258 return pct_config->threshold_perf[j].perf_limit;
259 }
260 }
261
262 /* If no threshold_map was found select the default limits */
263 return pct_config->default_perf_limit;
264 }
265
266 /* Convert the thresholds into a bitmap as described by the PCT */
mpmm_build_threshold_map(struct mod_mpmm_domain_ctx * ctx)267 static void mpmm_build_threshold_map(struct mod_mpmm_domain_ctx *ctx)
268 {
269 uint32_t i, j, thr_tmp, thr_map = 0;
270 uint32_t threshold_array[MPMM_MAX_NUM_CORES_IN_DOMAIN] = { 0 };
271
272 /* Copy all CPU threshold values to an array */
273 for (i = 0, j = 0; i < ctx->num_cores; i++) {
274 if (ctx->core_ctx[i].online) {
275 threshold_array[j] = ctx->core_ctx[i].threshold;
276 j++;
277 }
278 }
279
280 /* Threshold sorting in descending order */
281 for (i = 0; i < ctx->num_cores_online; i++) {
282 for (j = i + 1; j < ctx->num_cores_online; j++) {
283 if (threshold_array[i] > threshold_array[j]) {
284 thr_tmp = threshold_array[i];
285 threshold_array[i] = threshold_array[j];
286 threshold_array[j] = thr_tmp;
287 }
288 }
289 }
290
291 for (i = 0; i < ctx->num_cores_online; i++) {
292 thr_map |= (threshold_array[i] << (MPMM_THRESHOLD_MAP_NUM_OF_BITS * i));
293 }
294
295 ctx->threshold_map = thr_map;
296 }
297
mpmm_evaluate_perf_limit(struct mod_mpmm_domain_ctx * ctx)298 static uint32_t mpmm_evaluate_perf_limit(struct mod_mpmm_domain_ctx *ctx)
299 {
300 struct mod_mpmm_pct_table *pct_config;
301 size_t pct_size;
302 int pct_idx;
303
304 /* Parse PCT table from the bottom-up*/
305 pct_config = ctx->domain_config->pct;
306 pct_size = ctx->domain_config->pct_size;
307 /* Start from the last index */
308 for (pct_idx = (pct_size - 1); pct_idx >= 0; pct_idx--) {
309 /* Find the entry based on the number of online cores */
310 if (ctx->num_cores_online <= pct_config[pct_idx].cores_online) {
311 /* Find the performance limit */
312 return find_perf_limit_from_pct(
313 &pct_config[pct_idx], ctx->threshold_map);
314 }
315 }
316
317 /* If no entry was found, select the highest number of cores available */
318 return find_perf_limit_from_pct(&pct_config[0], ctx->threshold_map);
319 }
320
321 /* Check CPU status and update performance limits accordingly */
mpmm_monitor_and_control(struct mod_mpmm_domain_ctx * domain_ctx)322 static void mpmm_monitor_and_control(struct mod_mpmm_domain_ctx *domain_ctx)
323 {
324 uint32_t core_idx;
325 struct mod_mpmm_core_ctx *core_ctx;
326
327 if (domain_ctx->num_cores_online == 0) {
328 return;
329 }
330
331 /* Core level algorithm */
332 for (core_idx = 0; core_idx < domain_ctx->num_cores; core_idx++) {
333 core_ctx = &domain_ctx->core_ctx[core_idx];
334
335 if (!core_ctx->online) {
336 continue;
337 }
338
339 mpmm_core_evaluate_threshold(domain_ctx, core_ctx);
340 }
341
342 mpmm_build_threshold_map(domain_ctx);
343
344 /* Cache the last value */
345 domain_ctx->perf_limit = mpmm_evaluate_perf_limit(domain_ctx);
346 }
347
348 /* Module APIs */
349
350 /*
351 * update function should be called periodically to monitor the threshold
352 * counters and update the performance limits.
353 *
354 */
mpmm_update(struct perf_plugins_perf_update * data)355 static int mpmm_update(struct perf_plugins_perf_update *data)
356 {
357 struct mod_mpmm_domain_ctx *domain_ctx;
358 uint32_t domain_idx;
359 /*
360 * Get the performance element id from the sub-element provided in the
361 * function argument.
362 */
363 fwk_id_t perf_id = FWK_ID_ELEMENT(
364 FWK_MODULE_IDX_DVFS, fwk_id_get_element_idx(data->domain_id));
365
366 for (domain_idx = 0; domain_idx < mpmm_ctx.mpmm_domain_count;
367 domain_idx++) {
368 if (fwk_id_is_equal(
369 mpmm_ctx.domain_ctx[domain_idx].domain_config->perf_id,
370 perf_id)) {
371 break;
372 }
373 }
374
375 if (domain_idx == mpmm_ctx.mpmm_domain_count) {
376 return FWK_E_PARAM;
377 }
378
379 domain_ctx = &mpmm_ctx.domain_ctx[domain_idx];
380
381 if (domain_ctx->num_cores_online == 0) {
382 return FWK_SUCCESS;
383 }
384
385 mpmm_monitor_and_control(domain_ctx);
386
387 /* Update the new performance limits */
388 data->adj_max_limit[0] = domain_ctx->perf_limit;
389
390 /* set the flag to wait for the transition to complete. */
391 if (domain_ctx->perf_limit < domain_ctx->current_perf_level) {
392 domain_ctx->wait_for_perf_transition = true;
393 return FWK_SUCCESS;
394 }
395
396 mpmm_domain_set_thresholds(domain_ctx);
397 return FWK_SUCCESS;
398 }
399
mpmm_report(struct perf_plugins_perf_report * data)400 static int mpmm_report(struct perf_plugins_perf_report *data)
401 {
402 int status;
403 uint32_t domain_idx;
404 uint32_t core_idx;
405 struct fwk_event resp_notif;
406 struct mod_mpmm_domain_ctx *domain_ctx;
407 struct mod_pd_power_state_pre_transition_notification_resp_params
408 *pd_resp_params =
409 (struct mod_pd_power_state_pre_transition_notification_resp_params
410 *)resp_notif.params;
411 /*
412 * Get the performance element id from the sub-element provided in the
413 * function argument.
414 */
415 fwk_id_t perf_id = FWK_ID_ELEMENT(
416 FWK_MODULE_IDX_DVFS, fwk_id_get_element_idx(data->dep_dom_id));
417
418 for (domain_idx = 0; domain_idx < mpmm_ctx.mpmm_domain_count;
419 domain_idx++) {
420 if (fwk_id_is_equal(
421 mpmm_ctx.domain_ctx[domain_idx].domain_config->perf_id,
422 perf_id)) {
423 break;
424 }
425 }
426
427 if (domain_idx == mpmm_ctx.mpmm_domain_count) {
428 return FWK_E_PARAM;
429 }
430
431 domain_ctx = &mpmm_ctx.domain_ctx[domain_idx];
432
433 domain_ctx->current_perf_level = data->level;
434
435 if (!domain_ctx->wait_for_perf_transition) {
436 return FWK_SUCCESS;
437 }
438
439 domain_ctx->wait_for_perf_transition = false;
440
441 mpmm_domain_set_thresholds(domain_ctx);
442
443 /*
444 * If a previous core wake-up sequence was delayed to re-evaluate the MPMM
445 * thresholds and perf limits, then respond to the power domain notification
446 * so the core can now be turned on.
447 */
448 for (core_idx = 0; core_idx < domain_ctx->num_cores; core_idx++) {
449 if (domain_ctx->core_ctx[core_idx].pd_blocked) {
450 domain_ctx->core_ctx[core_idx].pd_blocked = false;
451
452 status = fwk_get_delayed_response(
453 domain_ctx->domain_id,
454 domain_ctx->core_ctx[core_idx].cookie,
455 &resp_notif);
456 if (status != FWK_SUCCESS) {
457 return status;
458 }
459
460 pd_resp_params->status = FWK_SUCCESS;
461 status = fwk_put_event(&resp_notif);
462 if (status != FWK_SUCCESS) {
463 return status;
464 }
465 }
466 }
467 return FWK_SUCCESS;
468 }
469
/* Perf plugin interface returned to the SCMI-perf plugins handler on bind. */
static struct perf_plugins_api perf_plugins_api = {
    .update = mpmm_update,
    .report = mpmm_report,
};
474
475 /*
476 * Framework handlers
477 */
mpmm_init(fwk_id_t module_id,unsigned int element_count,const void * data)478 static int mpmm_init(
479 fwk_id_t module_id,
480 unsigned int element_count,
481 const void *data)
482 {
483 if (element_count == 0) {
484 return FWK_E_PARAM;
485 }
486
487 mpmm_ctx.mpmm_domain_count = element_count;
488 mpmm_ctx.domain_ctx =
489 fwk_mm_calloc(element_count, sizeof(struct mod_mpmm_domain_ctx));
490 mpmm_ctx.amu_driver_api_id = *(fwk_id_t *)data;
491
492 return FWK_SUCCESS;
493 }
494
mpmm_element_init(fwk_id_t domain_id,unsigned int sub_element_count,const void * data)495 static int mpmm_element_init(
496 fwk_id_t domain_id,
497 unsigned int sub_element_count,
498 const void *data)
499 {
500 struct mod_mpmm_domain_ctx *domain_ctx;
501 struct mod_mpmm_core_ctx *core_ctx;
502 struct mod_mpmm_core_config const *core_config;
503 uint32_t core_idx;
504 uint32_t num_thresholds;
505
506 if ((sub_element_count == 0) ||
507 (sub_element_count > MPMM_MAX_NUM_CORES_IN_DOMAIN)) {
508 return FWK_E_PARAM;
509 }
510
511 domain_ctx = get_domain_ctx(domain_id);
512 domain_ctx->domain_id = domain_id;
513 domain_ctx->num_cores = sub_element_count;
514 domain_ctx->wait_for_perf_transition = false;
515
516 /* Initialize the configuration */
517 domain_ctx->domain_config = data;
518 fwk_assert(domain_ctx->domain_config->pct != NULL);
519
520 if (domain_ctx->domain_config->num_threshold_counters >
521 MPMM_MAX_THRESHOLD_COUNT) {
522 return FWK_E_SUPPORT;
523 }
524
525 /* Initialize each core */
526 for (core_idx = 0; core_idx < domain_ctx->num_cores; core_idx++) {
527 core_ctx = &domain_ctx->core_ctx[core_idx];
528 core_ctx->core_id = fwk_id_build_sub_element_id(domain_id, core_idx);
529 core_config = &domain_ctx->domain_config->core_config[core_idx];
530 core_ctx->mpmm = (struct mpmm_reg *)core_config->mpmm_reg_base;
531 core_ctx->base_aux_counter_id = core_config->base_aux_counter_id;
532
533 mpmm_core_get_number_of_thresholds(core_ctx, &num_thresholds);
534
535 if (num_thresholds !=
536 domain_ctx->domain_config->num_threshold_counters) {
537 return FWK_E_DEVICE;
538 }
539
540 /* Create counters storage */
541 core_ctx->cached_counters = fwk_mm_calloc(
542 domain_ctx->domain_config->num_threshold_counters,
543 sizeof(*core_ctx->cached_counters));
544 core_ctx->delta = fwk_mm_calloc(
545 domain_ctx->domain_config->num_threshold_counters,
546 sizeof(*core_ctx->delta));
547
548 if (core_config->core_starts_online) {
549 domain_ctx->num_cores_online++;
550 core_ctx->online = true;
551 }
552 }
553
554 return FWK_SUCCESS;
555 }
556
mpmm_start(fwk_id_t id)557 static int mpmm_start(fwk_id_t id)
558 {
559 int status;
560 uint32_t i;
561 struct mod_mpmm_domain_ctx *domain_ctx;
562
563 if (fwk_module_is_valid_module_id(id)) {
564 return FWK_SUCCESS;
565 }
566
567 /* Subscribe to core power state transition */
568 domain_ctx = get_domain_ctx(id);
569
570 for (i = 0; i < domain_ctx->num_cores; i++) {
571 status = fwk_notification_subscribe(
572 mod_pd_notification_id_power_state_pre_transition,
573 domain_ctx->domain_config->core_config[i].pd_id,
574 domain_ctx->domain_id);
575 if (status != FWK_SUCCESS) {
576 return status;
577 }
578
579 status = fwk_notification_subscribe(
580 mod_pd_notification_id_power_state_transition,
581 domain_ctx->domain_config->core_config[i].pd_id,
582 domain_ctx->domain_id);
583 if (status != FWK_SUCCESS) {
584 return status;
585 }
586 }
587
588 return FWK_SUCCESS;
589 }
590
mpmm_process_notification(const struct fwk_event * event,struct fwk_event * resp_event)591 static int mpmm_process_notification(
592 const struct fwk_event *event,
593 struct fwk_event *resp_event)
594 {
595 struct mod_pd_power_state_pre_transition_notification_params
596 *pre_state_params;
597 struct mod_pd_power_state_transition_notification_params *post_state_params;
598 struct mod_pd_power_state_pre_transition_notification_resp_params
599 *pd_resp_params =
600 (struct mod_pd_power_state_pre_transition_notification_resp_params
601 *)resp_event->params;
602 struct mod_mpmm_domain_ctx *domain_ctx;
603 uint32_t core_idx;
604 uint32_t perf_limit;
605
606 fwk_assert(fwk_module_is_valid_element_id(event->target_id));
607 domain_ctx = get_domain_ctx(event->target_id);
608 if (domain_ctx == NULL) {
609 return FWK_E_PARAM;
610 }
611
612 /* Find the corresponding core */
613 for (core_idx = 0; core_idx < domain_ctx->num_cores; core_idx++) {
614 if (fwk_id_is_equal(
615 domain_ctx->domain_config->core_config[core_idx].pd_id,
616 event->source_id)) {
617 break;
618 }
619 }
620
621 if (core_idx >= domain_ctx->num_cores) {
622 return FWK_E_PARAM;
623 }
624
625 if (fwk_id_is_equal(
626 event->id, mod_pd_notification_id_power_state_pre_transition)) {
627 pre_state_params =
628 (struct mod_pd_power_state_pre_transition_notification_params *)
629 event->params;
630 pd_resp_params->status = FWK_SUCCESS;
631 if (pre_state_params->target_state == MOD_PD_STATE_ON) {
632 /* The core is transitioning to online */
633 domain_ctx->num_cores_online++;
634 domain_ctx->core_ctx[core_idx].online = true;
635 /*
636 * After core transition to ON the threshold is set to zero as
637 * defined by the hardware. The next line modifies the threshold
638 * bitmap to include this core threshold.
639 */
640 domain_ctx->threshold_map = domain_ctx->threshold_map
641 << MPMM_THRESHOLD_MAP_NUM_OF_BITS;
642 perf_limit = mpmm_evaluate_perf_limit(domain_ctx);
643
644 /* Set the new limits */
645 struct plugin_limits_req plugin_limit_req = {
646 .domain_id = domain_ctx->domain_config->perf_id,
647 .max_limit = perf_limit,
648 };
649 mpmm_ctx.perf_plugins_handler_api->plugin_set_limits(
650 &plugin_limit_req);
651
652 domain_ctx->perf_limit = perf_limit;
653
654 /*
655 * If the perf limit requested will not trigger a dvfs change, there
656 * is no need to block the power domain state transition.
657 */
658 if (perf_limit >= domain_ctx->current_perf_level) {
659 return FWK_SUCCESS;
660 }
661
662 /* Block the power domain until the new level is applied */
663 domain_ctx->core_ctx[core_idx].pd_blocked = true;
664 domain_ctx->wait_for_perf_transition = true;
665 resp_event->is_delayed_response = true;
666 domain_ctx->core_ctx[core_idx].cookie = event->cookie;
667 }
668
669 } else if (fwk_id_is_equal(
670 event->id, mod_pd_notification_id_power_state_transition)) {
671 post_state_params =
672 (struct mod_pd_power_state_transition_notification_params *)
673 event->params;
674 if (post_state_params->state != MOD_PD_STATE_ON) {
675 /* The core transitioned to offline */
676 domain_ctx->num_cores_online--;
677 domain_ctx->core_ctx[core_idx].online = false;
678 }
679 }
680
681 return FWK_SUCCESS;
682 }
683
mpmm_bind(fwk_id_t id,unsigned int round)684 static int mpmm_bind(fwk_id_t id, unsigned int round)
685 {
686 int status = FWK_E_DATA;
687 /* Bind in the second round */
688 if ((round == 0) || (!fwk_module_is_valid_module_id(id))) {
689 return FWK_SUCCESS;
690 }
691
692 status = fwk_module_bind(
693 FWK_ID_MODULE(mpmm_ctx.amu_driver_api_id.common.module_idx),
694 mpmm_ctx.amu_driver_api_id,
695 &mpmm_ctx.amu_driver_api);
696 if (status != FWK_SUCCESS) {
697 return status;
698 }
699
700 return fwk_module_bind(
701 FWK_ID_MODULE(FWK_MODULE_IDX_SCMI_PERF),
702 FWK_ID_API(FWK_MODULE_IDX_SCMI_PERF, MOD_SCMI_PERF_PLUGINS_API),
703 &mpmm_ctx.perf_plugins_handler_api);
704 }
705
mpmm_process_bind_request(fwk_id_t source_id,fwk_id_t target_id,fwk_id_t api_id,const void ** api)706 static int mpmm_process_bind_request(
707 fwk_id_t source_id,
708 fwk_id_t target_id,
709 fwk_id_t api_id,
710 const void **api)
711 {
712 if (fwk_id_is_equal(source_id, FWK_ID_MODULE(FWK_MODULE_IDX_SCMI_PERF))) {
713 *api = &perf_plugins_api;
714 } else {
715 return FWK_E_ACCESS;
716 }
717
718 return FWK_SUCCESS;
719 }
720
/* Module descriptor registered with the SCP firmware framework. */
const struct fwk_module module_mpmm = {
    .type = FWK_MODULE_TYPE_SERVICE,
    .api_count = 1,
    .init = mpmm_init,
    .element_init = mpmm_element_init,
    .start = mpmm_start,
    .bind = mpmm_bind,
    .process_bind_request = mpmm_process_bind_request,
    .process_notification = mpmm_process_notification,
};
731