| /drivers/clk/sunxi/ |
| clk-sun9i-cpus.c |
      57  reg = readl(cpus->reg);  in sun9i_a80_cpus_clk_recalc_rate()
     159  reg = readl(cpus->reg);  in sun9i_a80_cpus_clk_set_rate()
     167  writel(reg, cpus->reg);  in sun9i_a80_cpus_clk_set_rate()
     189  struct sun9i_a80_cpus_clk *cpus;  in sun9i_a80_cpus_setup()  local
     194  cpus = kzalloc(sizeof(*cpus), GFP_KERNEL);  in sun9i_a80_cpus_setup()
     195  if (!cpus)  in sun9i_a80_cpus_setup()
     199  if (IS_ERR(cpus->reg))  in sun9i_a80_cpus_setup()
     212  mux->reg = cpus->reg;  in sun9i_a80_cpus_setup()
     220  &cpus->hw, &sun9i_a80_cpus_clk_ops,  in sun9i_a80_cpus_setup()
     236  iounmap(cpus->reg);  in sun9i_a80_cpus_setup()
     [all …]
|
| /drivers/cpufreq/ |
| cpufreq-dt.c |
      30  cpumask_var_t cpus;  member
      44  if (cpumask_test_cpu(cpu, priv->cpus))  in cpufreq_dt_find_data()
     109  cpumask_copy(policy->cpus, priv->cpus);  in cpufreq_init()
     176  if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))  in dt_cpufreq_early_init()
     179  cpumask_set_cpu(cpu, priv->cpus);  in dt_cpufreq_early_init()
     222  ret = dev_pm_opp_of_cpumask_add_table(priv->cpus);  in dt_cpufreq_early_init()
     241  cpumask_setall(priv->cpus);  in dt_cpufreq_early_init()
     259  dev_pm_opp_of_cpumask_remove_table(priv->cpus);  in dt_cpufreq_early_init()
     262  free_cpumask_var(priv->cpus);  in dt_cpufreq_early_init()
     273  dev_pm_opp_of_cpumask_remove_table(priv->cpus);  in dt_cpufreq_release()
     [all …]
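The cpufreq-dt hits above trace the usual lifecycle of a driver-private cpumask in this directory: allocate it, mark the CPUs that share a clock (either directly or from the OPP core's sharing info, as mediatek-cpufreq below does), register an OPP table for the set, copy it into policy->cpus at init, and free it on teardown. A minimal sketch of that pattern follows; the struct and function names are hypothetical, not the actual cpufreq-dt code, and the error handling is simplified.

```c
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pm_opp.h>

/* Hypothetical per-driver data mirroring the "cpumask_var_t cpus" member above. */
struct demo_priv {
	cpumask_var_t cpus;
};

static int demo_early_init(struct device *cpu_dev, struct demo_priv *priv, unsigned int cpu)
{
	int ret;

	if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Start with the CPU we are probing for ... */
	cpumask_set_cpu(cpu, priv->cpus);

	/* ... and add every CPU that shares its OPP table ("opp-shared" in DT). */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
	if (ret == -ENOENT)
		cpumask_setall(priv->cpus);	/* no sharing info: assume one domain */
	else if (ret)
		goto out_free;

	ret = dev_pm_opp_of_cpumask_add_table(priv->cpus);
	if (ret)
		goto out_free;

	return 0;

out_free:
	free_cpumask_var(priv->cpus);
	return ret;
}

static int demo_cpufreq_init(struct cpufreq_policy *policy, struct demo_priv *priv)
{
	/* All CPUs in priv->cpus are governed by this one policy. */
	cpumask_copy(policy->cpus, priv->cpus);
	return 0;
}

static void demo_release(struct demo_priv *priv)
{
	dev_pm_opp_of_cpumask_remove_table(priv->cpus);
	free_cpumask_var(priv->cpus);
}
```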
|
| tegra186-cpufreq.c |
      68  const struct tegra186_cpufreq_cpu *cpus;  member
      75  unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;  in tegra186_cpufreq_init()
      84  if (data->cpus[cpu].bpmp_cluster_id == cluster)  in tegra186_cpufreq_init()
      85  cpumask_set_cpu(cpu, policy->cpus);  in tegra186_cpufreq_init()
      96  unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;  in tegra186_cpufreq_set_target()
     116  edvd_offset = data->cpus[policy->cpu].edvd_offset;  in tegra186_cpufreq_get()
     118  cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;  in tegra186_cpufreq_get()
     236  data->cpus = tegra186_cpus;  in tegra186_cpufreq_probe()
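tegra186-cpufreq keys everything off a per-CPU table indexed by logical CPU number; at init time, every CPU whose entry reports the same cluster as policy->cpu is folded into the policy. A stripped-down sketch of that grouping step, with invented names and a placeholder table instead of the real SoC data:

```c
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/threads.h>

/* Hypothetical per-CPU description: which hardware cluster each logical CPU sits in. */
struct demo_cpu_desc {
	unsigned int cluster_id;
};

/* One entry per possible CPU; a real driver fills this from SoC/firmware data. */
static struct demo_cpu_desc demo_cpus[NR_CPUS];

static int demo_cpufreq_init(struct cpufreq_policy *policy)
{
	unsigned int cluster = demo_cpus[policy->cpu].cluster_id;
	unsigned int cpu;

	/* CPUs in the same cluster share the clock, so they share the policy. */
	for_each_possible_cpu(cpu) {
		if (demo_cpus[cpu].cluster_id == cluster)
			cpumask_set_cpu(cpu, policy->cpus);
	}

	return 0;
}
```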
|
| qcom-cpufreq-nvmem.c |
      67  struct qcom_cpufreq_drv_cpu cpus[];  member
     432  struct dev_pm_domain_list *pd_list = drv->cpus[cpu].pd_list;  in qcom_cpufreq_suspend_pd_devs()
     467  drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),  in qcom_cpufreq_probe()
     512  drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);  in qcom_cpufreq_probe()
     513  if (drv->cpus[cpu].opp_token < 0) {  in qcom_cpufreq_probe()
     514  ret = drv->cpus[cpu].opp_token;  in qcom_cpufreq_probe()
     529  &drv->cpus[cpu].pd_list);  in qcom_cpufreq_probe()
     547  dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);  in qcom_cpufreq_probe()
     548  dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);  in qcom_cpufreq_probe()
     561  dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);  in qcom_cpufreq_remove()
     [all …]
|
| armada-8k-cpufreq.c |
     135  static struct cpumask cpus, shared_cpus;  in armada_8k_cpufreq_init()  local
     149  cpumask_copy(&cpus, cpu_possible_mask);  in armada_8k_cpufreq_init()
     156  for_each_cpu(cpu, &cpus) {  in armada_8k_cpufreq_init()
     185  cpumask_andnot(&cpus, &cpus, &shared_cpus);  in armada_8k_cpufreq_init()
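armada-8k-cpufreq walks cpu_possible_mask one frequency domain at a time: it works out which CPUs share a clock with the current one, sets them up together, then uses cpumask_andnot() to drop the whole group from the work list so each domain is visited only once. A sketch of that loop shape, with a placeholder in place of the driver's real per-SoC sharing query:

```c
#include <linux/cpumask.h>

/* Hypothetical helper: fill @shared with the CPUs sharing a clock with @cpu. */
static void demo_get_sharing_cpus(unsigned int cpu, struct cpumask *shared)
{
	/* Placeholder: a real driver derives this from clocks or firmware tables. */
	cpumask_set_cpu(cpu, shared);
}

static int demo_init(void)
{
	static struct cpumask todo, shared;
	unsigned int cpu;

	cpumask_copy(&todo, cpu_possible_mask);

	for_each_cpu(cpu, &todo) {
		cpumask_clear(&shared);
		demo_get_sharing_cpus(cpu, &shared);

		/* ... register one frequency domain covering @shared here ... */

		/* Drop the whole group so the outer loop visits each domain only once. */
		cpumask_andnot(&todo, &todo, &shared);
	}

	return 0;
}
```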
|
| mediatek-cpufreq.c |
      40  struct cpumask cpus;  member
      71  if (cpumask_test_cpu(cpu, &info->cpus))  in mtk_cpu_dvfs_info_lookup()
     453  ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);  in mtk_cpu_dvfs_info_init()
     460  ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);  in mtk_cpu_dvfs_info_init()
     533  dev_pm_opp_of_cpumask_remove_table(&info->cpus);  in mtk_cpu_dvfs_info_init()
     570  dev_pm_opp_of_cpumask_remove_table(&info->cpus);  in mtk_cpu_dvfs_info_release()
     595  cpumask_copy(policy->cpus, &info->cpus);  in mtk_cpufreq_init()
|
| speedstep-ich.c |
     263  policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);  in speedstep_target()
     296  cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));  in speedstep_cpu_init()
     298  policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);  in speedstep_cpu_init()
|
| cpufreq.c |
     189  cpumask_setall(policy->cpus);  in cpufreq_generic_init()
     346  cpumask_pr_args(policy->cpus));  in cpufreq_notify_transition()
     348  for_each_cpu(cpu, policy->cpus)  in cpufreq_notify_transition()
    1169  if (cpumask_test_cpu(cpu, policy->cpus))  in cpufreq_add_policy_cpu()
    1177  cpumask_set_cpu(cpu, policy->cpus);  in cpufreq_add_policy_cpu()
    1326  free_cpumask_var(policy->cpus);  in cpufreq_policy_alloc()
    1378  free_cpumask_var(policy->cpus);  in cpufreq_policy_free()
    1435  cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);  in cpufreq_policy_online()
    1581  cpumask_clear(policy->cpus);  in cpufreq_policy_online()
    1680  cpumask_clear_cpu(cpu, policy->cpus);  in __cpufreq_offline()
     [all …]
|
| cpufreq_governor.c |
     102  for_each_cpu(j, policy_dbs->policy->cpus) {  in gov_update_cpu_data()
     137  for_each_cpu(j, policy->cpus) {  in dbs_update()
     332  for_each_cpu(cpu, policy->cpus) {  in gov_set_update_util()
     344  for_each_cpu(i, policy->cpus)  in gov_clear_update_util()
     532  for_each_cpu(j, policy->cpus) {  in cpufreq_dbs_governor_start()
|
| acpi-cpufreq.c |
     133  on_each_cpu_mask(policy->cpus, boost_set_msr_each,  in set_boost()
     136  cpumask_pr_args(policy->cpus), str_enabled_disabled(val));  in set_boost()
     442  cpumask_of(policy->cpu) : policy->cpus;  in acpi_cpufreq_target()
     738  cpumask_copy(policy->cpus, perf->shared_cpu_map);  in acpi_cpufreq_cpu_init()
     746  cpumask_copy(policy->cpus, topology_core_cpumask(cpu));  in acpi_cpufreq_cpu_init()
     751  cpumask_clear(policy->cpus);  in acpi_cpufreq_cpu_init()
     752  cpumask_set_cpu(cpu, policy->cpus);  in acpi_cpufreq_cpu_init()
|
| p4-clockmod.c |
     111  for_each_cpu(i, policy->cpus)  in cpufreq_p4_target()
     162  cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));  in cpufreq_p4_cpu_init()
|
| scpi-cpufreq.c |
     111  ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);  in scpi_cpufreq_init()
     117  ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);  in scpi_cpufreq_init()
|
| powernv-cpufreq.c |
     687  if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {  in gpstate_timer_handler()
     689  add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));  in gpstate_timer_handler()
     824  smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);  in powernv_cpufreq_target_index()
     837  cpumask_set_cpu(base + i, policy->cpus);  in powernv_cpufreq_cpu_init()
     937  cpumask_andnot(&mask, &mask, policy->cpus);  in powernv_cpufreq_work_fn()
|
| virtual-cpufreq.c |
     150  cpumask_set_cpu(cpu, policy->cpus);  in virt_cpufreq_get_sharing_cpus()
     227  topology_set_scale_freq_source(&virt_sfd, policy->cpus);  in virt_cpufreq_cpu_init()
|
| /drivers/irqchip/ |
| irq-bcm7038-l1.c |
      41  struct bcm7038_l1_cpu *cpus[NR_CPUS];  member
     127  cpu = intc->cpus[cpu_logical_map(smp_processor_id())];  in bcm7038_l1_irq_handle()
     129  cpu = intc->cpus[0];  in bcm7038_l1_irq_handle()
     157  intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;  in __bcm7038_l1_unmask()
     158  l1_writel(mask, intc->cpus[cpu_idx]->map_base +  in __bcm7038_l1_unmask()
     168  intc->cpus[cpu_idx]->mask_cache[word] |= mask;  in __bcm7038_l1_mask()
     169  l1_writel(mask, intc->cpus[cpu_idx]->map_base +  in __bcm7038_l1_mask()
     312  intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));  in bcm7038_l1_suspend()
     334  l1_writel(intc->cpus[boot_cpu]->mask_cache[word],  in bcm7038_l1_resume()
     336  l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],  in bcm7038_l1_resume()
     [all …]
|
| irq-bcm6345-l1.c |
      80  struct bcm6345_l1_cpu *cpus[NR_CPUS];  member
     149  intc->cpus[cpu_idx]->enable_cache[word] |= mask;  in __bcm6345_l1_unmask()
     150  __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],  in __bcm6345_l1_unmask()
     151  intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));  in __bcm6345_l1_unmask()
     161  intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;  in __bcm6345_l1_mask()
     162  __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],  in __bcm6345_l1_mask()
     163  intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));  in __bcm6345_l1_mask()
     206  enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;  in bcm6345_l1_set_affinity()
     241  cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, enable_cache, n_words),  in bcm6345_l1_init_one()
     330  struct bcm6345_l1_cpu *cpu = intc->cpus[idx];  in bcm6345_l1_of_init()
     [all …]
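Both Broadcom L1 controllers above keep one register block and one enable/mask cache per CPU, so masking boils down to: pick the word and bit for the hwirq, update that CPU's cached word, and write it back out. A compact sketch of that bookkeeping, with the register layout, names, and single combined helper invented for illustration (the real drivers have separate mask/unmask paths and their own register accessors):

```c
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/threads.h>
#include <linux/types.h>

#define DEMO_IRQS_PER_WORD	32

/* Hypothetical per-CPU state: MMIO base plus a cached copy of each enable word. */
struct demo_l1_cpu {
	void __iomem	*map_base;
	u32		enable_cache[8];
};

struct demo_l1_chip {
	struct demo_l1_cpu *cpus[NR_CPUS];
};

static void demo_l1_set_enable(struct demo_l1_chip *intc, unsigned int cpu_idx,
			       unsigned int hwirq, bool enable)
{
	unsigned int word = hwirq / DEMO_IRQS_PER_WORD;
	u32 bit = BIT(hwirq % DEMO_IRQS_PER_WORD);
	struct demo_l1_cpu *cpu = intc->cpus[cpu_idx];

	/* Update the per-CPU cached word first ... */
	if (enable)
		cpu->enable_cache[word] |= bit;
	else
		cpu->enable_cache[word] &= ~bit;

	/* ... then push the whole word to that CPU's enable register. */
	writel(cpu->enable_cache[word], cpu->map_base + word * sizeof(u32));
}
```

Caching the enable words per CPU is what lets the suspend/resume and set_affinity paths seen above restore or compare state without reading the hardware back.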
|
| /drivers/thermal/intel/ |
| intel_hfi.c |
     125  cpumask_var_t cpus;  member
     178  for_each_cpu(cpu, hfi_instance->cpus) {  in get_hfi_caps()
     209  cpu_count = cpumask_weight(hfi_instance->cpus);  in update_capabilities()
     479  cpumask_set_cpu(cpu, hfi_instance->cpus);  in intel_hfi_online()
     485  if (cpumask_weight(hfi_instance->cpus) == 1 && hfi_clients_nr > 0) {  in intel_hfi_online()
     527  cpumask_clear_cpu(cpu, hfi_instance->cpus);  in intel_hfi_offline()
     529  if (!cpumask_weight(hfi_instance->cpus))  in intel_hfi_offline()
     652  if (cpumask_empty(hfi_instance->cpus))  in hfi_thermal_notify()
     655  cpu = cpumask_any(hfi_instance->cpus);  in hfi_thermal_notify()
     696  if (!zalloc_cpumask_var(&hfi_instance->cpus, GFP_KERNEL))  in intel_hfi_init()
     [all …]
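intel_hfi.c tracks which CPUs of a package are online in a per-instance cpumask: the mask becoming non-empty means the first CPU arrived (enable the shared hardware), and becoming empty means the last CPU left (quiesce it). The same shape appears in the AMD HFI driver under /drivers/platform/x86/amd/hfi/ further down. A sketch of that hotplug bookkeeping, with hypothetical names and the enable/disable steps left as comments:

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical per-package state: which of its CPUs are currently online. */
struct demo_instance {
	cpumask_var_t cpus;
};

static int demo_instance_init(struct demo_instance *inst)
{
	return zalloc_cpumask_var(&inst->cpus, GFP_KERNEL) ? 0 : -ENOMEM;
}

static void demo_cpu_online(struct demo_instance *inst, unsigned int cpu)
{
	cpumask_set_cpu(cpu, inst->cpus);

	if (cpumask_weight(inst->cpus) == 1) {
		/* First CPU of this package came up: enable the shared hardware. */
	}
}

static void demo_cpu_offline(struct demo_instance *inst, unsigned int cpu)
{
	cpumask_clear_cpu(cpu, inst->cpus);

	if (cpumask_empty(inst->cpus)) {
		/* Last CPU of this package went away: quiesce the shared hardware. */
	}
}
```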
|
| /drivers/leds/trigger/ |
| ledtrig-activity.c |
      45  int cpus;  in led_activity_function()  local
      57  cpus = 0;  in led_activity_function()
      70  cpus++;  in led_activity_function()
      78  curr_boot = ktime_get_boottime_ns() * cpus;  in led_activity_function()
     135  target = (cpus > 1) ? (100 / cpus) : 50;  in led_activity_function()
|
| /drivers/base/ |
| arch_topology.c |
      35  static bool supports_scale_freq_counters(const struct cpumask *cpus)  in supports_scale_freq_counters()  argument
      37  return cpumask_subset(cpus, &scale_freq_counters_mask);  in supports_scale_freq_counters()
      64  const struct cpumask *cpus)  in topology_set_scale_freq_source()  argument
      78  for_each_cpu(cpu, cpus) {  in topology_set_scale_freq_source()
      95  const struct cpumask *cpus)  in topology_clear_scale_freq_source()  argument
     102  for_each_cpu(cpu, cpus) {  in topology_clear_scale_freq_source()
     148  if (supports_scale_freq_counters(cpus))  in topology_set_freq_scale()
     153  for_each_cpu(i, cpus)  in topology_set_freq_scale()
     173  void topology_update_hw_pressure(const struct cpumask *cpus,  in topology_update_hw_pressure()  argument
     180  cpu = cpumask_first(cpus);  in topology_update_hw_pressure()
     [all …]
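arch_topology.c keeps a global mask of CPUs whose frequency scale is driven by a registered counter-based source, and topology_set_freq_scale() ignores software updates when cpumask_subset() says the counters already cover all of the requested CPUs. A trimmed-down sketch of that register/check pair; names prefixed demo_ are invented, and the real code additionally tracks per-CPU source priorities:

```c
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/types.h>

/* CPUs whose frequency scale is already driven by hardware counters. */
static struct cpumask demo_counter_mask;

static void demo_set_scale_freq_source(const struct cpumask *cpus)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpus)
		cpumask_set_cpu(cpu, &demo_counter_mask);
}

static bool demo_supports_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &demo_counter_mask);
}

static void demo_set_freq_scale(const struct cpumask *cpus,
				unsigned long cur_freq, unsigned long max_freq)
{
	unsigned int i;

	/* Counter-based sources win: ignore software updates for covered CPUs. */
	if (demo_supports_counters(cpus))
		return;

	for_each_cpu(i, cpus)
		pr_debug("cpu%u: freq scale %lu/%lu\n", i, cur_freq, max_freq);
}
```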
|
| /drivers/crypto/caam/ |
| qi.c |
     410  const cpumask_t *cpus = qman_affine_cpus();  in caam_drv_ctx_init()  local
     442  if (!cpumask_test_cpu(*cpu, cpus)) {  in caam_drv_ctx_init()
     445  *pcpu = cpumask_next_wrap(*pcpu, cpus);  in caam_drv_ctx_init()
     519  const cpumask_t *cpus = qman_affine_cpus();  in caam_qi_shutdown()  local
     521  for_each_cpu(i, cpus) {  in caam_qi_shutdown()
     696  const cpumask_t *cpus = qman_affine_cpus();  in alloc_rsp_fqs()  local
     699  for_each_cpu(i, cpus) {  in alloc_rsp_fqs()
     713  const cpumask_t *cpus = qman_affine_cpus();  in free_rsp_fqs()  local
     715  for_each_cpu(i, cpus)  in free_rsp_fqs()
     724  for_each_cpu(i, cpus) {  in free_caam_qi_pcpu_netdev()
     [all …]
|
| /drivers/misc/sgi-gru/ |
| grutlbpurge.c |
     295  int cpus, shift = 0, n;  in gru_tgh_flush_init()  local
     297  cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);  in gru_tgh_flush_init()
     300  if (cpus) {  in gru_tgh_flush_init()
     301  n = 1 << fls(cpus - 1);  in gru_tgh_flush_init()
     314  gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;  in gru_tgh_flush_init()
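gru_tgh_flush_init() sizes its per-blade TGH partitioning by rounding the CPU count up to the next power of two with `1 << fls(cpus - 1)` and then deriving a shift from it. A tiny worked example of that rounding idiom, wrapped in a hypothetical helper:

```c
#include <linux/bitops.h>

/*
 * 1 << fls(n - 1) rounds n up to the next power of two (for n >= 1):
 *   n = 5  -> fls(4)  = 3 -> 1 << 3 = 8
 *   n = 8  -> fls(7)  = 3 -> 1 << 3 = 8
 *   n = 12 -> fls(11) = 4 -> 1 << 4 = 16
 */
static unsigned int demo_roundup_pow_of_two(unsigned int n)
{
	return 1U << fls(n - 1);
}
```

The kernel's generic roundup_pow_of_two() helper in <linux/log2.h> is built on the same idiom.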
|
| /drivers/crypto/intel/qat/qat_common/ |
| adf_gen2_config.c |
      17  int cpus = num_online_cpus();  in adf_gen2_crypto_dev_config()  local
      24  instances = min(cpus, banks);  in adf_gen2_crypto_dev_config()
     116  int cpus = num_online_cpus();  in adf_gen2_comp_dev_config()  local
     123  instances = min(cpus, banks);  in adf_gen2_comp_dev_config()
|
| adf_gen4_config.c |
      18  int cpus = num_online_cpus();  in adf_crypto_dev_config()  local
      25  instances = min(cpus, banks / 2);  in adf_crypto_dev_config()
     124  int cpus = num_online_cpus();  in adf_comp_dev_config()  local
     131  instances = min(cpus, banks);  in adf_comp_dev_config()
|
| /drivers/thermal/ |
| thermal_trace.h |
     116  TP_PROTO(const struct cpumask *cpus, unsigned int freq,
     119  TP_ARGS(cpus, freq, cdev_state, power),
     129  __assign_bitmask(cpumask, cpumask_bits(cpus),
|
| /drivers/platform/x86/amd/hfi/ |
| hfi.c |
     109  cpumask_var_t cpus;  member
     298  if (!zalloc_cpumask_var(&hfi_info->cpus, GFP_KERNEL))  in amd_hfi_online()
     301  cpumask_set_cpu(cpu, hfi_info->cpus);  in amd_hfi_online()
     332  free_cpumask_var(hfi_info->cpus);  in amd_hfi_offline()
|