| /linux/arch/arm/common/ |
| mcpm_entry.c |
    66   mcpm_sync.clusters[cluster].cluster = state;  in __mcpm_outbound_leave_critical()
    67   sync_cache_w(&mcpm_sync.clusters[cluster].cluster);  in __mcpm_outbound_leave_critical()
    88   c->cluster = CLUSTER_GOING_DOWN;  in __mcpm_outbound_enter_critical()
    89   sync_cache_w(&c->cluster);  in __mcpm_outbound_enter_critical()
    137  sync_cache_r(&mcpm_sync.clusters[cluster].cluster);  in __mcpm_cluster_state()
    138  return mcpm_sync.clusters[cluster].cluster;  in __mcpm_cluster_state()
    241  unsigned int mpidr, cpu, cluster;  in mcpm_cpu_power_down() local
    286  __mcpm_cpu_down(cpu, cluster);  in mcpm_cpu_power_down()
    318  __func__, cpu, cluster, ret);  in mcpm_wait_for_cpu_powerdown()
    342  unsigned int mpidr, cpu, cluster;  in mcpm_cpu_powered_up() local
    [all …]
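
The mcpm_entry.c hits above are the core of the MCPM state machine: the per-cluster state word is published with sync_cache_w() and observed with sync_cache_r(), because some of the CPUs involved run with their caches disabled. Below is a minimal sketch of that publish/observe pattern; the demo_* struct and helpers are illustrative stand-ins (not the kernel's actual mcpm_sync layout), and only sync_cache_w()/sync_cache_r() from <asm/cacheflush.h> are real kernel symbols:

    #include <asm/cacheflush.h>     /* sync_cache_w(), sync_cache_r() */

    /*
     * Illustrative stand-in for the per-cluster state record; the real,
     * cache-line aligned layout lives in arch/arm/include/asm/mcpm.h.
     */
    struct demo_cluster_sync {
            unsigned int cluster;   /* e.g. CLUSTER_DOWN / CLUSTER_UP / CLUSTER_GOING_DOWN */
    };

    static struct demo_cluster_sync demo_sync[2];

    static void demo_set_cluster_state(unsigned int cluster, unsigned int state)
    {
            demo_sync[cluster].cluster = state;
            /* Clean the line out to memory so CPUs running uncached see the update. */
            sync_cache_w(&demo_sync[cluster].cluster);
    }

    static unsigned int demo_get_cluster_state(unsigned int cluster)
    {
            /* Invalidate any stale cached copy before reading. */
            sync_cache_r(&demo_sync[cluster].cluster);
            return demo_sync[cluster].cluster;
    }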
|
| bL_switcher.c |
    273  int cluster;  in bL_switcher_thread() local
    288  cluster = t->wanted_cluster;  in bL_switcher_thread()
    295  if (cluster != -1) {  in bL_switcher_thread()
    296  bL_switch_to(cluster);  in bL_switcher_thread()
    430  if (cluster >= 2) {  in bL_switcher_halve_cpus()
    436  mask |= (1 << cluster);  in bL_switcher_halve_cpus()
    456  cluster_0 = cluster;  in bL_switcher_halve_cpus()
    457  if (cluster != cluster_0)  in bL_switcher_halve_cpus()
    468  if (cluster != cluster_0)  in bL_switcher_halve_cpus()
    496  cpu, cluster, gic_id);  in bL_switcher_halve_cpus()
    [all …]
|
| mcpm_head.S |
    56   ubfx r10, r0, #8, #8  @ r10 = cluster
    88   mla r8, r0, r10, r8  @ r8 = sync cluster base
    100  mla r11, r0, r10, r11  @ r11 = cluster first man lock
    106  bne mcpm_setup_wait  @ wait for cluster setup if so
    109  cmp r0, #CLUSTER_UP  @ cluster already up?
    110  bne mcpm_setup  @ if not, set up the cluster
    120  @ Signal that the cluster is being brought up:
    145  @ power_up_setup is now responsible for setting up the cluster:
    148  mov r0, #1  @ second (cluster) affinity level
    157  @ Leave the cluster setup critical section:
    [all …]
|
| /linux/arch/arm/mach-sunxi/ |
| mc_smp.c |
    105  __func__, cluster, core);  in sunxi_core_is_cortex_a15()
    125  cluster, cpu);  in sunxi_cpu_power_switch_set()
    167  if (cluster == 0 && cpu == 0)  in sunxi_cpu_powerup()
    258  if (cluster >= SUNXI_NR_CLUSTERS)  in sunxi_cluster_powerup()
    393  unsigned int mpidr, cpu, cluster;  in sunxi_mc_smp_boot_secondary() local
    411  sunxi_cluster_powerup(cluster);  in sunxi_mc_smp_boot_secondary()
    418  sunxi_cpu_powerup(cpu, cluster);  in sunxi_mc_smp_boot_secondary()
    445  unsigned int mpidr, cpu, cluster;  in sunxi_mc_smp_cpu_die() local
    463  cluster, cpu);  in sunxi_mc_smp_cpu_die()
    509  if (cluster >= SUNXI_NR_CLUSTERS)  in sunxi_cluster_powerdown()
    [all …]
|
| /linux/arch/arm/mach-versatile/ |
| tc2_pm.c |
    49   if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])  in tc2_pm_cpu_powerup()
    51   ve_spc_set_resume_addr(cluster, cpu,  in tc2_pm_cpu_powerup()
    60   if (cluster >= TC2_CLUSTERS)  in tc2_pm_cluster_powerup()
    62   ve_spc_powerdown(cluster, false);  in tc2_pm_cluster_powerup()
    84   BUG_ON(cluster >= TC2_CLUSTERS);  in tc2_pm_cluster_powerdown_prepare()
    85   ve_spc_powerdown(cluster, true);  in tc2_pm_cluster_powerdown_prepare()
    114  u32 mask = cluster ?  in tc2_core_in_reset()
    133  __func__, cpu, cluster,  in tc2_pm_wait_for_powerdown()
    171  BUG_ON(cluster >= TC2_CLUSTERS);  in tc2_pm_cluster_is_up()
    172  ve_spc_powerdown(cluster, false);  in tc2_pm_cluster_is_up()
    [all …]
|
| spc.c |
    154  if (cluster >= MAX_CLUSTERS)  in ve_spc_cpu_wakeup_irq()
    183  if (cluster >= MAX_CLUSTERS)  in ve_spc_set_resume_addr()
    186  if (cluster_is_a15(cluster))  in ve_spc_set_resume_addr()
    208  if (cluster >= MAX_CLUSTERS)  in ve_spc_powerdown()
    402  info->opps[cluster] = opps;  in ve_spc_populate_opps()
    421  int cluster;  in ve_init_opp_table() local
    426  cluster = cluster < 0 ? 0 : cluster;  in ve_init_opp_table()
    484  int cluster;  member
    534  spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;  in ve_spc_clk_register()
    546  int cpu, cluster;  in ve_spc_clk_init() local
    [all …]
|
| spc.h |
    13  void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
    14  void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
    15  void ve_spc_powerdown(u32 cluster, bool enable);
    16  int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
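
Taken together with the tc2_pm.c hits above, these four prototypes are essentially the whole SPC interface that the TC2 power-management backend drives. The sketch below shows the intended call pattern for powering a CPU up, under stated assumptions: MY_NR_CLUSTERS, my_nr_cpus[] and my_secondary_entry() are hypothetical stand-ins, and only the ve_spc_* calls come from spc.h:

    #include <linux/errno.h>
    #include <asm/memory.h>         /* virt_to_phys() */
    #include "spc.h"                /* arch/arm/mach-versatile/spc.h */

    #define MY_NR_CLUSTERS  2                       /* assumed topology */
    static unsigned int my_nr_cpus[MY_NR_CLUSTERS] = { 2, 3 };

    void my_secondary_entry(void);                  /* wake-up entry point, defined elsewhere */

    static int my_cpu_powerup(unsigned int cpu, unsigned int cluster)
    {
            if (cluster >= MY_NR_CLUSTERS || cpu >= my_nr_cpus[cluster])
                    return -EINVAL;

            /* Tell the SPC where this core should jump once it wakes up ... */
            ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(my_secondary_entry));
            /* ... and arm its wake-up IRQ so the power controller releases it. */
            ve_spc_cpu_wakeup_irq(cluster, cpu, true);
            return 0;
    }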
|
| /linux/fs/ocfs2/cluster/ |
| nodemanager.c |
    53   if (cluster == NULL)  in o2nm_configured_node_map()
    104  if (cluster == NULL)  in o2nm_get_node_by_ip()
    187  struct o2nm_cluster *cluster;  in o2nm_node_num_store() local
    209  if (!cluster) {  in o2nm_node_num_store()
    215  if (cluster->cl_nodes[tmp])  in o2nm_node_num_store()
    290  if (!cluster) {  in o2nm_node_ipv4_address_store()
    346  if (!cluster) {  in o2nm_node_local_store()
    537  if (cluster)  in o2nm_cluster_fence_method_show()
    656  kfree(cluster);  in o2nm_cluster_release()
    718  ret = &cluster->cl_group;  in o2nm_cluster_group_make_group()
    [all …]
|
| /linux/arch/arm/mach-exynos/ |
| mcpm-exynos.c |
    64   cluster >= EXYNOS5420_NR_CLUSTERS)  in exynos_cpu_powerup()
    76   if (cluster &&  in exynos_cpu_powerup()
    93   cpu, cluster);  in exynos_cpu_powerup()
    108  pr_debug("%s: cluster %u\n", __func__, cluster);  in exynos_cluster_powerup()
    109  if (cluster >= EXYNOS5420_NR_CLUSTERS)  in exynos_cluster_powerup()
    112  exynos_cluster_power_up(cluster);  in exynos_cluster_powerup()
    122  cluster >= EXYNOS5420_NR_CLUSTERS);  in exynos_cpu_powerdown_prepare()
    129  BUG_ON(cluster >= EXYNOS5420_NR_CLUSTERS);  in exynos_cluster_powerdown_prepare()
    130  exynos_cluster_power_down(cluster);  in exynos_cluster_powerdown_prepare()
    170  cluster >= EXYNOS5420_NR_CLUSTERS);  in exynos_wait_for_powerdown()
    [all …]
|
| /linux/arch/arm/mach-hisi/ |
| platmcpm.c |
    76   if (hip04_cpu_table[cluster][i])  in hip04_cluster_is_down()
    89   data |= 1 << cluster;  in hip04_set_snoop_filter()
    91   data &= ~(1 << cluster);  in hip04_set_snoop_filter()
    100  unsigned int mpidr, cpu, cluster;  in hip04_boot_secondary() local
    115  if (hip04_cpu_table[cluster][cpu])  in hip04_boot_secondary()
    146  hip04_cpu_table[cluster][cpu]++;  in hip04_boot_secondary()
    155  unsigned int mpidr, cpu, cluster;  in hip04_cpu_die() local
    163  hip04_cpu_table[cluster][cpu]--;  in hip04_cpu_die()
    193  unsigned int mpidr, cpu, cluster;  in hip04_cpu_kill() local
    249  unsigned int mpidr, cpu, cluster;  in hip04_cpu_table_init() local
    [all …]
|
| /linux/drivers/perf/ |
| qcom_l2_pmu.c |
    489  if (!cluster) {  in l2_cache_event_init()
    761  return cluster;  in l2_cache_associate_cpu_with_cluster()
    774  if (!cluster) {  in l2cache_pmu_online_cpu()
    777  if (!cluster) {  in l2cache_pmu_online_cpu()
    792  cluster->on_cpu = cpu;  in l2cache_pmu_online_cpu()
    810  if (!cluster)  in l2cache_pmu_offline_cpu()
    819  cluster->on_cpu = -1;  in l2cache_pmu_offline_cpu()
    853  cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);  in l2_cache_pmu_probe_cluster()
    854  if (!cluster)  in l2_cache_pmu_probe_cluster()
    863  cluster->irq = irq;  in l2_cache_pmu_probe_cluster()
    [all …]
|
| /linux/drivers/remoteproc/ |
| mtk_scp.c |
    70   struct mtk_scp_of_cluster *scp_cluster = scp->cluster;  in scp_wdt_handler()
    175  val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);  in mt8183_scp_reset_assert()
    177  writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);  in mt8183_scp_reset_assert()
    184  val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);  in mt8183_scp_reset_deassert()
    186  writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);  in mt8183_scp_reset_deassert()
    221  scp->cluster->reg_base + MT8183_SCP_TO_HOST);  in mt8183_scp_irq_handler()
    779  if (scp->cluster->l1tcm_size &&  in mt8192_scp_da_to_va()
    780  da >= scp->cluster->l1tcm_phys &&  in mt8192_scp_da_to_va()
    781  (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {  in mt8192_scp_da_to_va()
    782  offset = da - scp->cluster->l1tcm_phys;  in mt8192_scp_da_to_va()
    [all …]
|
| ti_k3_r5_remoteproc.c |
    450   struct k3_r5_cluster *cluster = kproc->cluster;  in k3_r5_rproc_prepare() local
    517   struct k3_r5_cluster *cluster = kproc->cluster;  in k3_r5_rproc_unprepare() local
    552   struct k3_r5_cluster *cluster = kproc->cluster;  in k3_r5_rproc_start() local
    630   struct k3_r5_cluster *cluster = kproc->cluster;  in k3_r5_rproc_stop() local
    838   struct k3_r5_cluster *cluster = kproc->cluster;  in k3_r5_rproc_configure() local
    1076  struct k3_r5_cluster *cluster = kproc->cluster;  in k3_r5_adjust_tcm_sizes() local
    1115  struct k3_r5_cluster *cluster = kproc->cluster;  in k3_r5_rproc_configure_mode() local
    1252  kproc->cluster = cluster;  in k3_r5_cluster_rproc_init()
    1711  cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);  in k3_r5_probe()
    1712  if (!cluster)  in k3_r5_probe()
    [all …]
|
| xlnx_r5_remoteproc.c |
    1371  cluster->mode = cluster_mode;  in zynqmp_r5_cluster_init()
    1378  cluster->core_count = 0;  in zynqmp_r5_cluster_init()
    1379  cluster->r5_cores = NULL;  in zynqmp_r5_cluster_init()
    1416  if (!cluster)  in zynqmp_r5_cluster_exit()
    1429  kfree(cluster->r5_cores);  in zynqmp_r5_cluster_exit()
    1430  kfree(cluster);  in zynqmp_r5_cluster_exit()
    1449  cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);  in zynqmp_r5_remoteproc_probe()
    1450  if (!cluster)  in zynqmp_r5_remoteproc_probe()
    1453  cluster->dev = dev;  in zynqmp_r5_remoteproc_probe()
    1458  kfree(cluster);  in zynqmp_r5_remoteproc_probe()
    [all …]
|
| /linux/arch/arm/include/asm/ |
| mcpm.h |
    44   void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
    51   void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
    84   int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
    219  int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
    220  int (*cluster_powerup)(unsigned int cluster);
    221  void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
    223  void (*cluster_powerdown_prepare)(unsigned int cluster);
    226  void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
    227  void (*cluster_is_up)(unsigned int cluster);
    228  int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
    [all …]
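
mcpm.h is the multi-cluster power management (MCPM) front-end API: the mcpm_* entry points above are called by generic code, while the function pointers (cpu_powerup, cluster_powerup, ...) are the hooks a platform back-end fills in. A minimal registration sketch follows, assuming a hypothetical my_* back-end; only struct mcpm_platform_ops, mcpm_platform_register() and mcpm_smp_set_ops() are taken from mcpm.h, and a real back-end would implement the power-down and suspend hooks as well:

    #include <linux/init.h>
    #include <asm/mcpm.h>

    static int my_cpu_powerup(unsigned int cpu, unsigned int cluster)
    {
            /* Ungate power/clocks for one CPU of the given cluster. */
            return 0;
    }

    static int my_cluster_powerup(unsigned int cluster)
    {
            /* Bring cluster-level logic (L2, coherency interface) back up. */
            return 0;
    }

    static const struct mcpm_platform_ops my_mcpm_ops = {
            .cpu_powerup            = my_cpu_powerup,
            .cluster_powerup        = my_cluster_powerup,
            /* .cpu_powerdown_prepare, .wait_for_powerdown, ... omitted here */
    };

    static int __init my_mcpm_init(void)
    {
            int ret = mcpm_platform_register(&my_mcpm_ops);

            if (!ret)
                    mcpm_smp_set_ops();     /* boot secondaries via the MCPM entry vector */
            return ret;
    }
    early_initcall(my_mcpm_init);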
|
| /linux/Documentation/arch/arm/ |
| cluster-pm-race-avoidance.rst |
    92   CPUs in the cluster simultaneously modifying the state. The cluster-
    118  cluster setup and
    161  cluster is set up and coherent. If the cluster is not ready,
    163  cluster has been set up.
    230  "cluster" state: The global state of the cluster; or the state
    244  states for the cluster as a whole::
    337  enabled for the cluster. Other CPUs in the cluster can safely
    355  enabled for the cluster. Other CPUs in the cluster can safely
    359  made to power the cluster down.
    377  cluster-level coherency.
    [all …]
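
The race-avoidance document tracks a small set of cluster-wide states alongside the per-CPU and inbound states. As a reading aid only, the fragment below paraphrases those cluster states as a commented enum; the DEMO_* names and values are illustrative, the authoritative definitions being the CLUSTER_* macros in arch/arm/include/asm/mcpm.h:

    /* Paraphrase of the cluster-wide states described in the document; a
     * separate "inbound" state covers CPUs entering a cluster being set up. */
    enum demo_cluster_state {
            DEMO_CLUSTER_DOWN,       /* powered off, coherency disabled */
            DEMO_CLUSTER_UP,         /* set up and coherent; CPUs may come and go freely */
            DEMO_CLUSTER_GOING_DOWN, /* the "last man" is tearing the cluster down */
    };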
|
| /linux/drivers/cpufreq/ |
| vexpress-spc-cpufreq.c |
    45   #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)  argument
    46   #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)  argument
    284  if (!freq_table[cluster])  in _put_cluster_clk_and_freq_table()
    287  clk_put(clk[cluster]);  in _put_cluster_clk_and_freq_table()
    300  if (cluster < MAX_CLUSTERS)  in put_cluster_clk_and_freq_table()
    313  kfree(freq_table[cluster]);  in put_cluster_clk_and_freq_table()
    322  if (freq_table[cluster])  in _get_cluster_clk_and_freq_table()
    338  if (!IS_ERR(clk[cluster]))  in _get_cluster_clk_and_freq_table()
    343  ret = PTR_ERR(clk[cluster]);  in _get_cluster_clk_and_freq_table()
    348  cluster);  in _get_cluster_clk_and_freq_table()
    [all …]
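
The two macros at file lines 45–46 convert between the A7 cluster's real clock rate and the "virtual" rate the driver exposes so both clusters can be presented on one common frequency scale (used with the in-kernel big.LITTLE switcher): A7 rates are halved in the virtual view and doubled again when programmed. A standalone userspace illustration follows; the A15_CLUSTER/A7_CLUSTER values and the kHz figures are assumed for the demo:

    #include <stdio.h>

    #define A15_CLUSTER     0               /* assumed for the demo */
    #define A7_CLUSTER      1

    #define ACTUAL_FREQ(cluster, freq) ((cluster) == A7_CLUSTER ? (freq) << 1 : (freq))
    #define VIRT_FREQ(cluster, freq)   ((cluster) == A7_CLUSTER ? (freq) >> 1 : (freq))

    int main(void)
    {
            /* A 1 GHz A7 operating point is advertised as 500 MHz ... */
            printf("A7  actual 1000000 kHz -> virt   %u kHz\n", VIRT_FREQ(A7_CLUSTER, 1000000u));
            /* ... and a 500 MHz virtual request is programmed as 1 GHz. */
            printf("A7  virt    500000 kHz -> actual %u kHz\n", ACTUAL_FREQ(A7_CLUSTER, 500000u));
            /* A15 frequencies pass through unchanged. */
            printf("A15 actual 1000000 kHz -> virt   %u kHz\n", VIRT_FREQ(A15_CLUSTER, 1000000u));
            return 0;
    }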
|
| tegra186-cpufreq.c |
    75   unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;  in tegra186_cpufreq_init() local
    77   policy->freq_table = data->clusters[cluster].table;  in tegra186_cpufreq_init()
    100  struct tegra186_cpufreq_cluster *cluster;  in tegra186_cpufreq_get() local
    112  cluster = &data->clusters[cluster_id];  in tegra186_cpufreq_get()
    115  return (cluster->ref_clk_khz * ndiv) / cluster->div;  in tegra186_cpufreq_get()
    187  cluster->ref_clk_khz = data->ref_clk_hz / 1000;  in init_vhint_table()
    188  cluster->div = data->pdiv * data->mdiv;  in init_vhint_table()
    207  point->frequency = (cluster->ref_clk_khz * ndiv) / cluster->div;  in init_vhint_table()
    245  cluster->table = init_vhint_table(pdev, bpmp, cluster, i);  in tegra186_cpufreq_probe()
    246  if (IS_ERR(cluster->table)) {  in tegra186_cpufreq_probe()
    [all …]
|
| /linux/Documentation/devicetree/bindings/cpufreq/ |
| apple,cluster-cpufreq.yaml |
    4    $id: http://devicetree.org/schemas/cpufreq/apple,cluster-cpufreq.yaml#
    7    title: Apple SoC cluster cpufreq device
    14   the cluster management register block. This binding uses the standard
    23   - apple,t8103-cluster-cpufreq
    24   - apple,t8112-cluster-cpufreq
    25   - const: apple,cluster-cpufreq
    27   - const: apple,t6000-cluster-cpufreq
    28   - const: apple,t8103-cluster-cpufreq
    29   - const: apple,cluster-cpufreq
    107  compatible = "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
    [all …]
|
| /linux/Documentation/ABI/testing/ |
| sysfs-ocfs2 |
    14  covers how ocfs2 uses distributed locking between cluster
    18  cluster nodes can interoperate if they have an identical
    34  the available plugins to support ocfs2 cluster operation.
    35  A cluster plugin is required to use ocfs2 in a cluster.
    38  * 'o2cb' - The classic o2cb cluster stack that ocfs2 has
    54  cluster plugin is currently in use by the filesystem.
    62  the cluster stack in use. The contents may change
    71  of current ocfs2 cluster stack. This value is set by
    76  When the 'o2cb' cluster stack is used, the 'o2cb' cluster
    78  cluster plugin.
    [all …]
|
| /linux/arch/mips/include/asm/ |
| mips-cps.h |
    131  static inline uint64_t mips_cps_cluster_config(unsigned int cluster)  in mips_cps_cluster_config() argument
    141  WARN_ON(cluster != 0);  in mips_cps_cluster_config()
    149  mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);  in mips_cps_cluster_config()
    164  static inline unsigned int mips_cps_numcores(unsigned int cluster)  in mips_cps_numcores() argument
    171  mips_cps_cluster_config(cluster) + 1);  in mips_cps_numcores()
    181  static inline unsigned int mips_cps_numiocu(unsigned int cluster)  in mips_cps_numiocu() argument
    187  mips_cps_cluster_config(cluster));  in mips_cps_numiocu()
    199  static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int core)  in mips_cps_numvps() argument
    210  mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);  in mips_cps_numvps()
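
These inline helpers read Coherence Manager (CM) registers to size the system: per-cluster core and IOCU counts, and per-core VP counts. The sketch below walks the topology with them; mips_cps_numclusters() is assumed to be provided by the same header (it is not among the hits above), and demo_dump_cps_topology() is a hypothetical caller:

    #include <linux/init.h>
    #include <linux/printk.h>
    #include <asm/mips-cps.h>

    static void __init demo_dump_cps_topology(void)
    {
            /* Assumption: mips_cps_numclusters() comes from asm/mips-cps.h. */
            unsigned int ncl = mips_cps_numclusters();
            unsigned int cl, core;

            for (cl = 0; cl < ncl; cl++) {
                    pr_info("cluster %u: %u core(s), %u IOCU(s)\n", cl,
                            mips_cps_numcores(cl), mips_cps_numiocu(cl));

                    for (core = 0; core < mips_cps_numcores(cl); core++)
                            pr_info("  core %u: %u VP(s)\n", core,
                                    mips_cps_numvps(cl, core));
            }
    }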
|
| /linux/fs/fat/ |
| cache.c |
    244  if (cluster == 0)  in fat_get_cluster()
    247  if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {  in fat_get_cluster()
    256  while (*fclus < cluster) {  in fat_get_cluster()
    291  static int fat_bmap_cluster(struct inode *inode, int cluster)  in fat_bmap_cluster() argument
    299  ret = fat_get_cluster(inode, cluster, &fclus, &dclus);  in fat_bmap_cluster()
    316  int cluster, offset;  in fat_get_mapped_cluster() local
    320  cluster = fat_bmap_cluster(inode, cluster);  in fat_get_mapped_cluster()
    321  if (cluster < 0)  in fat_get_mapped_cluster()
    322  return cluster;  in fat_get_mapped_cluster()
    323  else if (cluster) {  in fat_get_mapped_cluster()
    [all …]
|
| /linux/Documentation/devicetree/bindings/phy/ |
| phy-mvebu.txt |
    23  Armada 375 USB cluster
    27  controller. The USB cluster control register allows to manage common
    32  - compatible: "marvell,armada-375-usb-cluster"
    33  - reg: Should contain usb cluster register location and length.
    38  usbcluster: usb-cluster@18400 {
    39  compatible = "marvell,armada-375-usb-cluster";
|
| /linux/arch/arm/mach-milbeaut/ |
| platsmp.c |
    25  unsigned int mpidr, cpu, cluster;  in m10v_boot_secondary() local
    32  cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  in m10v_boot_secondary()
    38  __func__, cpu, l_cpu, cluster);  in m10v_boot_secondary()
    48  unsigned int mpidr, cpu, cluster;  in m10v_smp_init() local
    61  cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  in m10v_smp_init()
    62  pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster);  in m10v_smp_init()
|
| /linux/arch/arm64/boot/dts/apple/ |
| t600x-dieX.dtsi |
    10  …compatible = "apple,t6000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
    16  …compatible = "apple,t6000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
    22  …compatible = "apple,t6000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
|