Lines Matching +full:multi +full:- +full:cluster in arch/arm/mach-sunxi/mc_smp.c

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018 Chen-Yu Tsai
5 * Chen-Yu Tsai <wens@csie.org>
7 * arch/arm/mach-sunxi/mc_smp.c
9 * Based on Allwinner code, arch/arm/mach-exynos/mcpm-exynos.c, and
10 * arch/arm/mach-hisi/platmcpm.c
11 * Cluster cache enable trampoline code adapted from MCPM framework
14 #include <linux/arm-cci.h>
19 #include <linux/irqchip/arm-gic.h>
71 /* R_CPUCFG registers, specific to sun8i-a83t */
88 static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster) in sunxi_core_is_cortex_a15() argument
91 int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core; in sunxi_core_is_cortex_a15()
103 * would be mid way in a core or cluster power sequence. in sunxi_core_is_cortex_a15()
105 pr_err("%s: Couldn't get CPU cluster %u core %u device node\n", in sunxi_core_is_cortex_a15()
106 __func__, cluster, core); in sunxi_core_is_cortex_a15()
111 is_compatible = of_device_is_compatible(node, "arm,cortex-a15"); in sunxi_core_is_cortex_a15()
116 static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster, in sunxi_cpu_power_switch_set() argument
122 reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
125 pr_debug("power clamp for cluster %u cpu %u already open\n", in sunxi_cpu_power_switch_set()
126 cluster, cpu); in sunxi_cpu_power_switch_set()
130 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
132 writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
134 writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
136 writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
138 writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
141 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
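The matched writes above show the power clamp being opened in stages (0xff, 0xfe, 0xf8, 0xf0, 0x00) when a core is powered up, and closed with a single 0xff write when it is powered down. A minimal sketch of the whole helper under that reading; the settling delay between steps is an assumption, since it is not visible in the matches:

static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
                                      bool enable)
{
        u32 reg;

        reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
        if (enable) {
                if (reg == 0x00) {
                        pr_debug("power clamp for cluster %u cpu %u already open\n",
                                 cluster, cpu);
                        return 0;
                }

                /* open the clamp gradually to limit in-rush current */
                writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
                udelay(10);             /* delay length is an assumption */
                writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
                udelay(10);
                writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
                udelay(10);
                writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
                udelay(10);
                writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
                udelay(10);
        } else {
                /* closing the clamp can be done in a single step */
                writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
                udelay(10);
        }

        return 0;
}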
159 static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster) in sunxi_cpu_powerup() argument
163 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu); in sunxi_cpu_powerup()
164 if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS) in sunxi_cpu_powerup()
165 return -EINVAL; in sunxi_cpu_powerup()
168 if (cluster == 0 && cpu == 0) in sunxi_cpu_powerup()
171 /* assert processor power-on reset */ in sunxi_cpu_powerup()
172 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
174 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
177 /* assert cpu power-on reset */ in sunxi_cpu_powerup()
179 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
182 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
186 /* Cortex-A7: hold L1 reset disable signal low */ in sunxi_cpu_powerup()
187 if (!sunxi_core_is_cortex_a15(cpu, cluster)) { in sunxi_cpu_powerup()
188 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster)); in sunxi_cpu_powerup()
190 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster)); in sunxi_cpu_powerup()
194 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cpu_powerup()
199 * to ARM manuals, asserting power-on reset is sufficient. in sunxi_cpu_powerup()
201 if (!sunxi_core_is_cortex_a15(cpu, cluster)) in sunxi_cpu_powerup()
204 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cpu_powerup()
207 sunxi_cpu_power_switch_set(cpu, cluster, true); in sunxi_cpu_powerup()
216 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cpu_powerup()
218 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cpu_powerup()
227 /* de-assert processor power-on reset */ in sunxi_cpu_powerup()
228 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
230 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
234 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
237 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
241 /* de-assert all processor resets */ in sunxi_cpu_powerup()
242 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cpu_powerup()
245 if (!sunxi_core_is_cortex_a15(cpu, cluster)) in sunxi_cpu_powerup()
249 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cpu_powerup()
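Taken together, the matches in sunxi_cpu_powerup() imply this ordering: hold the core in power-on reset, open its power switch, clear its power-off gate, then release the resets. A compressed sketch of that ordering; the per-core bit helpers and the settle delay are assumed names, not taken from the matches:

        /* 1. hold the core in power-on reset (bit helper name assumed) */
        reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
        reg &= ~PRCM_CPU_PO_RST_CTRL_CORE(cpu);
        writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

        /* 2. open the power clamp, then un-gate the core's power domain */
        sunxi_cpu_power_switch_set(cpu, cluster, true);
        reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
        reg &= ~PRCM_PWROFF_GATING_REG_CORE(cpu);       /* bit helper assumed */
        writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
        udelay(20);

        /* 3. release power-on reset, then the remaining core resets */
        reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
        reg |= PRCM_CPU_PO_RST_CTRL_CORE(cpu);
        writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

        reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
        reg |= CPUCFG_CX_RST_CTRL_CORE_RST(cpu);        /* bit helper assumed */
        writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));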
254 static int sunxi_cluster_powerup(unsigned int cluster) in sunxi_cluster_powerup() argument
258 pr_debug("%s: cluster %u\n", __func__, cluster); in sunxi_cluster_powerup()
259 if (cluster >= SUNXI_NR_CLUSTERS) in sunxi_cluster_powerup()
260 return -EINVAL; in sunxi_cluster_powerup()
262 /* For A83T, assert cluster cores resets */ in sunxi_cluster_powerup()
264 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
266 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
271 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_powerup()
273 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_powerup()
275 /* assert cluster processor power-on resets */ in sunxi_cluster_powerup()
276 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cluster_powerup()
278 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cluster_powerup()
280 /* assert cluster cores resets */ in sunxi_cluster_powerup()
283 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cluster_powerup()
286 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cluster_powerup()
290 /* assert cluster resets */ in sunxi_cluster_powerup()
291 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
299 * to ARM manuals, asserting power-on reset is sufficient. in sunxi_cluster_powerup()
301 if (!sunxi_core_is_cortex_a15(0, cluster)) in sunxi_cluster_powerup()
304 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
307 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster)); in sunxi_cluster_powerup()
308 if (sunxi_core_is_cortex_a15(0, cluster)) { in sunxi_cluster_powerup()
309 /* Cortex-A15: hold L2RSTDISABLE low */ in sunxi_cluster_powerup()
312 /* Cortex-A7: hold L1RSTDISABLE and L2RSTDISABLE low */ in sunxi_cluster_powerup()
316 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster)); in sunxi_cluster_powerup()
318 /* clear cluster power gate */ in sunxi_cluster_powerup()
319 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cluster_powerup()
324 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cluster_powerup()
327 /* de-assert cluster resets */ in sunxi_cluster_powerup()
328 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
332 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
334 /* de-assert ACINACTM */ in sunxi_cluster_powerup()
335 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_powerup()
337 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_powerup()
344 * enable CCI-400 and proper cluster cache disable before power down.
350 * On the Cortex-A15 we need to disable in sunxi_cluster_cache_disable_without_axi()
360 /* Flush all cache levels for this cluster. */ in sunxi_cluster_cache_disable_without_axi()
364 * Disable cluster-level coherency by masking in sunxi_cluster_cache_disable_without_axi()
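The comment at line 364 leads into the cluster-level coherency shutdown. With linux/arm-cci.h already included (line 14), the usual way to do this, as in the MCPM back ends this file credits, is to flush the cache hierarchy and then take the cluster's slave port off the CCI-400. A sketch assuming the driver follows that pattern; neither call is visible in the matches:

        /* flush all cache levels for this cluster, then mask its incoming
         * snoops and DVM messages on the CCI (pattern from the credited
         * MCPM back ends, not from the matched lines themselves) */
        v7_exit_coherency_flush(all);
        cci_disable_port_by_cpu(read_cpuid_mpidr());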
375 static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster) in sunxi_mc_smp_cluster_is_down() argument
380 if (sunxi_mc_smp_cpu_table[cluster][i]) in sunxi_mc_smp_cluster_is_down()
394 unsigned int mpidr, cpu, cluster; in sunxi_mc_smp_boot_secondary() local
398 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in sunxi_mc_smp_boot_secondary()
401 return -ENODEV; in sunxi_mc_smp_boot_secondary()
402 if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) in sunxi_mc_smp_boot_secondary()
403 return -EINVAL; in sunxi_mc_smp_boot_secondary()
407 if (sunxi_mc_smp_cpu_table[cluster][cpu]) in sunxi_mc_smp_boot_secondary()
410 if (sunxi_mc_smp_cluster_is_down(cluster)) { in sunxi_mc_smp_boot_secondary()
412 sunxi_cluster_powerup(cluster); in sunxi_mc_smp_boot_secondary()
419 sunxi_cpu_powerup(cpu, cluster); in sunxi_mc_smp_boot_secondary()
422 sunxi_mc_smp_cpu_table[cluster][cpu]++; in sunxi_mc_smp_boot_secondary()
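A sketch of the boot path built from the matches above: the logical CPU is translated to a (cluster, core) pair via its MPIDR, the cluster is powered up first if no core in it is currently accounted for, then the core itself is released and its usage count bumped. The spinlock and the first-comer flag are assumed names added to keep the sketch self-contained:

static int sunxi_mc_smp_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = cpu_logical_map(l_cpu);
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        if (!cpucfg_base)
                return -ENODEV;
        if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
                return -EINVAL;

        spin_lock_irq(&boot_lock);              /* lock name assumed */

        if (sunxi_mc_smp_cpu_table[cluster][cpu])
                goto out;

        if (sunxi_mc_smp_cluster_is_down(cluster)) {
                sunxi_mc_smp_first_comer = true;        /* flag name assumed */
                sunxi_cluster_powerup(cluster);
        } else {
                sunxi_mc_smp_first_comer = false;
        }

        sunxi_cpu_powerup(cpu, cluster);

out:
        /* count how many times this core has been (re)started */
        sunxi_mc_smp_cpu_table[cluster][cpu]++;
        spin_unlock_irq(&boot_lock);

        return 0;
}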
431 unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1); in sunxi_cluster_cache_disable() local
434 pr_debug("%s: cluster %u\n", __func__, cluster); in sunxi_cluster_cache_disable()
439 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_cache_disable()
441 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_cache_disable()
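sunxi_cluster_cache_disable() runs on the last core leaving a cluster; after flushing and detaching from the CCI it raises ACINACTM so the cluster's ACE master interface goes idle before power is cut. A sketch of that final step, with the bit name assumed:

        sunxi_cluster_cache_disable_without_axi();

        /* last man standing: quiesce the cluster's ACE master port before
         * the cluster loses power (bit name assumed) */
        reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
        reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
        writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));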
446 unsigned int mpidr, cpu, cluster; in sunxi_mc_smp_cpu_die() local
451 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in sunxi_mc_smp_cpu_die()
452 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu); in sunxi_mc_smp_cpu_die()
455 sunxi_mc_smp_cpu_table[cluster][cpu]--; in sunxi_mc_smp_cpu_die()
456 if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) { in sunxi_mc_smp_cpu_die()
462 } else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) { in sunxi_mc_smp_cpu_die()
463 pr_err("Cluster %d CPU%d boots multiple times\n", in sunxi_mc_smp_cpu_die()
464 cluster, cpu); in sunxi_mc_smp_cpu_die()
468 last_man = sunxi_mc_smp_cluster_is_down(cluster); in sunxi_mc_smp_cpu_die()
481 static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster) in sunxi_cpu_powerdown() argument
486 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu); in sunxi_cpu_powerdown()
487 if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS) in sunxi_cpu_powerdown()
488 return -EINVAL; in sunxi_cpu_powerdown()
494 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cpu_powerdown()
496 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cpu_powerdown()
500 sunxi_cpu_power_switch_set(cpu, cluster, false); in sunxi_cpu_powerdown()
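The power-down path mirrors the bring-up: first the core's domain is gated off in the PRCM, then the clamp is closed. A sketch of the function implied by the matches, with the gating bit helper name and the settle delay assumed:

static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
        u32 reg;

        pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
        if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
                return -EINVAL;

        /* gate the core's power domain (bit helper name assumed) */
        reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
        reg |= PRCM_PWROFF_GATING_REG_CORE(cpu);
        writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
        udelay(20);

        /* close the power clamp */
        sunxi_cpu_power_switch_set(cpu, cluster, false);

        return 0;
}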
505 static int sunxi_cluster_powerdown(unsigned int cluster) in sunxi_cluster_powerdown() argument
509 pr_debug("%s: cluster %u\n", __func__, cluster); in sunxi_cluster_powerdown()
510 if (cluster >= SUNXI_NR_CLUSTERS) in sunxi_cluster_powerdown()
511 return -EINVAL; in sunxi_cluster_powerdown()
513 /* assert cluster resets or system will hang */ in sunxi_cluster_powerdown()
514 pr_debug("%s: assert cluster reset\n", __func__); in sunxi_cluster_powerdown()
515 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerdown()
519 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerdown()
521 /* gate cluster power */ in sunxi_cluster_powerdown()
522 pr_debug("%s: gate cluster power\n", __func__); in sunxi_cluster_powerdown()
523 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cluster_powerdown()
528 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cluster_powerdown()
536 unsigned int mpidr, cpu, cluster; in sunxi_mc_smp_cpu_kill() local
543 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in sunxi_mc_smp_cpu_kill()
546 if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS || in sunxi_mc_smp_cpu_kill()
565 if (sunxi_mc_smp_cpu_table[cluster][cpu]) in sunxi_mc_smp_cpu_kill()
568 reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster)); in sunxi_mc_smp_cpu_kill()
579 sunxi_cpu_powerdown(cpu, cluster); in sunxi_mc_smp_cpu_kill()
581 if (!sunxi_mc_smp_cluster_is_down(cluster)) in sunxi_mc_smp_cpu_kill()
584 /* wait for cluster L2 WFI */ in sunxi_mc_smp_cpu_kill()
585 ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg, in sunxi_mc_smp_cpu_kill()
590 * Ignore timeout on the cluster. Leaving the cluster on in sunxi_mc_smp_cpu_kill()
599 /* Power down cluster */ in sunxi_mc_smp_cpu_kill()
600 sunxi_cluster_powerdown(cluster); in sunxi_mc_smp_cpu_kill()
604 pr_debug("%s: cluster %u cpu %u powerdown: %d\n", in sunxi_mc_smp_cpu_kill()
605 __func__, cluster, cpu, ret); in sunxi_mc_smp_cpu_kill()
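sunxi_mc_smp_cpu_kill() does not cut power blindly: it polls CPUCFG_CX_STATUS until the dying core reports STANDBYWFI and only then calls sunxi_cpu_powerdown(); if the whole cluster is down it additionally waits for the L2 WFI bit before sunxi_cluster_powerdown(), but tolerates a timeout there. A sketch of the per-core wait, with the status bit helper and the poll/timeout constants assumed:

        /* wait for the dying core to reach WFI before removing power */
        ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
                                 reg & CPUCFG_CX_STATUS_STANDBYWFI(cpu),
                                 POLL_USEC, TIMEOUT_USEC);
        if (ret)
                goto out;       /* never power off a core that is still running */

        sunxi_cpu_powerdown(cpu, cluster);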
611 /* CPU0 hotplug not handled for sun8i-a83t */ in sunxi_mc_smp_cpu_can_disable()
631 unsigned int mpidr, cpu, cluster; in sunxi_mc_smp_cpu_table_init() local
635 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in sunxi_mc_smp_cpu_table_init()
637 if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) { in sunxi_mc_smp_cpu_table_init()
641 sunxi_mc_smp_cpu_table[cluster][cpu] = 1; in sunxi_mc_smp_cpu_table_init()
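The table starts with a single entry: the boot CPU marks itself as up so the last-man accounting works from the first hot-unplug onward. A sketch of the init helper implied by the matches, with the error message wording assumed:

static void __init sunxi_mc_smp_cpu_table_init(void)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
                pr_err("%s: boot CPU is out of bounds!\n", __func__);
                return;
        }

        /* the boot CPU is already running: account for it */
        sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
}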
648 * We need the trampoline code to enable CCI-400 on the first cluster
669 * We're going to soft-restart the current CPU through the in sunxi_mc_smp_loopback()
670 * low-level MCPM code by leveraging the suspend/resume in sunxi_mc_smp_loopback()
700 /* This structure holds SoC-specific bits tied to an enable-method string. */
709 of_node_put(nodes->prcm_node); in sunxi_mc_smp_put_nodes()
710 of_node_put(nodes->cpucfg_node); in sunxi_mc_smp_put_nodes()
711 of_node_put(nodes->sram_node); in sunxi_mc_smp_put_nodes()
712 of_node_put(nodes->r_cpucfg_node); in sunxi_mc_smp_put_nodes()
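sunxi_mc_smp_put_nodes() drops references to four device nodes, which implies a small bundle of node pointers passed around during probe. A sketch of that structure, with the struct and field names inferred from the put calls above:

struct sunxi_mc_smp_nodes {
        struct device_node *prcm_node;
        struct device_node *cpucfg_node;
        struct device_node *sram_node;
        struct device_node *r_cpucfg_node;      /* only used on sun8i-a83t */
};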
718 nodes->prcm_node = of_find_compatible_node(NULL, NULL, in sun9i_a80_get_smp_nodes()
719 "allwinner,sun9i-a80-prcm"); in sun9i_a80_get_smp_nodes()
720 if (!nodes->prcm_node) { in sun9i_a80_get_smp_nodes()
722 return -ENODEV; in sun9i_a80_get_smp_nodes()
725 nodes->cpucfg_node = of_find_compatible_node(NULL, NULL, in sun9i_a80_get_smp_nodes()
726 "allwinner,sun9i-a80-cpucfg"); in sun9i_a80_get_smp_nodes()
727 if (!nodes->cpucfg_node) { in sun9i_a80_get_smp_nodes()
729 return -ENODEV; in sun9i_a80_get_smp_nodes()
732 nodes->sram_node = of_find_compatible_node(NULL, NULL, in sun9i_a80_get_smp_nodes()
733 "allwinner,sun9i-a80-smp-sram"); in sun9i_a80_get_smp_nodes()
734 if (!nodes->sram_node) { in sun9i_a80_get_smp_nodes()
736 return -ENODEV; in sun9i_a80_get_smp_nodes()
744 nodes->prcm_node = of_find_compatible_node(NULL, NULL, in sun8i_a83t_get_smp_nodes()
745 "allwinner,sun8i-a83t-r-ccu"); in sun8i_a83t_get_smp_nodes()
746 if (!nodes->prcm_node) { in sun8i_a83t_get_smp_nodes()
748 return -ENODEV; in sun8i_a83t_get_smp_nodes()
751 nodes->cpucfg_node = of_find_compatible_node(NULL, NULL, in sun8i_a83t_get_smp_nodes()
752 "allwinner,sun8i-a83t-cpucfg"); in sun8i_a83t_get_smp_nodes()
753 if (!nodes->cpucfg_node) { in sun8i_a83t_get_smp_nodes()
755 return -ENODEV; in sun8i_a83t_get_smp_nodes()
758 nodes->r_cpucfg_node = of_find_compatible_node(NULL, NULL, in sun8i_a83t_get_smp_nodes()
759 "allwinner,sun8i-a83t-r-cpucfg"); in sun8i_a83t_get_smp_nodes()
760 if (!nodes->r_cpucfg_node) { in sun8i_a83t_get_smp_nodes()
762 return -ENODEV; in sun8i_a83t_get_smp_nodes()
770 .enable_method = "allwinner,sun9i-a80-smp",
774 .enable_method = "allwinner,sun8i-a83t-smp",
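The two entries at lines 770 and 774 belong to a per-SoC match table keyed on the enable-method string. A sketch of how such a table is typically laid out; the callback and flag fields are assumptions, only the two enable-method strings come from the matches:

struct sunxi_mc_smp_data {
        const char *enable_method;
        int (*get_smp_nodes)(struct sunxi_mc_smp_nodes *nodes);
        bool is_a83t;           /* the A83T needs the extra R_CPUCFG block */
};

static const struct sunxi_mc_smp_data sunxi_mc_smp_data[] = {
        {
                .enable_method  = "allwinner,sun9i-a80-smp",
                .get_smp_nodes  = sun9i_a80_get_smp_nodes,
        },
        {
                .enable_method  = "allwinner,sun8i-a83t-smp",
                .get_smp_nodes  = sun8i_a83t_get_smp_nodes,
                .is_a83t        = true,
        },
};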
789 * Don't bother checking the "cpus" node, as an enable-method in sunxi_mc_smp_init()
794 return -ENODEV; in sunxi_mc_smp_init()
797 * We can't actually use the enable-method magic in the kernel. in sunxi_mc_smp_init()
805 ret = of_property_match_string(node, "enable-method", in sunxi_mc_smp_init()
813 return -ENODEV; in sunxi_mc_smp_init()
818 return -EINVAL; in sunxi_mc_smp_init()
821 pr_err("%s: CCI-400 not available\n", __func__); in sunxi_mc_smp_init()
822 return -ENODEV; in sunxi_mc_smp_init()
837 ret = -ENOMEM; in sunxi_mc_smp_init()
842 "sunxi-mc-smp"); in sunxi_mc_smp_init()
852 0, "sunxi-mc-smp"); in sunxi_mc_smp_init()
855 pr_err("%s: failed to map R-CPUCFG registers\n", in sunxi_mc_smp_init()
861 "sunxi-mc-smp"); in sunxi_mc_smp_init()
869 /* Configure CCI-400 for boot cluster */ in sunxi_mc_smp_init()
872 pr_err("%s: failed to configure boot cluster: %d\n", in sunxi_mc_smp_init()
887 /* Actually enable multi cluster SMP */ in sunxi_mc_smp_init()
890 pr_info("sunxi multi cluster SMP support installed\n"); in sunxi_mc_smp_init()
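Finally, sunxi_mc_smp_init() scans the cpus node's enable-method property to pick the matching entry, maps the PRCM, CPUCFG and (on the A83T) R_CPUCFG register blocks, configures the CCI-400 port for the boot cluster, and only then installs the SMP operations. A sketch of the enable-method selection step, assuming the per-SoC table sketched above:

        for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
                ret = of_property_match_string(node, "enable-method",
                                               sunxi_mc_smp_data[i].enable_method);
                if (ret >= 0)
                        break;
        }
        of_node_put(node);
        if (ret < 0)
                return -ENODEV;         /* no supported enable-method found */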