/arch/arm/common/

mcpm_entry.c
    34  static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)  [in __mcpm_cpu_going_down() argument]
    36  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;  [in __mcpm_cpu_going_down()]
    37  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);  [in __mcpm_cpu_going_down()]
    47  static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)  [in __mcpm_cpu_down() argument]
    50  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;  [in __mcpm_cpu_down()]
    51  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);  [in __mcpm_cpu_down()]
    63  static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)  [in __mcpm_outbound_leave_critical() argument]
    66  mcpm_sync.clusters[cluster].cluster = state;  [in __mcpm_outbound_leave_critical()]
    67  sync_cache_w(&mcpm_sync.clusters[cluster].cluster);  [in __mcpm_outbound_leave_critical()]
    82  static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)  [in __mcpm_outbound_enter_critical() argument]
    [all …]
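The pattern repeated by these helpers is the core of the MCPM state machine: update a per-CPU (or per-cluster) state word, then push it out to main memory with sync_cache_w() so that other CPUs, which may be running with their caches disabled, observe the new state. A minimal sketch of that publishing step, assuming sync_cache_w() from <asm/cacheflush.h>; cpu_state is a hypothetical stand-in for the real mcpm_sync.clusters[c].cpus[n].cpu field:

        /* Hedged sketch of the MCPM state-publishing idiom above; 'cpu_state' is a
         * hypothetical stand-in for mcpm_sync.clusters[c].cpus[n].cpu. */
        #include <asm/cacheflush.h>             /* sync_cache_w() */

        static int cpu_state;                   /* hypothetical per-CPU state word */

        static void publish_cpu_state(int new_state)
        {
                cpu_state = new_state;          /* update the state word */
                sync_cache_w(&cpu_state);       /* clean it to memory so CPUs running
                                                   with caches off can read it */
        }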
mcpm_head.S
    54  ubfx r10, r0, #8, #8 @ r10 = cluster
    86  mla r8, r0, r10, r8 @ r8 = sync cluster base
    94  @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
    98  mla r11, r0, r10, r11 @ r11 = cluster first man lock
    104 bne mcpm_setup_wait @ wait for cluster setup if so
    107 cmp r0, #CLUSTER_UP @ cluster already up?
    108 bne mcpm_setup @ if not, set up the cluster
    118 @ Signal that the cluster is being brought up:
    123 @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
    126 @ Wait for any previously-pending cluster teardown operations to abort
    [all …]
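The ubfx at line 54 extracts affinity level 1 of the MPIDR held in r0, i.e. the cluster number, and the rest of the entry code then indexes the sync structures by (cpu, cluster). In C the same decode is normally written with the <asm/cputype.h> helpers; a small sketch:

        /* Sketch of the MPIDR decode done by 'ubfx r10, r0, #8, #8' above,
         * using the standard ARM helpers. */
        #include <asm/cputype.h>

        static void decode_mpidr(unsigned int *cpu, unsigned int *cluster)
        {
                unsigned int mpidr = read_cpuid_mpidr();

                *cpu     = MPIDR_AFFINITY_LEVEL(mpidr, 0);  /* bits [7:0]  */
                *cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  /* bits [15:8] */
        }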
bL_switcher.c
    273 int cluster;  [in bL_switcher_thread() local]
    288 cluster = t->wanted_cluster;  [in bL_switcher_thread()]
    295 if (cluster != -1) {  [in bL_switcher_thread()]
    296 bL_switch_to(cluster);  [in bL_switcher_thread()]
    422 unsigned int cpu, cluster, mask;  [in bL_switcher_halve_cpus() local]
    429 cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);  [in bL_switcher_halve_cpus()]
    430 if (cluster >= 2) {  [in bL_switcher_halve_cpus()]
    436 mask |= (1 << cluster);  [in bL_switcher_halve_cpus()]
    454 cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);  [in bL_switcher_halve_cpus()]
    456 cluster_0 = cluster;  [in bL_switcher_halve_cpus()]
    [all …]
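The switcher thread sleeps until a wanted_cluster is posted and then performs the migration with bL_switch_to(). Callers normally go through the request API instead; a hedged sketch of asking the switcher to move logical CPU 0 to cluster 1, assuming the switcher is built in and enabled:

        /* Hedged sketch: ask the big.LITTLE switcher to migrate a logical CPU
         * to the other cluster. */
        #include <asm/bL_switcher.h>

        static int move_cpu0_to_big_cluster(void)
        {
                return bL_switch_request(0, 1); /* logical CPU 0 -> cluster id 1 */
        }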
bL_switcher_dummy_if.c
    22  unsigned int cpu, cluster;  [in bL_switcher_write() local]
    40  cluster = val[2] - '0';  [in bL_switcher_write()]
    41  ret = bL_switch_request(cpu, cluster);  [in bL_switcher_write()]
/arch/arm/mach-vexpress/

tc2_pm.c
    48  static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)  [in tc2_pm_cpu_powerup() argument]
    50  pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);  [in tc2_pm_cpu_powerup()]
    51  if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])  [in tc2_pm_cpu_powerup()]
    53  ve_spc_set_resume_addr(cluster, cpu,  [in tc2_pm_cpu_powerup()]
    55  ve_spc_cpu_wakeup_irq(cluster, cpu, true);  [in tc2_pm_cpu_powerup()]
    59  static int tc2_pm_cluster_powerup(unsigned int cluster)  [in tc2_pm_cluster_powerup() argument]
    61  pr_debug("%s: cluster %u\n", __func__, cluster);  [in tc2_pm_cluster_powerup()]
    62  if (cluster >= TC2_CLUSTERS)  [in tc2_pm_cluster_powerup()]
    64  ve_spc_powerdown(cluster, false);  [in tc2_pm_cluster_powerup()]
    68  static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)  [in tc2_pm_cpu_powerdown_prepare() argument]
    [all …]
dcscb.c
    39  static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)  [in dcscb_cpu_powerup() argument]
    43  pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);  [in dcscb_cpu_powerup()]
    44  if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))  [in dcscb_cpu_powerup()]
    47  rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);  [in dcscb_cpu_powerup()]
    49  writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);  [in dcscb_cpu_powerup()]
    53  static int dcscb_cluster_powerup(unsigned int cluster)  [in dcscb_cluster_powerup() argument]
    57  pr_debug("%s: cluster %u\n", __func__, cluster);  [in dcscb_cluster_powerup()]
    58  if (cluster >= 2)  [in dcscb_cluster_powerup()]
    62  rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);  [in dcscb_cluster_powerup()]
    64  rst_hold |= dcscb_allcpus_mask[cluster];  [in dcscb_cluster_powerup()]
    [all …]
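Both DCSCB callbacks use the same read-modify-write of a per-cluster reset-hold register located at RST_HOLD0 + cluster * 4. A hedged sketch of that update; the base pointer, the offset and the bit layout below are placeholders mirroring the code, not a verified register map:

        /* Hedged sketch of the per-cluster reset-hold update above: read the
         * cluster's RST_HOLD register, clear one CPU's reset bit, write it back.
         * 'base', 'RST_HOLD0' and the bit position are illustrative only. */
        #include <linux/io.h>
        #include <linux/types.h>

        #define RST_HOLD0       0x0             /* hypothetical register offset */

        static void release_cpu_reset(void __iomem *base,
                                      unsigned int cpu, unsigned int cluster)
        {
                u32 rst_hold = readl_relaxed(base + RST_HOLD0 + cluster * 4);

                rst_hold &= ~(1U << cpu);       /* deassert reset for this CPU */
                writel_relaxed(rst_hold, base + RST_HOLD0 + cluster * 4);
        }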
spc.c
    119 static inline bool cluster_is_a15(u32 cluster)  [in cluster_is_a15() argument]
    121 return cluster == info->a15_clusid;  [in cluster_is_a15()]
    158 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)  [in ve_spc_cpu_wakeup_irq() argument]
    162 if (cluster >= MAX_CLUSTERS)  [in ve_spc_cpu_wakeup_irq()]
    167 if (!cluster_is_a15(cluster))  [in ve_spc_cpu_wakeup_irq()]
    187 void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)  [in ve_spc_set_resume_addr() argument]
    191 if (cluster >= MAX_CLUSTERS)  [in ve_spc_set_resume_addr()]
    194 if (cluster_is_a15(cluster))  [in ve_spc_set_resume_addr()]
    212 void ve_spc_powerdown(u32 cluster, bool enable)  [in ve_spc_powerdown() argument]
    216 if (cluster >= MAX_CLUSTERS)  [in ve_spc_powerdown()]
    [all …]
spc.h
    13  void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
    14  void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
    15  void ve_spc_powerdown(u32 cluster, bool enable);
    16  int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
Kconfig
    59  This is needed to provide CPU and cluster power management
    68  between the dual cluster test-chip and the M3 microcontroller that
    78  Support for CPU and cluster power management on Versatile Express
/arch/arm/mach-sunxi/

mc_smp.c
    88  static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)  [in sunxi_core_is_cortex_a15() argument]
    91  int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;  [in sunxi_core_is_cortex_a15()]
    106 __func__, cluster, core);  [in sunxi_core_is_cortex_a15()]
    116 static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,  [in sunxi_cpu_power_switch_set() argument]
    122 reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));  [in sunxi_cpu_power_switch_set()]
    126 cluster, cpu);  [in sunxi_cpu_power_switch_set()]
    130 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));  [in sunxi_cpu_power_switch_set()]
    132 writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));  [in sunxi_cpu_power_switch_set()]
    134 writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));  [in sunxi_cpu_power_switch_set()]
    136 writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));  [in sunxi_cpu_power_switch_set()]
    [all …]
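The writes at lines 130-136 step the per-core power switch through intermediate values rather than closing it in one go, which limits in-rush current when a core domain is powered on. A hedged sketch of that staged sequence; switch_reg stands in for prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu), and the 10 microsecond settling delay between steps is an assumption of the sketch:

        /* Hedged sketch of the staged power-switch enable above: close the switch
         * gradually to limit in-rush current. 'switch_reg' is a hypothetical
         * stand-in for the mapped PRCM power-switch register. */
        #include <linux/io.h>
        #include <linux/delay.h>
        #include <linux/kernel.h>

        static void power_switch_on(void __iomem *switch_reg)
        {
                static const u8 steps[] = { 0xfe, 0xf8, 0xf0, 0x00 };
                int i;

                writel(0xff, switch_reg);       /* start from fully open (off) */
                udelay(10);
                for (i = 0; i < ARRAY_SIZE(steps); i++) {
                        writel(steps[i], switch_reg);
                        udelay(10);
                }
        }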
/arch/arm/mach-exynos/

mcpm-exynos.c
    59  static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)  [in exynos_cpu_powerup() argument]
    61  unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);  [in exynos_cpu_powerup()]
    64  pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);  [in exynos_cpu_powerup()]
    66  cluster >= EXYNOS5420_NR_CLUSTERS)  [in exynos_cpu_powerup()]
    78  if (cluster &&  [in exynos_cpu_powerup()]
    79  cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {  [in exynos_cpu_powerup()]
    95  cpu, cluster);  [in exynos_cpu_powerup()]
    108 static int exynos_cluster_powerup(unsigned int cluster)  [in exynos_cluster_powerup() argument]
    110 pr_debug("%s: cluster %u\n", __func__, cluster);  [in exynos_cluster_powerup()]
    111 if (cluster >= EXYNOS5420_NR_CLUSTERS)  [in exynos_cluster_powerup()]
    [all …]
platsmp.c
    139 void exynos_cluster_power_down(int cluster)  [in exynos_cluster_power_down() argument]
    141 pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));  [in exynos_cluster_power_down()]
    148 void exynos_cluster_power_up(int cluster)  [in exynos_cluster_power_up() argument]
    151 EXYNOS_COMMON_CONFIGURATION(cluster));  [in exynos_cluster_power_up()]
    159 int exynos_cluster_power_state(int cluster)  [in exynos_cluster_power_state() argument]
    161 return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &  [in exynos_cluster_power_state()]
common.h
    132 extern void exynos_cluster_power_down(int cluster);
    133 extern void exynos_cluster_power_up(int cluster);
    134 extern int exynos_cluster_power_state(int cluster);
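A hedged usage sketch of this small PMU API: request cluster power-up, then poll the status helper until the cluster reports as powered. Treating a non-zero return from exynos_cluster_power_state() as "up", and the 100-iteration timeout, are assumptions made for the sketch; the declarations come from the mach-exynos local common.h:

        /* Hedged sketch: power an Exynos cluster up and wait for the PMU status.
         * The timeout and the "non-zero means powered" reading are assumptions. */
        #include <linux/delay.h>
        #include <linux/errno.h>
        #include "common.h"                     /* mach-exynos local declarations */

        static int bring_cluster_up(int cluster)
        {
                int timeout = 100;

                exynos_cluster_power_up(cluster);
                while (!exynos_cluster_power_state(cluster)) {
                        if (--timeout == 0)
                                return -ETIMEDOUT;
                        udelay(10);
                }
                return 0;
        }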
suspend.c
    268 unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  [in exynos5420_cpu_suspend() local]
    272 mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);  [in exynos5420_cpu_suspend()]
    453 unsigned int mpidr, cluster;  [in exynos5420_prepare_pm_resume() local]
    456 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  [in exynos5420_prepare_pm_resume()]
    461 if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {  [in exynos5420_prepare_pm_resume()]
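Before the CPU enters a deep power-down state, the suspend path records where it should re-enter the kernel by writing the MCPM entry vector for its own (cpu, cluster) slot, as at line 272. A hedged sketch of that step; my_resume() is a hypothetical resume handler standing in for exynos_cpu_resume:

        /* Hedged sketch of the pre-suspend step above: publish the resume handler
         * for this CPU's (cpu, cluster) slot before powering down. */
        #include <asm/mcpm.h>
        #include <asm/cputype.h>

        static void my_resume(void)
        {
                /* hypothetical resume entry point */
        }

        static void set_resume_vector(void)
        {
                unsigned int mpidr   = read_cpuid_mpidr();
                unsigned int cpu     = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

                mcpm_set_entry_vector(cpu, cluster, my_resume);
        }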
/arch/arm/mach-hisi/

platmcpm.c
    71  static bool hip04_cluster_is_down(unsigned int cluster)  [in hip04_cluster_is_down() argument]
    76  if (hip04_cpu_table[cluster][i])  [in hip04_cluster_is_down()]
    81  static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)  [in hip04_set_snoop_filter() argument]
    89  data |= 1 << cluster;  [in hip04_set_snoop_filter()]
    91  data &= ~(1 << cluster);  [in hip04_set_snoop_filter()]
    100 unsigned int mpidr, cpu, cluster;  [in hip04_boot_secondary() local]
    106 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  [in hip04_boot_secondary()]
    110 if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)  [in hip04_boot_secondary()]
    115 if (hip04_cpu_table[cluster][cpu])  [in hip04_boot_secondary()]
    118 sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);  [in hip04_boot_secondary()]
    [all …]
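hip04_cluster_is_down() answers a simple question: may this cluster be powered off? It can be only when every CPU slot in the cluster's usage table has dropped to zero. A hedged, self-contained version of that check; cpu_refcnt and the limits are hypothetical stand-ins for hip04_cpu_table[][] and the HIP04 constants:

        /* Hedged sketch of the "cluster is down" check above: a cluster can only
         * be powered off once every CPU slot in its table has a zero use count. */
        #include <linux/types.h>

        #define NR_CLUSTERS             2       /* hypothetical limits */
        #define CPUS_PER_CLUSTER        4

        static unsigned int cpu_refcnt[NR_CLUSTERS][CPUS_PER_CLUSTER];

        static bool cluster_is_down(unsigned int cluster)
        {
                int i;

                for (i = 0; i < CPUS_PER_CLUSTER; i++)
                        if (cpu_refcnt[cluster][i])
                                return false;
                return true;
        }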
/arch/arm/include/asm/

mcpm.h
    44  void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
    51  void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
    84  int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
    132 int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
    219 int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
    220 int (*cluster_powerup)(unsigned int cluster);
    221 void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
    222 void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
    223 void (*cluster_powerdown_prepare)(unsigned int cluster);
    226 void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
    [all …]
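This header is the contract the platform backends listed above implement: the platform fills in an mcpm_platform_ops, registers it, and generic code can then power CPUs up and down by (cpu, cluster). A hedged sketch of the registration side; the two callbacks are empty placeholders, and a real backend would also supply the powerdown and suspend hooks and set an entry vector before powering a CPU up:

        /* Hedged sketch: register a minimal MCPM backend, then ask the MCPM core
         * to power up CPU 1 of cluster 0. The callbacks are placeholders. */
        #include <linux/init.h>
        #include <asm/mcpm.h>

        static int my_cpu_powerup(unsigned int cpu, unsigned int cluster)
        {
                /* program the wake-up source / resume address for (cpu, cluster) */
                return 0;
        }

        static int my_cluster_powerup(unsigned int cluster)
        {
                /* switch on cluster power and restore interconnect coherency */
                return 0;
        }

        static const struct mcpm_platform_ops my_ops = {
                .cpu_powerup     = my_cpu_powerup,
                .cluster_powerup = my_cluster_powerup,
        };

        static int __init my_mcpm_init(void)
        {
                int ret = mcpm_platform_register(&my_ops);

                if (!ret)
                        ret = mcpm_cpu_power_up(1, 0);  /* cpu 1, cluster 0 */
                return ret;
        }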
/arch/mips/include/asm/

mips-cps.h
    133 static inline uint64_t mips_cps_cluster_config(unsigned int cluster)  [in mips_cps_cluster_config() argument]
    143 WARN_ON(cluster != 0);  [in mips_cps_cluster_config()]
    151 mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);  [in mips_cps_cluster_config()]
    166 static inline unsigned int mips_cps_numcores(unsigned int cluster)  [in mips_cps_numcores() argument]
    172 return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;  [in mips_cps_numcores()]
    182 static inline unsigned int mips_cps_numiocu(unsigned int cluster)  [in mips_cps_numiocu() argument]
    189 num_iocu = mips_cps_cluster_config(cluster) & CM_GCR_CONFIG_NUMIOCU;  [in mips_cps_numiocu()]
    203 static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int core)  [in mips_cps_numvps() argument]
    214 mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);  [in mips_cps_numvps()]
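All of these helpers follow the same bracket: redirect the Coherence Manager's "other" register block at the target (cluster, core) with mips_cm_lock_other(), read what is needed, then drop the redirect. A minimal sketch of that bracket, with the actual register access left as a comment:

        /* Hedged sketch of the lock-other bracket used by the helpers above. */
        #include <asm/mips-cm.h>

        static void probe_remote_core(unsigned int cluster, unsigned int core)
        {
                mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                /* ... read the redirected GCR/CPC registers of (cluster, core) ... */
                mips_cm_unlock_other();
        }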
mips-cm.h
    424 extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
    437 static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,  [in mips_cm_lock_other() argument]
/arch/arm/mach-milbeaut/

platsmp.c
    25  unsigned int mpidr, cpu, cluster;  [in m10v_boot_secondary() local]
    32  cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  [in m10v_boot_secondary()]
    38  __func__, cpu, l_cpu, cluster);  [in m10v_boot_secondary()]
    48  unsigned int mpidr, cpu, cluster;  [in m10v_smp_init() local]
    61  cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);  [in m10v_smp_init()]
    62  pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster);  [in m10v_smp_init()]
/arch/alpha/kernel/

setup.c
    259 for ((_cluster) = (memdesc)->cluster, (i) = 0; \
    306 struct memclust_struct * cluster;  [in setup_memory() local]
    315 for_each_mem_cluster(memdesc, cluster, i) {  [in setup_memory()]
    319 i, cluster->usage, cluster->start_pfn,  [in setup_memory()]
    320 cluster->start_pfn + cluster->numpages);  [in setup_memory()]
    322 end = cluster->start_pfn + cluster->numpages;  [in setup_memory()]
    326 memblock_add(PFN_PHYS(cluster->start_pfn),  [in setup_memory()]
    327 cluster->numpages << PAGE_SHIFT);  [in setup_memory()]
    332 if (cluster->usage & 3)  [in setup_memory()]
    333 memblock_reserve(PFN_PHYS(cluster->start_pfn),  [in setup_memory()]
    [all …]
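Here cluster means an Alpha HWRPB memory cluster, not a CPU cluster: setup_memory() walks the firmware's memory descriptor, hands each cluster to memblock, and keeps the ranges the console firmware marks as in use (usage & 3) reserved. A hedged sketch of that walk, with a simplified record standing in for struct memclust_struct:

        /* Hedged sketch of the memblock registration loop above. 'struct mem_cluster'
         * is a simplified, hypothetical stand-in for the Alpha memclust_struct. */
        #include <linux/memblock.h>
        #include <linux/pfn.h>

        struct mem_cluster {
                unsigned long start_pfn;
                unsigned long numpages;
                unsigned long usage;            /* low bits set: owned by console */
        };

        static void __init add_clusters(struct mem_cluster *c, int n)
        {
                int i;

                for (i = 0; i < n; i++, c++) {
                        memblock_add(PFN_PHYS(c->start_pfn),
                                     c->numpages << PAGE_SHIFT);
                        if (c->usage & 3)       /* firmware-owned: keep it reserved */
                                memblock_reserve(PFN_PHYS(c->start_pfn),
                                                 c->numpages << PAGE_SHIFT);
                }
        }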
/arch/x86/kernel/apic/

x2apic_cluster.c
    103 u32 cluster, apicid = apic_read(APIC_LDR);  [in init_x2apic_ldr() local]
    111 cluster = apicid >> 16;  [in init_x2apic_ldr()]
    115 if (cmsk && cmsk->clusterid == cluster)  [in init_x2apic_ldr()]
    119 cmsk->clusterid = cluster;  [in init_x2apic_ldr()]
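In x2APIC cluster mode the Logical Destination Register packs a cluster ID into bits 31:16 and a one-bit-per-CPU logical mask into bits 15:0, which is why line 111 derives the cluster with a 16-bit shift. A small illustration of the decode:

        /* Sketch of the x2APIC LDR layout used above: bits 31:16 hold the cluster
         * ID, bits 15:0 the logical ID of the CPU within that cluster. */
        #include <linux/types.h>

        static void decode_x2apic_ldr(u32 ldr, u32 *cluster, u32 *logical_id)
        {
                *cluster    = ldr >> 16;
                *logical_id = ldr & 0xffff;
        }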
/arch/mips/kernel/

mips-cm.c
    298 void mips_cm_lock_other(unsigned int cluster, unsigned int core,  [in mips_cm_lock_other() argument]
    313 val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);  [in mips_cm_lock_other()]
    316 WARN_ON(cluster != 0);  [in mips_cm_lock_other()]
    332 WARN_ON(cluster != 0);  [in mips_cm_lock_other()]
cacheinfo.c
    70  int cluster = cpu_cluster(&cpu_data[cpu]);  [in fill_cpumask_cluster() local]
    73  if (cpu_cluster(&cpu_data[cpu1]) == cluster)  [in fill_cpumask_cluster()]
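fill_cpumask_cluster() collects the CPUs that share a cluster so cacheinfo can report them as the shared-CPU map of the cluster-level cache. A hedged, generic version of that loop; my_cluster_id() is a hypothetical lookup standing in for cpu_cluster(&cpu_data[...]):

        /* Hedged sketch of the cluster-mask fill above, written against the
         * generic cpumask API; the cluster lookup is passed in as a callback. */
        #include <linux/cpumask.h>

        static void fill_cluster_mask(struct cpumask *mask, int cpu,
                                      int (*my_cluster_id)(int cpu))
        {
                int cpu1, cluster = my_cluster_id(cpu);

                for_each_possible_cpu(cpu1)
                        if (my_cluster_id(cpu1) == cluster)
                                cpumask_set_cpu(cpu1, mask);
        }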
/arch/arm/boot/dts/

exynos5422-odroidxu3.dts
    27  /* A15 cluster: VDD_ARM */
    48  /* A7 cluster: VDD_KFC */
/arch/arm64/boot/dts/ti/

k3-am6528-iot2050-basic.dts
    65  ti,cluster-mode = <0>;