Lines matching "cluster" in arch/arm/common/mcpm_entry.c:
2 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
27 * see Documentation/arm/cluster-pm-race-avoidance.txt.
37 static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_going_down() argument
39 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
40 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
45 * cluster can be torn down without disrupting this CPU.
50 static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster) in __mcpm_cpu_down() argument
53 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
54 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
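Both helpers use the same publish pattern: store the new per-CPU state in the shared mcpm_sync structure, then push the write out with sync_cache_w() so that CPUs running with their caches disabled still observe it. The read side needs the converse maintenance; a hypothetical reader for this per-CPU flag, mirroring the __mcpm_cluster_state() helper shown further down, might look like this (a sketch, not part of the file):

    static int __mcpm_cpu_state(unsigned int cpu, unsigned int cluster)
    {
        /* Invalidate before reading so a store by a cache-off CPU is seen. */
        sync_cache_r(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
        return mcpm_sync.clusters[cluster].cpus[cpu].cpu;
    }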
59 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
60 * @state: the final state of the cluster:
61 *     CLUSTER_UP: no destructive teardown was done and the cluster has been restored to its previous state;
63 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
66 static void __mcpm_outbound_leave_critical(unsigned int cluster, int state) in __mcpm_outbound_leave_critical() argument
69 mcpm_sync.clusters[cluster].cluster = state; in __mcpm_outbound_leave_critical()
70 sync_cache_w(&mcpm_sync.clusters[cluster].cluster); in __mcpm_outbound_leave_critical()
75 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
81 *     false: not entered, because an inbound CPU was observed or the cluster is already being set up;
83 *     true: entered; it is now safe to tear down the cluster.
85 static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster) in __mcpm_outbound_enter_critical() argument
88 struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; in __mcpm_outbound_enter_critical()
90 /* Warn inbound CPUs that the cluster is being torn down: */ in __mcpm_outbound_enter_critical()
91 c->cluster = CLUSTER_GOING_DOWN; in __mcpm_outbound_enter_critical()
92 sync_cache_w(&c->cluster); in __mcpm_outbound_enter_critical()
94 /* Back out if the inbound cluster is already in the critical region: */ in __mcpm_outbound_enter_critical()
101 * teardown is complete on each CPU before tearing down the cluster. in __mcpm_outbound_enter_critical()
104 * shouldn't be taking the cluster down at all: abort in that case. in __mcpm_outbound_enter_critical()
134 __mcpm_outbound_leave_critical(cluster, CLUSTER_UP); in __mcpm_outbound_enter_critical()
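The matcher elides most of the function body (file lines 94-134), but the comments above outline it: after advertising CLUSTER_GOING_DOWN, the outbound CPU backs out if the inbound side is already coming up, then waits for every other CPU in the cluster to reach CPU_DOWN, aborting if any of them is (or comes back) up. A simplified sketch of that elided logic, continuing the function body above and assuming the usual mcpm.h state constants; the real loop also parks in wfe() between polls:

    /* Back out if the inbound side is already in its critical region. */
    sync_cache_r(&c->inbound);
    if (c->inbound == INBOUND_COMING_UP)
        goto abort;

    /* Wait for every other CPU to be marked DOWN before tearing down. */
    for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
        int cpustate;

        if (i == cpu)
            continue;
        do {
            sync_cache_r(&c->cpus[i].cpu);
            cpustate = c->cpus[i].cpu;
        } while (cpustate == CPU_GOING_DOWN);
        if (cpustate != CPU_DOWN)
            goto abort;   /* a CPU is still up: cancel the teardown */
    }
    return true;

abort:
    __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
    return false;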
138 static int __mcpm_cluster_state(unsigned int cluster) in __mcpm_cluster_state() argument
140 sync_cache_r(&mcpm_sync.clusters[cluster].cluster); in __mcpm_cluster_state()
141 return mcpm_sync.clusters[cluster].cluster; in __mcpm_cluster_state()
146 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) in mcpm_set_entry_vector() argument
149 mcpm_entry_vectors[cluster][cpu] = val; in mcpm_set_entry_vector()
150 sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); in mcpm_set_entry_vector()
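mcpm_set_entry_vector() publishes the physical address of the code a woken CPU should jump to once the low-level MCPM entry code releases it; the write is cache-synced because the incoming CPU fetches the vector before its caches are on. An illustrative caller, with a hypothetical function name but the same call order MCPM-based SMP bring-up uses (publish the entry point, then power the CPU up):

    static int example_boot_secondary(unsigned int cpu, unsigned int cluster)
    {
        /* secondary_startup is the standard ARM secondary-CPU entry stub. */
        mcpm_set_entry_vector(cpu, cluster, secondary_startup);
        return mcpm_cpu_power_up(cpu, cluster);
    }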
155 void mcpm_set_early_poke(unsigned cpu, unsigned cluster, in mcpm_set_early_poke() argument
158 unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0]; in mcpm_set_early_poke()
190 static inline bool mcpm_cluster_unused(unsigned int cluster) in mcpm_cluster_unused() argument
194 cnt |= mcpm_cpu_use_count[cluster][i]; in mcpm_cluster_unused()
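Only the accumulating line of mcpm_cluster_unused() matches, so the loop around it is hidden; the point of the OR is that the result becomes non-zero as soon as any CPU in the cluster holds a reference. A sketch of the whole helper under that reading:

    static inline bool mcpm_cluster_unused(unsigned int cluster)
    {
        int i, cnt;

        for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
            cnt |= mcpm_cpu_use_count[cluster][i];
        return !cnt;   /* true only if every use count is zero */
    }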
198 int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster) in mcpm_cpu_power_up() argument
203 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in mcpm_cpu_power_up()
215 cpu_is_down = !mcpm_cpu_use_count[cluster][cpu]; in mcpm_cpu_power_up()
216 cluster_is_down = mcpm_cluster_unused(cluster); in mcpm_cpu_power_up()
218 mcpm_cpu_use_count[cluster][cpu]++; in mcpm_cpu_power_up()
227 BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 && in mcpm_cpu_power_up()
228 mcpm_cpu_use_count[cluster][cpu] != 2); in mcpm_cpu_power_up()
231 ret = platform_ops->cluster_powerup(cluster); in mcpm_cpu_power_up()
233 ret = platform_ops->cpu_powerup(cpu, cluster); in mcpm_cpu_power_up()
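The guard conditions around these two calls do not match the search, but they follow from the cpu_is_down/cluster_is_down flags sampled at file lines 215-216: the first man must power the cluster up before any CPU in it comes up. A plausible reconstruction of the elided control flow, continuing the function above:

    ret = 0;
    if (cluster_is_down)
        ret = platform_ops->cluster_powerup(cluster);
    if (cpu_is_down && !ret)
        ret = platform_ops->cpu_powerup(cpu, cluster);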
244 unsigned int mpidr, cpu, cluster; in mcpm_cpu_power_down() local
250 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in mcpm_cpu_power_down()
251 pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); in mcpm_cpu_power_down()
258 __mcpm_cpu_going_down(cpu, cluster); in mcpm_cpu_power_down()
260 BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); in mcpm_cpu_power_down()
262 mcpm_cpu_use_count[cluster][cpu]--; in mcpm_cpu_power_down()
263 BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 && in mcpm_cpu_power_down()
264 mcpm_cpu_use_count[cluster][cpu] != 1); in mcpm_cpu_power_down()
265 cpu_going_down = !mcpm_cpu_use_count[cluster][cpu]; in mcpm_cpu_power_down()
266 last_man = mcpm_cluster_unused(cluster); in mcpm_cpu_power_down()
268 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { in mcpm_cpu_power_down()
269 platform_ops->cpu_powerdown_prepare(cpu, cluster); in mcpm_cpu_power_down()
270 platform_ops->cluster_powerdown_prepare(cluster); in mcpm_cpu_power_down()
273 __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); in mcpm_cpu_power_down()
276 platform_ops->cpu_powerdown_prepare(cpu, cluster); in mcpm_cpu_power_down()
289 __mcpm_cpu_down(cpu, cluster); in mcpm_cpu_power_down()
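Pieced together, the power-down path branches on the last-man test: only a CPU that both is last and succeeds in __mcpm_outbound_enter_critical() may tear the whole cluster down; otherwise only the CPU itself is prepared for power-off. A sketch of the surrounding structure (the real function also drops a spinlock and disables the relevant cache level between these steps):

    if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
        /* Last man standing: tear the whole cluster down. */
        platform_ops->cpu_powerdown_prepare(cpu, cluster);
        platform_ops->cluster_powerdown_prepare(cluster);
        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
    } else if (cpu_going_down) {
        /* The cluster stays up: prepare only this CPU. */
        platform_ops->cpu_powerdown_prepare(cpu, cluster);
    }
    __mcpm_cpu_down(cpu, cluster);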
311 int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster) in mcpm_wait_for_cpu_powerdown() argument
318 ret = platform_ops->wait_for_powerdown(cpu, cluster); in mcpm_wait_for_cpu_powerdown()
320 pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n", in mcpm_wait_for_cpu_powerdown()
321 __func__, cpu, cluster, ret); in mcpm_wait_for_cpu_powerdown()
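A power-down request can race with a concurrent power-up, so mcpm_cpu_power_down() alone does not guarantee the CPU actually went away; hotplug-style callers confirm it with this wait, run from a surviving CPU. A hypothetical caller:

    static void example_cpu_kill(unsigned int cpu, unsigned int cluster)
    {
        /* Called from another CPU after the power-down was requested. */
        if (mcpm_wait_for_cpu_powerdown(cpu, cluster))
            pr_err("cpu %u cluster %u did not power down\n", cpu, cluster);
    }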
335 unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in mcpm_cpu_suspend() local
337 platform_ops->cpu_suspend_prepare(cpu, cluster); in mcpm_cpu_suspend()
345 unsigned int mpidr, cpu, cluster; in mcpm_cpu_powered_up() local
354 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in mcpm_cpu_powered_up()
358 cpu_was_down = !mcpm_cpu_use_count[cluster][cpu]; in mcpm_cpu_powered_up()
359 first_man = mcpm_cluster_unused(cluster); in mcpm_cpu_powered_up()
362 platform_ops->cluster_is_up(cluster); in mcpm_cpu_powered_up()
364 mcpm_cpu_use_count[cluster][cpu] = 1; in mcpm_cpu_powered_up()
366 platform_ops->cpu_is_up(cpu, cluster); in mcpm_cpu_powered_up()
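The conditionals are again elided: the first CPU up in a cluster tells the platform the cluster is alive before marking itself used, and every CPU then reports itself up. A sketch of the likely structure (the real code may additionally null-check these optional callbacks):

    if (first_man)
        platform_ops->cluster_is_up(cluster);
    if (cpu_was_down)
        mcpm_cpu_use_count[cluster][cpu] = 1;
    platform_ops->cpu_is_up(cpu, cluster);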
381 unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in nocache_trampoline() local
384 mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp); in nocache_trampoline()
387 __mcpm_cpu_going_down(cpu, cluster); in nocache_trampoline()
388 BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster)); in nocache_trampoline()
390 __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); in nocache_trampoline()
391 __mcpm_cpu_down(cpu, cluster); in nocache_trampoline()
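nocache_trampoline runs the full outbound handshake on a single live CPU, which is the only CPU involved and therefore always the last man; hence the BUG_ON() if entering the critical section ever fails. A sketch of how such a trampoline is typically driven on ARM via cpu_suspend(), which passes an argument to a resume-capable function (simplified from the MCPM loopback self-test; error handling omitted):

    static int example_loopback(void (*cache_disable)(void))
    {
        int ret;

        local_irq_disable();
        /* Runs nocache_trampoline(); control "returns" through the
         * entry vector installed at the top of the trampoline. */
        ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
        local_irq_enable();
        return ret;
    }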
435 * Set initial CPU and cluster states. in mcpm_sync_init()
436 * Only one cluster is assumed to be active at this point. in mcpm_sync_init()
439 mcpm_sync.clusters[i].cluster = CLUSTER_DOWN; in mcpm_sync_init()
450 mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP; in mcpm_sync_init()
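These last two matches come from the state initialisation: every cluster starts out CLUSTER_DOWN and only the boot CPU's cluster is then flipped to CLUSTER_UP. A sketch of the surrounding loop, assuming the usual MAX_NR_CLUSTERS/MAX_CPUS_PER_CLUSTER bounds and a final write-back so CPUs coming up with cold caches see the initial state:

    for (i = 0; i < MAX_NR_CLUSTERS; i++) {
        mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
        for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
            mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
    }
    mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
    sync_cache_w(&mcpm_sync);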