/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 */

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the cpus
 * cannot be independently powered down.  Each cpu has a power state that
 * it can enter without coordinating with the other cpus (usually
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC).  Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution, where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * During the final pass into the coupled state, each cpu increments the
 * ready counter and continues only once the
 * ready counter matches the number of online coupled cpus.  If any
 * cpu exits idle, the other cpus decrement their counter and retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for.  It is written by each cpu before it enters the waiting
 * loop, and only read after all the cpus are ready for the coupled idle
 * state.
 *
 * A single atomic variable packs two counters.  The waiting count tracks
 * the number of cpus in the coupled set that are currently or soon will be
 * in the waiting loop, in the ready loop, or in the coupled idle state.
 * The ready count tracks the number of cpus that are in the ready loop
 * or in the coupled idle state.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 *    Set struct cpuidle_device.coupled_cpus to the mask of all
 *    coupled cpus, usually the same as cpu_possible_mask if all cpus
 *    are part of the same cluster.  The coupled_cpus mask must be set
 *    in the struct cpuidle_device for each cpu.
 *
 *    Set struct cpuidle_driver.safe_state_index to a state that is not a
 *    coupled state.  This is usually WFI.
 *
 *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 *    state that affects multiple cpus.  (A minimal driver wiring sketch
 *    follows the includes below.)
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "cpuidle.h"
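/*
 * Illustrative sketch (not part of the original file): minimal driver wiring
 * for coupled states, assuming a two-state table where state 0 is the safe
 * WFI state and state 1 powers down shared blocks.  The my_* names are
 * hypothetical.
 */
#if 0	/* example only */
static DEFINE_PER_CPU(struct cpuidle_device, my_idle_dev);

static struct cpuidle_driver my_idle_driver = {
	.name = "my_coupled_idle",
	.owner = THIS_MODULE,
	.states = {
		[0] = {	/* safe state: per-cpu WFI, no coordination needed */
			.enter			= my_wfi_enter,
			.exit_latency		= 1,
			.target_residency	= 1,
			.name			= "WFI",
			.desc			= "ARM WFI",
		},
		[1] = {	/* coupled state: entered on all cpus together */
			.enter			= my_coupled_enter,
			.exit_latency		= 5000,
			.target_residency	= 10000,
			.flags			= CPUIDLE_FLAG_COUPLED,
			.name			= "OFF",
			.desc			= "cluster power down",
		},
	},
	.state_count = 2,
	.safe_state_index = 0,		/* must name a non-coupled state */
};

static int __init my_idle_init(void)
{
	int cpu, ret;
	struct cpuidle_device *dev;

	ret = cpuidle_register_driver(&my_idle_driver);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		dev = &per_cpu(my_idle_dev, cpu);
		dev->cpu = cpu;
		/* every cpu in the cluster shares the same coupled mask */
		cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
		ret = cpuidle_register_device(dev);
		if (ret)
			return ret;
	}
	return 0;
}
#endif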
/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in the ready and waiting loops
 * @abort_barrier: synchronisation point for abandoning a coupled idle attempt
 * @online_count: count of cpus in the coupled set that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
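/*
 * Definition as implied by the accessors in this file (a reconstruction;
 * the original declaration is elided from this excerpt):
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};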
#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)

#define CPUIDLE_COUPLED_NOT_IDLE	(-1)

static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);

/*
 * cpuidle_coupled_poke_pending avoids sending a second IPI while the per cpu
 * call_single_data_t is still in use; cpuidle_coupled_poked records which
 * cpus have seen a poke and not yet re-entered the waiting loop.
 */
static cpumask_t cpuidle_coupled_poke_pending;
static cpumask_t cpuidle_coupled_poked;
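/*
 * How the packed counter works (annotation, not from the original file):
 * ready_waiting_counts holds two counts in one atomic_t so a cpu can update
 * either with a single atomic operation:
 *
 *   bits  0..15 (& WAITING_MASK):   number of cpus in the waiting loop
 *   bits 16..31 (>> WAITING_BITS):  number of cpus in the ready loop
 *
 * A standalone demonstration of the arithmetic:
 */
#if 0	/* example only */
#include <assert.h>

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)

int main(void)
{
	int counts = 0;

	counts += 1;			/* cpu 0 enters the waiting loop */
	counts += 1;			/* cpu 1 enters the waiting loop */
	counts += MAX_WAITING_CPUS;	/* cpu 0 becomes ready */

	assert((counts & WAITING_MASK) == 2);	/* both cpus still waiting */
	assert((counts >> WAITING_BITS) == 1);	/* one cpu is ready */
	return 0;
}
#endif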
/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a: atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned, the barrier is immediately available for reuse.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	/* the last cpu through the second phase resets the barrier */
	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}
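/*
 * Usage sketch (illustrative, not from the original file): a coupled idle
 * handler can bracket its power-down sequence with this barrier so no cpu
 * races ahead of the others.  my_cluster_power_down() is hypothetical.
 */
#if 0	/* example only */
static int my_coupled_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	static atomic_t sync = ATOMIC_INIT(0);

	/* wait until every online coupled cpu has reached this handler */
	cpuidle_coupled_parallel_barrier(dev, &sync);

	if (dev->cpu == 0)
		my_cluster_power_down();	/* last-man work on one cpu */
	else
		cpu_do_idle();			/* other cpus park in WFI */

	/* resynchronize so no cpu returns before the cluster is back up */
	cpuidle_coupled_parallel_barrier(dev, &sync);

	return index;
}
#endif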
/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}
/**
 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
 * @drv: struct cpuidle_driver for the platform
 *
 * Returns 0 for a valid state configuration, or
 * -EINVAL if any coupled state's safe_state_index is wrongly set.
 */
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
{
	int i;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (cpuidle_state_is_coupled(drv, i) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -EINVAL;
	}

	return 0;
}
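/*
 * Illustration (not from the original file): a state table that fails this
 * check because its only coupled state is also named as the safe state.
 */
#if 0	/* example only */
static struct cpuidle_driver bad_driver = {
	.states = {
		[0] = { .flags = CPUIDLE_FLAG_COUPLED },  /* coupled... */
	},
	.state_count = 1,
	.safe_state_index = 0,	/* ...and also "safe": verify returns -EINVAL */
};
#endif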
/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}
/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting)
 * counter is equal to the number of online cpus.  This prevents a race where
 * a cpu decrements the waiting counter and then re-increments it just before
 * another cpu sees the update, which could let the ready counter count
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
				-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}
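/*
 * Worked example (annotation): with two online cpus, WAITING_BITS == 16,
 * all == 2 | (2 << 16) == 0x00020002, i.e. both cpus waiting and both ready.
 * atomic_add_unless() refuses the subtraction exactly when the counter holds
 * that value, so the last cpu to see "everyone ready" cannot back out of the
 * coupled state; its caller gets -EINVAL and proceeds into idle.
 */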
/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;

	return r == 0;
}
/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;

	return r == coupled->online_count;
}
/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;

	return w == coupled->online_count;
}
/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;

	return w == 0;
}
/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
					    struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}
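/*
 * Illustration (annotation): with requested_state = { 2, 1 } on two online
 * cpus, the loop returns 1.  The set can only go as deep as its shallowest
 * request, because every cpu will run the same state's enter function.
 */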
/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 */
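/*
 * Reconstructed sketch of the poke path (the bodies are elided from this
 * excerpt); it uses the cpuidle_coupled_poked, cpuidle_coupled_poke_pending
 * and cpuidle_coupled_poke_cb declarations above.
 */
static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;

	/* note that the poke arrived, then free the csd for reuse */
	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

static void cpuidle_coupled_poke(int cpu)
{
	call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	/* only send an IPI if one is not already in flight for this cpu */
	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}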
/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
					struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}
/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the new waiting count.
 */
static int cpuidle_coupled_set_waiting(int cpu,
				       struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return implies a full barrier, so the write to
	 * requested_state above is visible before the new counter value.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}
/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: target cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
					    struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing the waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment the ready count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}
/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu from looping back in and
 * seeing this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}
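/*
 * Lifecycle illustration (annotation, not from the original file): two
 * online cpus, counter shown as ready:waiting.
 *
 *   0:0  both cpus running
 *   0:1  cpu0 calls cpuidle_coupled_set_waiting
 *   0:2  cpu1 does too, sees waiting == online_count, pokes cpu0
 *   1:2  cpu0 calls cpuidle_coupled_set_ready
 *   2:2  cpu1 does too; both cpus enter the coupled state
 *   1:1  cpu0 exits idle and calls cpuidle_coupled_set_done
 *   0:0  cpu1 does too; the coupled set is fully out of idle
 */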
/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
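/* Reconstructed body (elided from this excerpt): */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}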
static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}
/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the safe state), and wait for
 * all the other cpus to call this function.  Once all coupled cpus are idle,
 * the second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will call the target state.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/* the last cpu to start waiting pokes all the others out of WFI */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu (the safe state).
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
	       !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of them just woke up.  Increment the ready counter
	 * and spin until all coupled cpus have incremented the counter.  Once a
	 * cpu has incremented the counter it may no longer abort idle; the
	 * only way back is for the whole ready loop to be reset.
	 */
	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* check if any other cpu bailed out of idle */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;
		cpu_relax();
	}

	/*
	 * A cpu that left and re-entered idle will have sent this cpu a poke,
	 * which will still be pending and could be lost on entry into the
	 * deep idle state.  In that case, abort the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);
	entered_state = cpuidle_enter_state(dev, drv, next_state);
	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause other coupled cpus to enter
	 * idle since they will see this cpu as not yet done.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}
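/*
 * Flow summary (annotation, not in the original file):
 *
 *   stage 1: set_waiting, then spin in the safe state until every coupled
 *            cpu is waiting and this cpu has been poked
 *   stage 2: set_ready, then spin until every cpu is ready, backing out to
 *            stage 1 if any cpu aborts; finally all cpus call the coupled
 *            state's enter function at approximately the same time
 *   exit:    set_done, then spin until no cpu is left in the ready loop
 */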
static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	coupled->online_count = cpumask_weight(&cpus);
}
/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	call_single_data_t *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu(cpu, &dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	csd->func = cpuidle_coupled_handle_poke;
	csd->info = (void *)(unsigned long)dev->cpu;

	return 0;
}
/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
 * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
 * this was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	/* free the shared struct only when the last reference is dropped */
	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}
/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* force all cpus out of the waiting loop */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}
/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* force cpus out of the prevent loop */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}
static int coupled_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	mutex_lock(&cpuidle_lock);
	if (dev && dev->coupled) {
		cpuidle_coupled_update_online_cpus(dev->coupled);
		cpuidle_coupled_allow_idle(dev->coupled);
	}
	mutex_unlock(&cpuidle_lock);
	return 0;
}

static int coupled_cpu_up_prepare(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	mutex_lock(&cpuidle_lock);
	if (dev && dev->coupled)
		cpuidle_coupled_prevent_idle(dev->coupled);
	mutex_unlock(&cpuidle_lock);
	return 0;
}
787 "cpuidle/coupled:prepare", in cpuidle_coupled_init()
793 "cpuidle/coupled:online", in cpuidle_coupled_init()