Lines matching refs: rf

Cross-reference listing for the identifier rf (a struct rq_flags) in the scheduler core, kernel/sched/core.c. Each entry gives the source line number, the matched source line, the enclosing function, and whether rf is a function argument or a local variable there.

561 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)  in __task_rq_lock()  argument
572 rq_pin_lock(rq, rf); in __task_rq_lock()
586 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock() argument
593 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
614 rq_pin_lock(rq, rf); in task_rq_lock()
618 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
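
The pair above is the canonical task_rq_lock() idiom: p->pi_lock is taken with the IRQ state saved into rf->flags, then the task's runqueue lock is taken and pinned via rq_pin_lock(). A minimal sketch of a caller, assuming the scheduler-internal helpers from kernel/sched/sched.h; the function name read_task_exec_runtime is invented (compare task_sched_runtime() at line 5322 below):

    /* Illustrative only: depends on kernel/sched/sched.h internals
     * and cannot be built outside kernel/sched/. */
    static u64 read_task_exec_runtime(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;
            u64 ns;

            /* Takes p->pi_lock (IRQ state saved in rf.flags), then the
             * task's rq lock, retrying until the task can no longer
             * migrate; rf also carries the lockdep pin cookie. */
            rq = task_rq_lock(p, &rf);
            ns = p->se.sum_exec_runtime;
            task_rq_unlock(rq, p, &rf);     /* drops both, restores IRQs */

            return ns;
    }
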
725 struct rq_flags rf; in hrtick() local
729 rq_lock(rq, &rf); in hrtick()
732 rq_unlock(rq, &rf); in hrtick()
753 struct rq_flags rf; in __hrtick_start() local
755 rq_lock(rq, &rf); in __hrtick_start()
757 rq_unlock(rq, &rf); in __hrtick_start()
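
hrtick() and __hrtick_start() run with IRQs already disabled (hrtimer and IPI context), so the plain rq_lock()/rq_unlock() variants suffice: they take only rq->lock and manage the pin cookie. A hedged sketch of the same shape, with an invented function name:

    static void poke_curr_task(struct rq *rq)
    {
            struct rq_flags rf;

            /* Caller must have IRQs off, as in an hrtimer callback. */
            rq_lock(rq, &rf);
            update_rq_clock(rq);
            resched_curr(rq);
            rq_unlock(rq, &rf);
    }
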
1411 struct rq_flags rf; in uclamp_update_util_min_rt_default() local
1418 rq = task_rq_lock(p, &rf); in uclamp_update_util_min_rt_default()
1420 task_rq_unlock(rq, p, &rf); in uclamp_update_util_min_rt_default()
1697 struct rq_flags rf; in uclamp_update_active() local
1708 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1719 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
2284 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task() argument
2296 trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached); in move_queued_task()
2304 rq_unlock(rq, rf); in move_queued_task()
2307 rq_lock(rq, rf); in move_queued_task()
2342 struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task() argument
2350 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
2368 struct rq_flags rf; in migration_cpu_stop() local
2374 local_irq_save(rf.flags); in migration_cpu_stop()
2383 rq_lock(rq, &rf); in migration_cpu_stop()
2409 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2448 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2457 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
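
migration_cpu_stop() shows the asymmetric variant: the IRQ state is saved by hand into rf.flags with local_irq_save(), the two locks are taken separately in pi_lock -> rq->lock order, and task_rq_unlock() later restores everything from the same rf. A condensed sketch of that shape; stop_and_move and the dest_cpu choice are invented:

    static int stop_and_move(void *arg)
    {
            struct task_struct *p = arg;
            struct rq *rq = this_rq();
            struct rq_flags rf;

            /* Save IRQ state into rf.flags by hand, then take both
             * locks in pi_lock -> rq->lock order, as
             * migration_cpu_stop() does. */
            local_irq_save(rf.flags);
            raw_spin_lock(&p->pi_lock);
            rq_lock(rq, &rf);

            if (task_rq(p) == rq && task_on_rq_queued(p))
                    rq = __migrate_task(rq, &rf, p,
                                        cpumask_any(p->cpus_ptr));

            /* Drops rq->lock and pi_lock, restoring the IRQ state that
             * local_irq_save() stashed in rf.flags above. */
            task_rq_unlock(rq, p, &rf);
            return 0;
    }
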
2707 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task() argument
2734 task_rq_unlock(rq, p, rf); in affine_move_task()
2788 task_rq_unlock(rq, p, rf); in affine_move_task()
2806 task_rq_unlock(rq, p, rf); in affine_move_task()
2819 rq = move_queued_task(rq, rf, p, dest_cpu); in affine_move_task()
2826 task_rq_unlock(rq, p, rf); in affine_move_task()
2856 struct rq_flags *rf) in __set_cpus_allowed_ptr_locked() argument
2928 ret = affine_move_task(rq, p, rf, dest_cpu, flags); in __set_cpus_allowed_ptr_locked()
2935 task_rq_unlock(rq, p, rf); in __set_cpus_allowed_ptr_locked()
2952 struct rq_flags rf; in __set_cpus_allowed_ptr() local
2955 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
2956 return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf); in __set_cpus_allowed_ptr()
2977 struct rq_flags rf; in restrict_cpus_allowed_ptr() local
2987 rq = task_rq_lock(p, &rf); in restrict_cpus_allowed_ptr()
3013 return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf); in restrict_cpus_allowed_ptr()
3016 task_rq_unlock(rq, p, &rf); in restrict_cpus_allowed_ptr()
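
__set_cpus_allowed_ptr_locked() illustrates a third convention: the caller takes the locks with task_rq_lock() and then hands rf to a *_locked helper, which must release them on every path (here via affine_move_task() or task_rq_unlock()). A minimal sketch of that ownership transfer, with invented names (do_update, do_update_locked):

    static int do_update_locked(struct task_struct *p, struct rq *rq,
                                struct rq_flags *rf)
    {
            int ret = 0;

            if (!task_on_rq_queued(p))
                    ret = -EINVAL;          /* hypothetical failure */

            /* Success or failure, the helper drops the locks taken by
             * the caller's task_rq_lock(). */
            task_rq_unlock(rq, p, rf);
            return ret;
    }

    static int do_update(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq = task_rq_lock(p, &rf);

            return do_update_locked(p, rq, &rf);    /* rf ownership moves */
    }
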
3291 struct rq_flags rf; in wait_task_inactive() local
3326 rq = task_rq_lock(p, &rf); in wait_task_inactive()
3333 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
3638 struct rq_flags *rf) in ttwu_do_wakeup() argument
3650 rq_unpin_lock(rq, rf); in ttwu_do_wakeup()
3652 rq_repin_lock(rq, rf); in ttwu_do_wakeup()
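
The unpin/repin pair in ttwu_do_wakeup() exists because the sched_class task_woken() callback may drop and retake rq->lock, so the lockdep pin cookie held in rf must be released around it. Roughly, what sits between lines 3650 and 3652:

    if (p->sched_class->task_woken) {
            rq_unpin_lock(rq, rf);          /* callback may unlock rq */
            p->sched_class->task_woken(rq, p);
            rq_repin_lock(rq, rf);          /* re-establish the pin */
    }
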
3674 struct rq_flags *rf) in ttwu_do_activate() argument
3697 ttwu_do_wakeup(rq, p, wake_flags, rf); in ttwu_do_activate()
3727 struct rq_flags rf; in ttwu_runnable() local
3731 rq = __task_rq_lock(p, &rf); in ttwu_runnable()
3735 ttwu_do_wakeup(rq, p, wake_flags, &rf); in ttwu_runnable()
3738 __task_rq_unlock(rq, &rf); in ttwu_runnable()
3749 struct rq_flags rf; in sched_ttwu_pending() local
3761 rq_lock_irqsave(rq, &rf); in sched_ttwu_pending()
3771 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3774 rq_unlock_irqrestore(rq, &rf); in sched_ttwu_pending()
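
sched_ttwu_pending() uses the _irqsave/_irqrestore pair because it can run in contexts where the IRQ state is unknown; the saved state again travels in rf.flags. A hedged sketch of the same variant for touching a remote runqueue (nudge_remote_cpu is invented):

    static void nudge_remote_cpu(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);
            struct rq_flags rf;

            rq_lock_irqsave(rq, &rf);       /* IRQ state kept in rf.flags */
            update_rq_clock(rq);
            resched_curr(rq);
            rq_unlock_irqrestore(rq, &rf);
    }
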
3806 struct rq_flags rf; in wake_up_if_idle() local
3816 rq_lock_irqsave(rq, &rf); in wake_up_if_idle()
3820 rq_unlock_irqrestore(rq, &rf); in wake_up_if_idle()
3903 struct rq_flags rf; in ttwu_queue() local
3908 rq_lock(rq, &rf); in ttwu_queue()
3910 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
3911 rq_unlock(rq, &rf); in ttwu_queue()
4284 struct rq_flags rf; in try_invoke_on_locked_down_task() local
4288 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in try_invoke_on_locked_down_task()
4290 rq = __task_rq_lock(p, &rf); in try_invoke_on_locked_down_task()
4293 rq_unlock(rq, &rf); in try_invoke_on_locked_down_task()
4305 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in try_invoke_on_locked_down_task()
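
try_invoke_on_locked_down_task() layers the two locks explicitly: pi_lock (taken raw, IRQ state in rf.flags) stabilizes the task's wakeup state for the whole call, while rq->lock is taken via __task_rq_lock() only if the task is actually on a runqueue. Condensed to its locking skeleton, with func/arg being the function's callback parameters:

    bool ret = false;
    struct rq_flags rf;
    struct rq *rq;

    raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
    if (p->on_rq) {
            rq = __task_rq_lock(p, &rf);    /* pi_lock already held */
            if (task_rq(p) == rq)
                    ret = func(p, arg);     /* caller-supplied callback */
            rq_unlock(rq, &rf);             /* rq->lock only; pi_lock stays */
    }
    raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
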
4615 struct rq_flags rf; in wake_up_new_task() local
4620 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4635 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
4649 rq_unpin_lock(rq, &rf); in wake_up_new_task()
4651 rq_repin_lock(rq, &rf); in wake_up_new_task()
4654 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
4879 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
4887 rq_unpin_lock(rq, rf); in prepare_lock_switch()
5101 struct task_struct *next, struct rq_flags *rf) in context_switch() argument
5149 prepare_lock_switch(rq, next, rf); in context_switch()
5322 struct rq_flags rf; in task_sched_runtime() local
5342 rq = task_rq_lock(p, &rf); in task_sched_runtime()
5354 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
5418 struct rq_flags rf; in scheduler_tick() local
5425 rq_lock(rq, &rf); in scheduler_tick()
5437 rq_unlock(rq, &rf); in scheduler_tick()
5496 struct rq_flags rf; in sched_tick_remote() local
5510 rq_lock_irq(rq, &rf); in sched_tick_remote()
5529 rq_unlock_irq(rq, &rf); in sched_tick_remote()
5746 struct rq_flags *rf) in put_prev_task_balance() argument
5759 if (class->balance(rq, prev, rf)) in put_prev_task_balance()
5771 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in __pick_next_task() argument
5785 p = pick_next_task_fair(rq, prev, rf); in __pick_next_task()
5799 put_prev_task_balance(rq, prev, rf); in __pick_next_task()
5883 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
5893 return __pick_next_task(rq, prev, rf); in pick_next_task()
5905 return __pick_next_task(rq, prev, rf); in pick_next_task()
5932 put_prev_task_balance(rq, prev, rf); in pick_next_task()
6325 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
6327 return __pick_next_task(rq, prev, rf); in pick_next_task()
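
Threading rf through __pick_next_task() and put_prev_task_balance() lets each sched_class balance() hook legally drop and retake rq->lock under the pinned-lock discipline. The balance pass around line 5759 is essentially:

    const struct sched_class *class;

    for_class_range(class, prev->sched_class, &idle_sched_class) {
            if (class->balance(rq, prev, rf))
                    break;
    }
    put_prev_task(rq, prev);
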
6394 struct rq_flags rf; in __schedule() local
6425 rq_lock(rq, &rf); in __schedule()
6475 next = pick_next_task(rq, prev, &rf); in __schedule()
6512 rq = context_switch(rq, prev, next, &rf); in __schedule()
6516 rq_unpin_lock(rq, &rf); in __schedule()
6977 struct rq_flags rf; in rt_mutex_setprio() local
6992 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
7086 rq_unpin_lock(rq, &rf); in rt_mutex_setprio()
7103 struct rq_flags rf; in set_user_nice() local
7113 rq = task_rq_lock(p, &rf); in set_user_nice()
7150 task_rq_unlock(rq, p, &rf); in set_user_nice()
7458 struct rq_flags rf; in __sched_setscheduler() local
7579 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
7645 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7711 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7726 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
8374 struct rq_flags rf; in do_sched_yield() local
8377 rq = this_rq_lock_irq(&rf); in do_sched_yield()
8385 rq_unlock_irq(rq, &rf); in do_sched_yield()
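
do_sched_yield() uses the last helper in the family, this_rq_lock_irq(), which disables IRQs, locks the local CPU's runqueue, and fills in rf for the matching rq_unlock_irq(). Essentially:

    struct rq_flags rf;
    struct rq *rq;

    rq = this_rq_lock_irq(&rf);     /* IRQs off + local rq locked */

    schedstat_inc(rq->yld_count);
    current->sched_class->yield_task(rq);

    preempt_disable();
    rq_unlock_irq(rq, &rf);
    sched_preempt_enable_no_resched();
    schedule();
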
8703 struct rq_flags rf; in sched_rr_get_interval() local
8720 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
8724 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
9001 struct rq_flags rf; in sched_setnuma() local
9004 rq = task_rq_lock(p, &rf); in sched_setnuma()
9019 task_rq_unlock(rq, p, &rf); in sched_setnuma()
9065 struct rq_flags rf; in __balance_push_cpu_stop() local
9069 rq_lock(rq, &rf); in __balance_push_cpu_stop()
9075 rq = __migrate_task(rq, &rf, p, cpu); in __balance_push_cpu_stop()
9078 rq_unlock(rq, &rf); in __balance_push_cpu_stop()
9160 struct rq_flags rf; in balance_push_set() local
9162 rq_lock_irqsave(rq, &rf); in balance_push_set()
9169 rq_unlock_irqrestore(rq, &rf); in balance_push_set()
9286 struct rq_flags rf; in sched_cpu_activate() local
9317 rq_lock_irqsave(rq, &rf); in sched_cpu_activate()
9322 rq_unlock_irqrestore(rq, &rf); in sched_cpu_activate()
9330 struct rq_flags rf; in sched_cpu_deactivate() local
9361 rq_lock_irqsave(rq, &rf); in sched_cpu_deactivate()
9367 rq_unlock_irqrestore(rq, &rf); in sched_cpu_deactivate()
9467 struct rq_flags rf; in sched_cpu_dying() local
9472 rq_lock_irqsave(rq, &rf); in sched_cpu_dying()
9477 rq_unlock_irqrestore(rq, &rf); in sched_cpu_dying()
10117 struct rq_flags rf; in sched_move_task() local
10120 rq = task_rq_lock(tsk, &rf); in sched_move_task()
10145 task_rq_unlock(rq, tsk, &rf); in sched_move_task()
10216 struct rq_flags rf; in cpu_cgroup_fork() local
10219 rq = task_rq_lock(task, &rf); in cpu_cgroup_fork()
10224 task_rq_unlock(rq, task, &rf); in cpu_cgroup_fork()
10566 struct rq_flags rf; in tg_set_cfs_bandwidth() local
10568 rq_lock_irq(rq, &rf); in tg_set_cfs_bandwidth()
10574 rq_unlock_irq(rq, &rf); in tg_set_cfs_bandwidth()
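
tg_set_cfs_bandwidth() takes each runqueue's lock in turn with rq_lock_irq() while rewriting per-cpu bandwidth state. The per-cpu loop, condensed (the real code also unthrottles a throttled cfs_rq inside the same critical section):

    for_each_online_cpu(i) {
            struct cfs_rq *cfs_rq = tg->cfs_rq[i];
            struct rq *rq = cfs_rq->rq;
            struct rq_flags rf;

            rq_lock_irq(rq, &rf);
            cfs_rq->runtime_enabled = runtime_enabled;
            cfs_rq->runtime_remaining = 0;
            rq_unlock_irq(rq, &rf);
    }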