Lines matching refs:rf — every place the rq_flags cookie rf appears in this file; "argument" marks lines where rf is a function parameter, "local" marks on-stack declarations:
562 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock() argument
573 rq_pin_lock(rq, rf); in __task_rq_lock()
587 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock() argument
594 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
615 rq_pin_lock(rq, rf); in task_rq_lock()
619 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
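
The six entries above are the lock primitives themselves: __task_rq_lock() takes only the runqueue lock (its caller already holds p->pi_lock), while task_rq_lock() first takes p->pi_lock with interrupts disabled, then the task's runqueue lock, storing the saved irq flags and the lockdep pin in the rq_flags cookie. A minimal sketch of how a caller uses the pair, mirroring the uclamp_update_util_min_rt_default() entries below; the helper name inspect_task() is hypothetical, and the snippet assumes the kernel-internal kernel/sched/sched.h build context:

    static void inspect_task(struct task_struct *p)
    {
            struct rq_flags rf;
            struct rq *rq;

            /* irqs off; p->pi_lock and rq->lock held; rq pinned via rf */
            rq = task_rq_lock(p, &rf);

            /* ... read or modify p's scheduling state; p cannot migrate ... */

            /* unpin, drop both locks, restore the irq state saved in rf */
            task_rq_unlock(rq, p, &rf);
    }

The same rf must be passed to the unlock that matches the lock, since it carries both the saved flags and the pin cookie.
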
726 struct rq_flags rf; in hrtick() local
730 rq_lock(rq, &rf); in hrtick()
733 rq_unlock(rq, &rf); in hrtick()
754 struct rq_flags rf; in __hrtick_start() local
756 rq_lock(rq, &rf); in __hrtick_start()
758 rq_unlock(rq, &rf); in __hrtick_start()
1412 struct rq_flags rf; in uclamp_update_util_min_rt_default() local
1419 rq = task_rq_lock(p, &rf); in uclamp_update_util_min_rt_default()
1421 task_rq_unlock(rq, p, &rf); in uclamp_update_util_min_rt_default()
1698 struct rq_flags rf; in uclamp_update_active() local
1709 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1720 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
2299 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task() argument
2311 trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached); in move_queued_task()
2319 rq_unlock(rq, rf); in move_queued_task()
2322 rq_lock(rq, rf); in move_queued_task()
2357 struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task() argument
2365 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
2383 struct rq_flags rf; in migration_cpu_stop() local
2389 local_irq_save(rf.flags); in migration_cpu_stop()
2398 rq_lock(rq, &rf); in migration_cpu_stop()
2424 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2463 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2472 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2723 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task() argument
2750 task_rq_unlock(rq, p, rf); in affine_move_task()
2804 task_rq_unlock(rq, p, rf); in affine_move_task()
2822 task_rq_unlock(rq, p, rf); in affine_move_task()
2835 rq = move_queued_task(rq, rf, p, dest_cpu); in affine_move_task()
2842 task_rq_unlock(rq, p, rf); in affine_move_task()
2872 struct rq_flags *rf) in __set_cpus_allowed_ptr_locked() argument
2943 ret = affine_move_task(rq, p, rf, dest_cpu, flags); in __set_cpus_allowed_ptr_locked()
2950 task_rq_unlock(rq, p, rf); in __set_cpus_allowed_ptr_locked()
2967 struct rq_flags rf; in __set_cpus_allowed_ptr() local
2970 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
2971 return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf); in __set_cpus_allowed_ptr()
2992 struct rq_flags rf; in restrict_cpus_allowed_ptr() local
3002 rq = task_rq_lock(p, &rf); in restrict_cpus_allowed_ptr()
3028 return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf); in restrict_cpus_allowed_ptr()
3031 task_rq_unlock(rq, p, &rf); in restrict_cpus_allowed_ptr()
3306 struct rq_flags rf; in wait_task_inactive() local
3341 rq = task_rq_lock(p, &rf); in wait_task_inactive()
3348 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
3653 struct rq_flags *rf) in ttwu_do_wakeup() argument
3665 rq_unpin_lock(rq, rf); in ttwu_do_wakeup()
3667 rq_repin_lock(rq, rf); in ttwu_do_wakeup()
3689 struct rq_flags *rf) in ttwu_do_activate() argument
3712 ttwu_do_wakeup(rq, p, wake_flags, rf); in ttwu_do_activate()
3742 struct rq_flags rf; in ttwu_runnable() local
3746 rq = __task_rq_lock(p, &rf); in ttwu_runnable()
3750 ttwu_do_wakeup(rq, p, wake_flags, &rf); in ttwu_runnable()
3753 __task_rq_unlock(rq, &rf); in ttwu_runnable()
3764 struct rq_flags rf; in sched_ttwu_pending() local
3776 rq_lock_irqsave(rq, &rf); in sched_ttwu_pending()
3786 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3789 rq_unlock_irqrestore(rq, &rf); in sched_ttwu_pending()
3821 struct rq_flags rf; in wake_up_if_idle() local
3831 rq_lock_irqsave(rq, &rf); in wake_up_if_idle()
3835 rq_unlock_irqrestore(rq, &rf); in wake_up_if_idle()
3918 struct rq_flags rf; in ttwu_queue() local
3923 rq_lock(rq, &rf); in ttwu_queue()
3925 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
3926 rq_unlock(rq, &rf); in ttwu_queue()
4299 struct rq_flags rf; in try_invoke_on_locked_down_task() local
4303 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in try_invoke_on_locked_down_task()
4305 rq = __task_rq_lock(p, &rf); in try_invoke_on_locked_down_task()
4308 rq_unlock(rq, &rf); in try_invoke_on_locked_down_task()
4320 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in try_invoke_on_locked_down_task()
4629 struct rq_flags rf; in wake_up_new_task() local
4634 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4649 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
4663 rq_unpin_lock(rq, &rf); in wake_up_new_task()
4665 rq_repin_lock(rq, &rf); in wake_up_new_task()
4668 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
4893 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
4901 rq_unpin_lock(rq, rf); in prepare_lock_switch()
5115 struct task_struct *next, struct rq_flags *rf) in context_switch() argument
5163 prepare_lock_switch(rq, next, rf); in context_switch()
5335 struct rq_flags rf; in task_sched_runtime() local
5355 rq = task_rq_lock(p, &rf); in task_sched_runtime()
5367 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
5431 struct rq_flags rf; in scheduler_tick() local
5438 rq_lock(rq, &rf); in scheduler_tick()
5450 rq_unlock(rq, &rf); in scheduler_tick()
5509 struct rq_flags rf; in sched_tick_remote() local
5523 rq_lock_irq(rq, &rf); in sched_tick_remote()
5542 rq_unlock_irq(rq, &rf); in sched_tick_remote()
5759 struct rq_flags *rf) in put_prev_task_balance() argument
5772 if (class->balance(rq, prev, rf)) in put_prev_task_balance()
5784 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in __pick_next_task() argument
5798 p = pick_next_task_fair(rq, prev, rf); in __pick_next_task()
5812 put_prev_task_balance(rq, prev, rf); in __pick_next_task()
5896 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
5906 return __pick_next_task(rq, prev, rf); in pick_next_task()
5918 return __pick_next_task(rq, prev, rf); in pick_next_task()
5945 put_prev_task_balance(rq, prev, rf); in pick_next_task()
6338 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
6340 return __pick_next_task(rq, prev, rf); in pick_next_task()
6407 struct rq_flags rf; in __schedule() local
6438 rq_lock(rq, &rf); in __schedule()
6488 next = pick_next_task(rq, prev, &rf); in __schedule()
6525 rq = context_switch(rq, prev, next, &rf); in __schedule()
6529 rq_unpin_lock(rq, &rf); in __schedule()
6990 struct rq_flags rf; in rt_mutex_setprio() local
7003 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
7097 rq_unpin_lock(rq, &rf); in rt_mutex_setprio()
7114 struct rq_flags rf; in set_user_nice() local
7124 rq = task_rq_lock(p, &rf); in set_user_nice()
7165 task_rq_unlock(rq, p, &rf); in set_user_nice()
7473 struct rq_flags rf; in __sched_setscheduler() local
7594 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
7660 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7727 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7742 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
8390 struct rq_flags rf; in do_sched_yield() local
8393 rq = this_rq_lock_irq(&rf); in do_sched_yield()
8401 rq_unlock_irq(rq, &rf); in do_sched_yield()
8719 struct rq_flags rf; in sched_rr_get_interval() local
8736 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
8740 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
9017 struct rq_flags rf; in sched_setnuma() local
9020 rq = task_rq_lock(p, &rf); in sched_setnuma()
9035 task_rq_unlock(rq, p, &rf); in sched_setnuma()
9081 struct rq_flags rf; in __balance_push_cpu_stop() local
9085 rq_lock(rq, &rf); in __balance_push_cpu_stop()
9091 rq = __migrate_task(rq, &rf, p, cpu); in __balance_push_cpu_stop()
9094 rq_unlock(rq, &rf); in __balance_push_cpu_stop()
9176 struct rq_flags rf; in balance_push_set() local
9178 rq_lock_irqsave(rq, &rf); in balance_push_set()
9185 rq_unlock_irqrestore(rq, &rf); in balance_push_set()
9302 struct rq_flags rf; in sched_cpu_activate() local
9333 rq_lock_irqsave(rq, &rf); in sched_cpu_activate()
9338 rq_unlock_irqrestore(rq, &rf); in sched_cpu_activate()
9346 struct rq_flags rf; in sched_cpu_deactivate() local
9377 rq_lock_irqsave(rq, &rf); in sched_cpu_deactivate()
9383 rq_unlock_irqrestore(rq, &rf); in sched_cpu_deactivate()
9483 struct rq_flags rf; in sched_cpu_dying() local
9488 rq_lock_irqsave(rq, &rf); in sched_cpu_dying()
9493 rq_unlock_irqrestore(rq, &rf); in sched_cpu_dying()
10133 struct rq_flags rf; in sched_move_task() local
10136 rq = task_rq_lock(tsk, &rf); in sched_move_task()
10161 task_rq_unlock(rq, tsk, &rf); in sched_move_task()
10232 struct rq_flags rf; in cpu_cgroup_fork() local
10235 rq = task_rq_lock(task, &rf); in cpu_cgroup_fork()
10240 task_rq_unlock(rq, task, &rf); in cpu_cgroup_fork()
10582 struct rq_flags rf; in tg_set_cfs_bandwidth() local
10584 rq_lock_irq(rq, &rf); in tg_set_cfs_bandwidth()
10590 rq_unlock_irq(rq, &rf); in tg_set_cfs_bandwidth()
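
Taken together, the listing shows the three ways rf is threaded through the scheduler: task_rq_lock()/task_rq_unlock() for combined task-plus-runqueue locking, rq_lock()/rq_unlock() (and their _irq/_irqsave variants) for locking a runqueue directly as hrtick() and scheduler_tick() do, and rq_unpin_lock()/rq_repin_lock() for spans such as ttwu_do_wakeup() and prepare_lock_switch() that may legitimately drop and retake rq->lock. A sketch of the direct pattern, assuming process context where the irq state must be saved; the function name work_on_rq() is hypothetical:

    static void work_on_rq(struct rq *rq)
    {
            struct rq_flags rf;

            /* take rq->lock, save irq flags in rf, lockdep-pin the rq */
            rq_lock_irqsave(rq, &rf);

            /*
             * ... per-runqueue work under the lock; a helper that might
             * drop rq->lock must be bracketed with rq_unpin_lock() and
             * rq_repin_lock(), as ttwu_do_wakeup() does above ...
             */

            /* unpin, release rq->lock, restore irqs from rf */
            rq_unlock_irqrestore(rq, &rf);
    }

In hardirq context (hrtick()) the plain rq_lock()/rq_unlock() pair suffices, since interrupts are already disabled; the pin/unpin bookkeeping in rf is what lets lockdep catch a callee that releases rq->lock without the caller's knowledge.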