Lines matching refs: p

226 	s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags);
241 void (*enqueue)(struct task_struct *p, u64 enq_flags);
257 void (*dequeue)(struct task_struct *p, u64 deq_flags);
290 void (*tick)(struct task_struct *p);
317 void (*runnable)(struct task_struct *p, u64 enq_flags);
325 void (*running)(struct task_struct *p);
336 void (*stopping)(struct task_struct *p, bool runnable);
356 void (*quiescent)(struct task_struct *p, u64 deq_flags);
398 void (*set_weight)(struct task_struct *p, u32 weight);
407 void (*set_cpumask)(struct task_struct *p,
466 s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args);
475 void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args);
484 void (*enable)(struct task_struct *p);
494 void (*disable)(struct task_struct *p);
524 void (*dump_task)(struct scx_dump_ctx *ctx, struct task_struct *p);
563 s32 (*cgroup_prep_move)(struct task_struct *p,
574 void (*cgroup_move)(struct task_struct *p,
586 void (*cgroup_cancel_move)(struct task_struct *p,
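The prototypes above are the struct sched_ext_ops callbacks that take a struct task_struct *p. For orientation, below is a minimal sketch of how a BPF scheduler might fill in two of them, assuming the BPF-side helpers from the in-tree tools/sched_ext headers (scx/common.bpf.h) and the scx_bpf_select_cpu_dfl()/scx_bpf_dispatch() kfuncs that appear further down in this listing; the minimal_* names are illustrative only.

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

s32 BPF_STRUCT_OPS(minimal_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	/* Built-in idle-CPU selection; if an idle CPU was found, dispatch
	 * directly to its local DSQ, which skips ops.enqueue() for this
	 * wakeup (see the ddsp_dsq_id checks in do_enqueue_task()). */
	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle)
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

	return cpu;
}

void BPF_STRUCT_OPS(minimal_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* Everything else goes onto the shared global DSQ. */
	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}

SEC(".struct_ops.link")
struct sched_ext_ops minimal_ops = {
	.select_cpu	= (void *)minimal_select_cpu,
	.enqueue	= (void *)minimal_enqueue,
	.name		= "minimal",
};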
1052 static struct scx_dispatch_q *find_global_dsq(struct task_struct *p) in find_global_dsq() argument
1054 return global_dsqs[cpu_to_node(task_cpu(p))]; in find_global_dsq()
1184 struct task_struct *p) in scx_kf_allowed_on_arg_tasks() argument
1189 if (unlikely((p != current->scx.kf_tasks[0] && in scx_kf_allowed_on_arg_tasks()
1190 p != current->scx.kf_tasks[1]))) { in scx_kf_allowed_on_arg_tasks()
1241 #define nldsq_for_each_task(p, dsq) \ argument
1242 for ((p) = nldsq_next_task((dsq), NULL, false); (p); \
1243 (p) = nldsq_next_task((dsq), (p), false))
1408 struct task_struct *p; in scx_task_iter_next_locked() local
1412 while ((p = scx_task_iter_next(iter))) { in scx_task_iter_next_locked()
1438 if (p->sched_class != &idle_sched_class) in scx_task_iter_next_locked()
1441 if (!p) in scx_task_iter_next_locked()
1444 iter->rq = task_rq_lock(p, &iter->rf); in scx_task_iter_next_locked()
1445 iter->locked = p; in scx_task_iter_next_locked()
1447 return p; in scx_task_iter_next_locked()
1484 static void wait_ops_state(struct task_struct *p, unsigned long opss) in wait_ops_state() argument
1488 } while (atomic_long_read_acquire(&p->scx.ops_state) == opss); in wait_ops_state()
1603 static void touch_core_sched(struct rq *rq, struct task_struct *p) in touch_core_sched() argument
1616 p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq)); in touch_core_sched()
1630 static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p) in touch_core_sched_dispatch() argument
1636 touch_core_sched(rq, p); in touch_core_sched_dispatch()
1673 static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, in dispatch_enqueue() argument
1678 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); in dispatch_enqueue()
1679 WARN_ON_ONCE((p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) || in dispatch_enqueue()
1680 !RB_EMPTY_NODE(&p->scx.dsq_priq)); in dispatch_enqueue()
1688 dsq = find_global_dsq(p); in dispatch_enqueue()
1719 p->scx.dsq_flags |= SCX_TASK_DSQ_ON_PRIQ; in dispatch_enqueue()
1720 rb_add(&p->scx.dsq_priq, &dsq->priq, scx_dsq_priq_less); in dispatch_enqueue()
1726 rbp = rb_prev(&p->scx.dsq_priq); in dispatch_enqueue()
1731 list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); in dispatch_enqueue()
1733 list_add(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
1742 list_add(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
1744 list_add_tail(&p->scx.dsq_list.node, &dsq->list); in dispatch_enqueue()
1749 p->scx.dsq_seq = dsq->seq; in dispatch_enqueue()
1752 p->scx.dsq = dsq; in dispatch_enqueue()
1760 p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; in dispatch_enqueue()
1761 p->scx.ddsp_enq_flags = 0; in dispatch_enqueue()
1768 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in dispatch_enqueue()
1774 if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && in dispatch_enqueue()
1788 static void task_unlink_from_dsq(struct task_struct *p, in task_unlink_from_dsq() argument
1791 WARN_ON_ONCE(list_empty(&p->scx.dsq_list.node)); in task_unlink_from_dsq()
1793 if (p->scx.dsq_flags & SCX_TASK_DSQ_ON_PRIQ) { in task_unlink_from_dsq()
1794 rb_erase(&p->scx.dsq_priq, &dsq->priq); in task_unlink_from_dsq()
1795 RB_CLEAR_NODE(&p->scx.dsq_priq); in task_unlink_from_dsq()
1796 p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; in task_unlink_from_dsq()
1799 list_del_init(&p->scx.dsq_list.node); in task_unlink_from_dsq()
1803 static void dispatch_dequeue(struct rq *rq, struct task_struct *p) in dispatch_dequeue() argument
1805 struct scx_dispatch_q *dsq = p->scx.dsq; in dispatch_dequeue()
1813 if (unlikely(!list_empty(&p->scx.dsq_list.node))) in dispatch_dequeue()
1814 list_del_init(&p->scx.dsq_list.node); in dispatch_dequeue()
1822 if (p->scx.holding_cpu >= 0) in dispatch_dequeue()
1823 p->scx.holding_cpu = -1; in dispatch_dequeue()
1835 if (p->scx.holding_cpu < 0) { in dispatch_dequeue()
1837 task_unlink_from_dsq(p, dsq); in dispatch_dequeue()
1845 WARN_ON_ONCE(!list_empty(&p->scx.dsq_list.node)); in dispatch_dequeue()
1846 p->scx.holding_cpu = -1; in dispatch_dequeue()
1848 p->scx.dsq = NULL; in dispatch_dequeue()
1855 struct task_struct *p) in find_dsq_for_dispatch() argument
1866 return find_global_dsq(p); in find_dsq_for_dispatch()
1872 dsq = find_global_dsq(p); in find_dsq_for_dispatch()
1878 dsq_id, p->comm, p->pid); in find_dsq_for_dispatch()
1879 return find_global_dsq(p); in find_dsq_for_dispatch()
1886 struct task_struct *p, u64 dsq_id, in mark_direct_dispatch() argument
1897 if (unlikely(p != ddsp_task)) { in mark_direct_dispatch()
1900 p->comm, p->pid); in mark_direct_dispatch()
1904 p->comm, p->pid); in mark_direct_dispatch()
1908 WARN_ON_ONCE(p->scx.ddsp_dsq_id != SCX_DSQ_INVALID); in mark_direct_dispatch()
1909 WARN_ON_ONCE(p->scx.ddsp_enq_flags); in mark_direct_dispatch()
1911 p->scx.ddsp_dsq_id = dsq_id; in mark_direct_dispatch()
1912 p->scx.ddsp_enq_flags = enq_flags; in mark_direct_dispatch()
1915 static void direct_dispatch(struct task_struct *p, u64 enq_flags) in direct_dispatch() argument
1917 struct rq *rq = task_rq(p); in direct_dispatch()
1919 find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p); in direct_dispatch()
1921 touch_core_sched_dispatch(rq, p); in direct_dispatch()
1923 p->scx.ddsp_enq_flags |= enq_flags; in direct_dispatch()
1934 opss = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_STATE_MASK; in direct_dispatch()
1944 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in direct_dispatch()
1948 p->comm, p->pid, opss); in direct_dispatch()
1949 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in direct_dispatch()
1953 WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); in direct_dispatch()
1954 list_add_tail(&p->scx.dsq_list.node, in direct_dispatch()
1960 dispatch_enqueue(dsq, p, p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); in direct_dispatch()
1975 static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, in do_enqueue_task() argument
1981 WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); in do_enqueue_task()
1998 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) in do_enqueue_task()
2003 unlikely(p->flags & PF_EXITING)) in do_enqueue_task()
2012 WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); in do_enqueue_task()
2013 atomic_long_set(&p->scx.ops_state, SCX_OPSS_QUEUEING | qseq); in do_enqueue_task()
2017 *ddsp_taskp = p; in do_enqueue_task()
2019 SCX_CALL_OP_TASK(SCX_KF_ENQUEUE, enqueue, p, enq_flags); in do_enqueue_task()
2022 if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) in do_enqueue_task()
2029 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_QUEUED | qseq); in do_enqueue_task()
2033 direct_dispatch(p, enq_flags); in do_enqueue_task()
2042 touch_core_sched(rq, p); in do_enqueue_task()
2043 p->scx.slice = SCX_SLICE_DFL; in do_enqueue_task()
2045 dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags); in do_enqueue_task()
2049 touch_core_sched(rq, p); /* see the comment in local: */ in do_enqueue_task()
2050 p->scx.slice = SCX_SLICE_DFL; in do_enqueue_task()
2051 dispatch_enqueue(find_global_dsq(p), p, enq_flags); in do_enqueue_task()
2054 static bool task_runnable(const struct task_struct *p) in task_runnable() argument
2056 return !list_empty(&p->scx.runnable_node); in task_runnable()
2059 static void set_task_runnable(struct rq *rq, struct task_struct *p) in set_task_runnable() argument
2063 if (p->scx.flags & SCX_TASK_RESET_RUNNABLE_AT) { in set_task_runnable()
2064 p->scx.runnable_at = jiffies; in set_task_runnable()
2065 p->scx.flags &= ~SCX_TASK_RESET_RUNNABLE_AT; in set_task_runnable()
2072 list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list); in set_task_runnable()
2075 static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at) in clr_task_runnable() argument
2077 list_del_init(&p->scx.runnable_node); in clr_task_runnable()
2079 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; in clr_task_runnable()
2082 static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) in enqueue_task_scx() argument
2085 int sticky_cpu = p->scx.sticky_cpu; in enqueue_task_scx()
2093 p->scx.sticky_cpu = -1; in enqueue_task_scx()
2101 if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) in enqueue_task_scx()
2104 if (p->scx.flags & SCX_TASK_QUEUED) { in enqueue_task_scx()
2105 WARN_ON_ONCE(!task_runnable(p)); in enqueue_task_scx()
2109 set_task_runnable(rq, p); in enqueue_task_scx()
2110 p->scx.flags |= SCX_TASK_QUEUED; in enqueue_task_scx()
2116 if (consider_migration || !task_on_rq_migrating(p)) in enqueue_task_scx()
2117 SCX_CALL_OP_TASK(SCX_KF_REST, runnable, p, enq_flags); in enqueue_task_scx()
2121 touch_core_sched(rq, p); in enqueue_task_scx()
2123 do_enqueue_task(rq, p, enq_flags, sticky_cpu); in enqueue_task_scx()
2128 static void ops_dequeue(struct task_struct *p, u64 deq_flags) in ops_dequeue() argument
2133 clr_task_runnable(p, false); in ops_dequeue()
2136 opss = atomic_long_read_acquire(&p->scx.ops_state); in ops_dequeue()
2149 SCX_CALL_OP_TASK(SCX_KF_REST, dequeue, p, deq_flags); in ops_dequeue()
2151 if (atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, in ops_dequeue()
2169 wait_ops_state(p, SCX_OPSS_DISPATCHING); in ops_dequeue()
2170 BUG_ON(atomic_long_read(&p->scx.ops_state) != SCX_OPSS_NONE); in ops_dequeue()
2175 static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags) in dequeue_task_scx() argument
2179 if (!(p->scx.flags & SCX_TASK_QUEUED)) { in dequeue_task_scx()
2180 WARN_ON_ONCE(task_runnable(p)); in dequeue_task_scx()
2184 ops_dequeue(p, deq_flags); in dequeue_task_scx()
2198 if (SCX_HAS_OP(stopping) && task_current(rq, p)) { in dequeue_task_scx()
2200 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, false); in dequeue_task_scx()
2205 if (consider_migration || !task_on_rq_migrating(p)) in dequeue_task_scx()
2206 SCX_CALL_OP_TASK(SCX_KF_REST, quiescent, p, deq_flags); in dequeue_task_scx()
2210 p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; in dequeue_task_scx()
2212 p->scx.flags &= ~SCX_TASK_DEQD_FOR_SLEEP; in dequeue_task_scx()
2214 p->scx.flags &= ~SCX_TASK_QUEUED; in dequeue_task_scx()
2218 dispatch_dequeue(rq, p); in dequeue_task_scx()
2224 struct task_struct *p = rq->curr; in yield_task_scx() local
2227 SCX_CALL_OP_2TASKS_RET(SCX_KF_REST, yield, p, NULL); in yield_task_scx()
2229 p->scx.slice = 0; in yield_task_scx()
2242 static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags, in move_local_task_to_local_dsq() argument
2252 WARN_ON_ONCE(p->scx.holding_cpu >= 0); in move_local_task_to_local_dsq()
2255 list_add(&p->scx.dsq_list.node, &dst_dsq->list); in move_local_task_to_local_dsq()
2257 list_add_tail(&p->scx.dsq_list.node, &dst_dsq->list); in move_local_task_to_local_dsq()
2260 p->scx.dsq = dst_dsq; in move_local_task_to_local_dsq()
2273 static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, in move_remote_task_to_local_dsq() argument
2279 deactivate_task(src_rq, p, 0); in move_remote_task_to_local_dsq()
2280 set_task_cpu(p, cpu_of(dst_rq)); in move_remote_task_to_local_dsq()
2281 p->scx.sticky_cpu = cpu_of(dst_rq); in move_remote_task_to_local_dsq()
2291 WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr)); in move_remote_task_to_local_dsq()
2294 activate_task(dst_rq, p, 0); in move_remote_task_to_local_dsq()
2317 static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, in task_can_run_on_remote_rq() argument
2322 SCHED_WARN_ON(task_cpu(p) == cpu); in task_can_run_on_remote_rq()
2336 if (unlikely(is_migration_disabled(p))) { in task_can_run_on_remote_rq()
2339 p->comm, p->pid, task_cpu(p), cpu); in task_can_run_on_remote_rq()
2349 if (!task_allowed_on_cpu(p, cpu)) { in task_can_run_on_remote_rq()
2352 cpu, p->comm, p->pid); in task_can_run_on_remote_rq()
2391 static bool unlink_dsq_and_lock_src_rq(struct task_struct *p, in unlink_dsq_and_lock_src_rq() argument
2399 WARN_ON_ONCE(p->scx.holding_cpu >= 0); in unlink_dsq_and_lock_src_rq()
2400 task_unlink_from_dsq(p, dsq); in unlink_dsq_and_lock_src_rq()
2401 p->scx.holding_cpu = cpu; in unlink_dsq_and_lock_src_rq()
2407 return likely(p->scx.holding_cpu == cpu) && in unlink_dsq_and_lock_src_rq()
2408 !WARN_ON_ONCE(src_rq != task_rq(p)); in unlink_dsq_and_lock_src_rq()
2411 static bool consume_remote_task(struct rq *this_rq, struct task_struct *p, in consume_remote_task() argument
2416 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { in consume_remote_task()
2417 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); in consume_remote_task()
2426 static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *s… in move_remote_task_to_local_dsq() argument
2427 static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_err… in task_can_run_on_remote_rq() argument
2428 static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispat… in consume_remote_task() argument
2446 static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags, in move_task_between_dsqs() argument
2450 struct rq *src_rq = task_rq(p), *dst_rq; in move_task_between_dsqs()
2459 unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) { in move_task_between_dsqs()
2460 dst_dsq = find_global_dsq(p); in move_task_between_dsqs()
2475 task_unlink_from_dsq(p, src_dsq); in move_task_between_dsqs()
2476 move_local_task_to_local_dsq(p, enq_flags, in move_task_between_dsqs()
2481 move_remote_task_to_local_dsq(p, enq_flags, in move_task_between_dsqs()
2489 task_unlink_from_dsq(p, src_dsq); in move_task_between_dsqs()
2490 p->scx.dsq = NULL; in move_task_between_dsqs()
2493 dispatch_enqueue(dst_dsq, p, enq_flags); in move_task_between_dsqs()
2501 struct task_struct *p; in consume_dispatch_q() local
2513 nldsq_for_each_task(p, dsq) { in consume_dispatch_q()
2514 struct rq *task_rq = task_rq(p); in consume_dispatch_q()
2517 task_unlink_from_dsq(p, dsq); in consume_dispatch_q()
2518 move_local_task_to_local_dsq(p, 0, dsq, rq); in consume_dispatch_q()
2523 if (task_can_run_on_remote_rq(p, rq, false)) { in consume_dispatch_q()
2524 if (likely(consume_remote_task(rq, p, dsq, task_rq))) in consume_dispatch_q()
2556 struct task_struct *p, u64 enq_flags) in dispatch_to_local_dsq() argument
2558 struct rq *src_rq = task_rq(p); in dispatch_to_local_dsq()
2571 dispatch_enqueue(dst_dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); in dispatch_to_local_dsq()
2577 unlikely(!task_can_run_on_remote_rq(p, dst_rq, true))) { in dispatch_to_local_dsq()
2578 dispatch_enqueue(find_global_dsq(p), p, in dispatch_to_local_dsq()
2594 p->scx.holding_cpu = raw_smp_processor_id(); in dispatch_to_local_dsq()
2597 atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE); in dispatch_to_local_dsq()
2607 if (likely(p->scx.holding_cpu == raw_smp_processor_id()) && in dispatch_to_local_dsq()
2608 !WARN_ON_ONCE(src_rq != task_rq(p))) { in dispatch_to_local_dsq()
2615 p->scx.holding_cpu = -1; in dispatch_to_local_dsq()
2616 dispatch_enqueue(&dst_rq->scx.local_dsq, p, enq_flags); in dispatch_to_local_dsq()
2618 move_remote_task_to_local_dsq(p, enq_flags, in dispatch_to_local_dsq()
2625 if (sched_class_above(p->sched_class, dst_rq->curr->sched_class)) in dispatch_to_local_dsq()
2658 static void finish_dispatch(struct rq *rq, struct task_struct *p, in finish_dispatch() argument
2665 touch_core_sched_dispatch(rq, p); in finish_dispatch()
2671 opss = atomic_long_read(&p->scx.ops_state); in finish_dispatch()
2694 if (likely(atomic_long_try_cmpxchg(&p->scx.ops_state, &opss, in finish_dispatch()
2705 wait_ops_state(p, opss); in finish_dispatch()
2709 BUG_ON(!(p->scx.flags & SCX_TASK_QUEUED)); in finish_dispatch()
2711 dsq = find_dsq_for_dispatch(this_rq(), dsq_id, p); in finish_dispatch()
2714 dispatch_to_local_dsq(rq, dsq, p, enq_flags); in finish_dispatch()
2716 dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); in finish_dispatch()
2883 struct task_struct *p; in process_ddsp_deferred_locals() local
2894 while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals, in process_ddsp_deferred_locals()
2898 list_del_init(&p->scx.dsq_list.node); in process_ddsp_deferred_locals()
2900 dsq = find_dsq_for_dispatch(rq, p->scx.ddsp_dsq_id, p); in process_ddsp_deferred_locals()
2902 dispatch_to_local_dsq(rq, dsq, p, p->scx.ddsp_enq_flags); in process_ddsp_deferred_locals()
2906 static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) in set_next_task_scx() argument
2908 if (p->scx.flags & SCX_TASK_QUEUED) { in set_next_task_scx()
2913 ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC); in set_next_task_scx()
2914 dispatch_dequeue(rq, p); in set_next_task_scx()
2917 p->se.exec_start = rq_clock_task(rq); in set_next_task_scx()
2920 if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED)) in set_next_task_scx()
2921 SCX_CALL_OP_TASK(SCX_KF_REST, running, p); in set_next_task_scx()
2923 clr_task_runnable(p, true); in set_next_task_scx()
2929 if ((p->scx.slice == SCX_SLICE_INF) != in set_next_task_scx()
2931 if (p->scx.slice == SCX_SLICE_INF) in set_next_task_scx()
3009 static void put_prev_task_scx(struct rq *rq, struct task_struct *p, in put_prev_task_scx() argument
3015 if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED)) in put_prev_task_scx()
3016 SCX_CALL_OP_TASK(SCX_KF_REST, stopping, p, true); in put_prev_task_scx()
3018 if (p->scx.flags & SCX_TASK_QUEUED) { in put_prev_task_scx()
3019 set_task_runnable(rq, p); in put_prev_task_scx()
3027 if (p->scx.slice && !scx_rq_bypassing(rq)) { in put_prev_task_scx()
3028 dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); in put_prev_task_scx()
3040 do_enqueue_task(rq, p, SCX_ENQ_LAST, -1); in put_prev_task_scx()
3042 do_enqueue_task(rq, p, 0, -1); in put_prev_task_scx()
3060 struct task_struct *p; in pick_task_scx() local
3102 p = prev; in pick_task_scx()
3103 if (!p->scx.slice) { in pick_task_scx()
3104 p->scx.slice = SCX_SLICE_DFL; in pick_task_scx()
3105 trace_android_vh_scx_fix_prev_slice(p); in pick_task_scx()
3108 p = first_local_task(rq); in pick_task_scx()
3109 if (!p) { in pick_task_scx()
3115 if (unlikely(!p->scx.slice)) { in pick_task_scx()
3118 p->comm, p->pid, __func__); in pick_task_scx()
3121 p->scx.slice = SCX_SLICE_DFL; in pick_task_scx()
3125 return p; in pick_task_scx()
3216 static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, in scx_select_cpu_dfl() argument
3242 if (cpumask_test_cpu(cpu, p->cpus_ptr)) in scx_select_cpu_dfl()
3257 cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE); in scx_select_cpu_dfl()
3267 cpu = scx_pick_idle_cpu(p->cpus_ptr, 0); in scx_select_cpu_dfl()
3281 static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags) in select_task_rq_scx() argument
3296 if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) { in select_task_rq_scx()
3302 *ddsp_taskp = p; in select_task_rq_scx()
3305 select_cpu, p, prev_cpu, wake_flags); in select_task_rq_scx()
3315 cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found); in select_task_rq_scx()
3317 p->scx.slice = SCX_SLICE_DFL; in select_task_rq_scx()
3318 p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL; in select_task_rq_scx()
3324 static void task_woken_scx(struct rq *rq, struct task_struct *p) in task_woken_scx() argument
3329 static void set_cpus_allowed_scx(struct task_struct *p, in set_cpus_allowed_scx() argument
3334 trace_android_vh_scx_set_cpus_allowed(p, ac, &done); in set_cpus_allowed_scx()
3338 set_cpus_allowed_common(p, ac); in set_cpus_allowed_scx()
3349 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p, in set_cpus_allowed_scx()
3350 (struct cpumask *)p->cpus_ptr); in set_cpus_allowed_scx()
3488 struct task_struct *p; in check_rq_for_timeouts() local
3493 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) { in check_rq_for_timeouts()
3494 unsigned long last_runnable = p->scx.runnable_at; in check_rq_for_timeouts()
3502 p->comm, p->pid, in check_rq_for_timeouts()
3590 static enum scx_task_state scx_get_task_state(const struct task_struct *p) in scx_get_task_state() argument
3592 return (p->scx.flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT; in scx_get_task_state()
3595 static void scx_set_task_state(struct task_struct *p, enum scx_task_state state) in scx_set_task_state() argument
3597 enum scx_task_state prev_state = scx_get_task_state(p); in scx_set_task_state()
3620 prev_state, state, p->comm, p->pid); in scx_set_task_state()
3622 p->scx.flags &= ~SCX_TASK_STATE_MASK; in scx_set_task_state()
3623 p->scx.flags |= state << SCX_TASK_STATE_SHIFT; in scx_set_task_state()
3626 static int scx_ops_init_task(struct task_struct *p, struct task_group *tg, bool fork) in scx_ops_init_task() argument
3630 p->scx.disallow = false; in scx_ops_init_task()
3638 ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init_task, p, &args); in scx_ops_init_task()
3645 scx_set_task_state(p, SCX_TASK_INIT); in scx_ops_init_task()
3647 if (p->scx.disallow) { in scx_ops_init_task()
3652 rq = task_rq_lock(p, &rf); in scx_ops_init_task()
3661 if (p->policy == SCHED_EXT) { in scx_ops_init_task()
3662 p->policy = SCHED_NORMAL; in scx_ops_init_task()
3666 task_rq_unlock(rq, p, &rf); in scx_ops_init_task()
3667 } else if (p->policy == SCHED_EXT) { in scx_ops_init_task()
3669 p->comm, p->pid); in scx_ops_init_task()
3673 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; in scx_ops_init_task()
3677 static void scx_ops_enable_task(struct task_struct *p) in scx_ops_enable_task() argument
3681 lockdep_assert_rq_held(task_rq(p)); in scx_ops_enable_task()
3687 if (task_has_idle_policy(p)) in scx_ops_enable_task()
3690 weight = sched_prio_to_weight[p->static_prio - MAX_RT_PRIO]; in scx_ops_enable_task()
3692 p->scx.weight = sched_weight_to_cgroup(weight); in scx_ops_enable_task()
3695 SCX_CALL_OP_TASK(SCX_KF_REST, enable, p); in scx_ops_enable_task()
3696 scx_set_task_state(p, SCX_TASK_ENABLED); in scx_ops_enable_task()
3699 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight); in scx_ops_enable_task()
3702 static void scx_ops_disable_task(struct task_struct *p) in scx_ops_disable_task() argument
3704 lockdep_assert_rq_held(task_rq(p)); in scx_ops_disable_task()
3705 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); in scx_ops_disable_task()
3708 SCX_CALL_OP_TASK(SCX_KF_REST, disable, p); in scx_ops_disable_task()
3709 scx_set_task_state(p, SCX_TASK_READY); in scx_ops_disable_task()
3712 static void scx_ops_exit_task(struct task_struct *p) in scx_ops_exit_task() argument
3718 lockdep_assert_rq_held(task_rq(p)); in scx_ops_exit_task()
3720 switch (scx_get_task_state(p)) { in scx_ops_exit_task()
3729 scx_ops_disable_task(p); in scx_ops_exit_task()
3737 SCX_CALL_OP_TASK(SCX_KF_REST, exit_task, p, &args); in scx_ops_exit_task()
3738 scx_set_task_state(p, SCX_TASK_NONE); in scx_ops_exit_task()
3754 void scx_pre_fork(struct task_struct *p) in scx_pre_fork() argument
3765 int scx_fork(struct task_struct *p) in scx_fork() argument
3770 return scx_ops_init_task(p, task_group(p), true); in scx_fork()
3775 void scx_post_fork(struct task_struct *p) in scx_post_fork() argument
3778 scx_set_task_state(p, SCX_TASK_READY); in scx_post_fork()
3785 if (p->sched_class == &ext_sched_class) { in scx_post_fork()
3789 rq = task_rq_lock(p, &rf); in scx_post_fork()
3790 scx_ops_enable_task(p); in scx_post_fork()
3791 task_rq_unlock(rq, p, &rf); in scx_post_fork()
3796 list_add_tail(&p->scx.tasks_node, &scx_tasks); in scx_post_fork()
3802 void scx_cancel_fork(struct task_struct *p) in scx_cancel_fork() argument
3808 rq = task_rq_lock(p, &rf); in scx_cancel_fork()
3809 WARN_ON_ONCE(scx_get_task_state(p) >= SCX_TASK_READY); in scx_cancel_fork()
3810 scx_ops_exit_task(p); in scx_cancel_fork()
3811 task_rq_unlock(rq, p, &rf); in scx_cancel_fork()
3817 void sched_ext_free(struct task_struct *p) in sched_ext_free() argument
3822 list_del_init(&p->scx.tasks_node); in sched_ext_free()
3829 if (scx_get_task_state(p) != SCX_TASK_NONE) { in sched_ext_free()
3833 rq = task_rq_lock(p, &rf); in sched_ext_free()
3834 scx_ops_exit_task(p); in sched_ext_free()
3835 task_rq_unlock(rq, p, &rf); in sched_ext_free()
3839 static void reweight_task_scx(struct rq *rq, struct task_struct *p, in reweight_task_scx() argument
3842 lockdep_assert_rq_held(task_rq(p)); in reweight_task_scx()
3844 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); in reweight_task_scx()
3846 SCX_CALL_OP_TASK(SCX_KF_REST, set_weight, p, p->scx.weight); in reweight_task_scx()
3849 static void prio_changed_scx(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_scx() argument
3853 static void switching_to_scx(struct rq *rq, struct task_struct *p) in switching_to_scx() argument
3855 scx_ops_enable_task(p); in switching_to_scx()
3862 SCX_CALL_OP_TASK(SCX_KF_REST, set_cpumask, p, in switching_to_scx()
3863 (struct cpumask *)p->cpus_ptr); in switching_to_scx()
3865 trace_android_vh_switching_to_scx(rq, p); in switching_to_scx()
3868 static void switched_from_scx(struct rq *rq, struct task_struct *p) in switched_from_scx() argument
3870 scx_ops_disable_task(p); in switched_from_scx()
3873 static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p,int wake_flags) {} in wakeup_preempt_scx() argument
3874 static void switched_to_scx(struct rq *rq, struct task_struct *p) {} in switched_to_scx() argument
3876 int scx_check_setscheduler(struct task_struct *p, int policy) in scx_check_setscheduler() argument
3878 lockdep_assert_rq_held(task_rq(p)); in scx_check_setscheduler()
3881 if (scx_enabled() && READ_ONCE(p->scx.disallow) && in scx_check_setscheduler()
3882 p->policy != policy && policy == SCHED_EXT) in scx_check_setscheduler()
3891 struct task_struct *p = rq->curr; in scx_can_stop_tick() local
3896 if (p->sched_class != &ext_sched_class) in scx_can_stop_tick()
3993 struct task_struct *p; in scx_cgroup_can_attach() local
4002 cgroup_taskset_for_each(p, css, tset) { in scx_cgroup_can_attach()
4003 struct cgroup *from = tg_cgrp(task_group(p)); in scx_cgroup_can_attach()
4006 WARN_ON_ONCE(p->scx.cgrp_moving_from); in scx_cgroup_can_attach()
4018 p, from, css->cgroup); in scx_cgroup_can_attach()
4023 p->scx.cgrp_moving_from = from; in scx_cgroup_can_attach()
4029 cgroup_taskset_for_each(p, css, tset) { in scx_cgroup_can_attach()
4030 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from) in scx_cgroup_can_attach()
4031 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p, in scx_cgroup_can_attach()
4032 p->scx.cgrp_moving_from, css->cgroup); in scx_cgroup_can_attach()
4033 p->scx.cgrp_moving_from = NULL; in scx_cgroup_can_attach()
4040 void scx_cgroup_move_task(struct task_struct *p) in scx_cgroup_move_task() argument
4049 if (SCX_HAS_OP(cgroup_move) && !WARN_ON_ONCE(!p->scx.cgrp_moving_from)) in scx_cgroup_move_task()
4050 SCX_CALL_OP_TASK(SCX_KF_UNLOCKED, cgroup_move, p, in scx_cgroup_move_task()
4051 p->scx.cgrp_moving_from, tg_cgrp(task_group(p))); in scx_cgroup_move_task()
4052 p->scx.cgrp_moving_from = NULL; in scx_cgroup_move_task()
4063 struct task_struct *p; in scx_cgroup_cancel_attach() local
4068 cgroup_taskset_for_each(p, css, tset) { in scx_cgroup_cancel_attach()
4069 if (SCX_HAS_OP(cgroup_cancel_move) && p->scx.cgrp_moving_from) in scx_cgroup_cancel_attach()
4070 SCX_CALL_OP(SCX_KF_UNLOCKED, cgroup_cancel_move, p, in scx_cgroup_cancel_attach()
4071 p->scx.cgrp_moving_from, css->cgroup); in scx_cgroup_cancel_attach()
4072 p->scx.cgrp_moving_from = NULL; in scx_cgroup_cancel_attach()
4510 struct task_struct *p, *n; in scx_ops_bypass() local
4539 list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list, in scx_ops_bypass()
4544 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); in scx_ops_bypass()
4612 struct task_struct *p; in scx_ops_disable_workfn() local
4677 while ((p = scx_task_iter_next_locked(&sti))) { in scx_ops_disable_workfn()
4678 const struct sched_class *old_class = p->sched_class; in scx_ops_disable_workfn()
4680 __setscheduler_class(p->policy, p->prio); in scx_ops_disable_workfn()
4683 if (old_class != new_class && p->se.sched_delayed) in scx_ops_disable_workfn()
4684 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in scx_ops_disable_workfn()
4686 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); in scx_ops_disable_workfn()
4688 p->sched_class = new_class; in scx_ops_disable_workfn()
4689 check_class_changing(task_rq(p), p, old_class); in scx_ops_disable_workfn()
4690 trace_android_vh_scx_task_switch_finish(p, 0); in scx_ops_disable_workfn()
4694 check_class_changed(task_rq(p), p, old_class, p->prio); in scx_ops_disable_workfn()
4695 scx_ops_exit_task(p); in scx_ops_disable_workfn()
4907 struct task_struct *p, char marker) in scx_dump_task() argument
4911 unsigned long ops_state = atomic_long_read(&p->scx.ops_state); in scx_dump_task()
4914 if (p->scx.dsq) in scx_dump_task()
4916 (unsigned long long)p->scx.dsq->id); in scx_dump_task()
4920 marker, task_state_to_char(p), p->comm, p->pid, in scx_dump_task()
4921 jiffies_delta_msecs(p->scx.runnable_at, dctx->at_jiffies)); in scx_dump_task()
4923 scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, in scx_dump_task()
4924 p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, in scx_dump_task()
4927 p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf, in scx_dump_task()
4928 p->scx.dsq_vtime); in scx_dump_task()
4929 dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr)); in scx_dump_task()
4933 SCX_CALL_OP(SCX_KF_REST, dump_task, dctx, p); in scx_dump_task()
4938 bt_len = stack_trace_save_tsk(p, bt, SCX_EXIT_BT_LEN, 1); in scx_dump_task()
4990 struct task_struct *p; in scx_dump_state() local
5060 list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) in scx_dump_state()
5061 scx_dump_task(&s, &dctx, p, ' '); in scx_dump_state()
5161 struct task_struct *p; in scx_ops_enable() local
5352 while ((p = scx_task_iter_next_locked(&sti))) { in scx_ops_enable()
5358 if (!tryget_task_struct(p)) in scx_ops_enable()
5363 ret = scx_ops_init_task(p, task_group(p), false); in scx_ops_enable()
5365 put_task_struct(p); in scx_ops_enable()
5369 ret, p->comm, p->pid); in scx_ops_enable()
5373 scx_set_task_state(p, SCX_TASK_READY); in scx_ops_enable()
5375 put_task_struct(p); in scx_ops_enable()
5397 while ((p = scx_task_iter_next_locked(&sti))) { in scx_ops_enable()
5398 const struct sched_class *old_class = p->sched_class; in scx_ops_enable()
5400 __setscheduler_class(p->policy, p->prio); in scx_ops_enable()
5403 if (!tryget_task_struct(p)) in scx_ops_enable()
5406 if (old_class != new_class && p->se.sched_delayed) in scx_ops_enable()
5407 dequeue_task(task_rq(p), p, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in scx_ops_enable()
5409 sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx); in scx_ops_enable()
5411 p->scx.slice = SCX_SLICE_DFL; in scx_ops_enable()
5412 p->sched_class = new_class; in scx_ops_enable()
5413 check_class_changing(task_rq(p), p, old_class); in scx_ops_enable()
5414 trace_android_vh_scx_task_switch_finish(p, 1); in scx_ops_enable()
5418 check_class_changed(task_rq(p), p, old_class, p->prio); in scx_ops_enable()
5419 put_task_struct(p); in scx_ops_enable()
5720 static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; } in select_cpu_stub() argument
5721 static void enqueue_stub(struct task_struct *p, u64 enq_flags) {} in enqueue_stub() argument
5722 static void dequeue_stub(struct task_struct *p, u64 enq_flags) {} in dequeue_stub() argument
5723 static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {} in dispatch_stub() argument
5724 static void tick_stub(struct task_struct *p) {} in tick_stub() argument
5725 static void runnable_stub(struct task_struct *p, u64 enq_flags) {} in runnable_stub() argument
5726 static void running_stub(struct task_struct *p) {} in running_stub() argument
5727 static void stopping_stub(struct task_struct *p, bool runnable) {} in stopping_stub() argument
5728 static void quiescent_stub(struct task_struct *p, u64 deq_flags) {} in quiescent_stub() argument
5731 static void set_weight_stub(struct task_struct *p, u32 weight) {} in set_weight_stub() argument
5732 static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {} in set_cpumask_stub() argument
5736 static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL;… in init_task_stub() argument
5737 static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {} in exit_task_stub() argument
5738 static void enable_stub(struct task_struct *p) {} in enable_stub() argument
5739 static void disable_stub(struct task_struct *p) {} in disable_stub() argument
5743 static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { r… in cgroup_prep_move_stub() argument
5744 static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} in cgroup_move_stub() argument
5745 static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) … in cgroup_cancel_move_stub() argument
5754 static void dump_task_stub(struct scx_dump_ctx *ctx, struct task_struct *p) {} in dump_task_stub() argument
5964 void print_scx_info(const char *log_lvl, struct task_struct *p) in print_scx_info() argument
5979 if (copy_from_kernel_nofault(&class, &p->sched_class, sizeof(class)) || in print_scx_info()
5986 if (!copy_from_kernel_nofault(&runnable_at, &p->scx.runnable_at, in print_scx_info()
6092 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, in scx_bpf_select_cpu_dfl() argument
6107 return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle); in scx_bpf_select_cpu_dfl()
6126 static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags) in scx_dispatch_preamble() argument
6133 if (unlikely(!p)) { in scx_dispatch_preamble()
6146 static void scx_dispatch_commit(struct task_struct *p, u64 dsq_id, u64 enq_flags) in scx_dispatch_commit() argument
6153 mark_direct_dispatch(ddsp_task, p, dsq_id, enq_flags); in scx_dispatch_commit()
6163 .task = p, in scx_dispatch_commit()
6164 .qseq = atomic_long_read(&p->scx.ops_state) & SCX_OPSS_QSEQ_MASK, in scx_dispatch_commit()
6208 __bpf_kfunc void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, in scx_bpf_dispatch() argument
6211 if (!scx_dispatch_preamble(p, enq_flags)) in scx_bpf_dispatch()
6215 p->scx.slice = slice; in scx_bpf_dispatch()
6217 p->scx.slice = p->scx.slice ?: 1; in scx_bpf_dispatch()
6219 scx_dispatch_commit(p, dsq_id, enq_flags); in scx_bpf_dispatch()
6239 __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id, in scx_bpf_dispatch_vtime() argument
6242 if (!scx_dispatch_preamble(p, enq_flags)) in scx_bpf_dispatch_vtime()
6246 p->scx.slice = slice; in scx_bpf_dispatch_vtime()
6248 p->scx.slice = p->scx.slice ?: 1; in scx_bpf_dispatch_vtime()
6250 p->scx.dsq_vtime = vtime; in scx_bpf_dispatch_vtime()
6252 scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); in scx_bpf_dispatch_vtime()
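scx_bpf_dispatch() and scx_bpf_dispatch_vtime() above are the BPF-side entry points into the dispatch_enqueue() path; the vtime variant sets p->scx.dsq_vtime and adds SCX_ENQ_DSQ_PRIQ so the task lands on the DSQ's priority rbtree. A sketch of the usual vtime pattern, assuming a custom DSQ created elsewhere with scx_bpf_create_dsq() and the scx_bpf_consume() kfunc of the same kernel vintage; SHARED_DSQ and the vtime_* names are illustrative:

#define SHARED_DSQ 0	/* illustrative id, created with scx_bpf_create_dsq(SHARED_DSQ, -1) */

void BPF_STRUCT_OPS(vtime_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* Insert ordered by the task's virtual time; slice refilled to default. */
	scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
			       p->scx.dsq_vtime, enq_flags);
}

void BPF_STRUCT_OPS(vtime_dispatch, s32 cpu, struct task_struct *prev)
{
	/* Pull the lowest-vtime task into this CPU's local DSQ. */
	scx_bpf_consume(SHARED_DSQ);
}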
6268 struct task_struct *p, u64 dsq_id, in scx_dispatch_from_dsq() argument
6285 src_rq = task_rq(p); in scx_dispatch_from_dsq()
6307 if (unlikely(p->scx.dsq != src_dsq || in scx_dispatch_from_dsq()
6308 u32_before(kit->cursor.priv, p->scx.dsq_seq) || in scx_dispatch_from_dsq()
6309 p->scx.holding_cpu >= 0) || in scx_dispatch_from_dsq()
6310 WARN_ON_ONCE(src_rq != task_rq(p))) { in scx_dispatch_from_dsq()
6316 dst_dsq = find_dsq_for_dispatch(this_rq, dsq_id, p); in scx_dispatch_from_dsq()
6324 p->scx.dsq_vtime = kit->vtime; in scx_dispatch_from_dsq()
6326 p->scx.slice = kit->slice; in scx_dispatch_from_dsq()
6329 locked_rq = move_task_between_dsqs(p, enq_flags, src_dsq, dst_dsq); in scx_dispatch_from_dsq()
6488 struct task_struct *p, u64 dsq_id, in scx_bpf_dispatch_from_dsq() argument
6492 p, dsq_id, enq_flags); in scx_bpf_dispatch_from_dsq()
6514 struct task_struct *p, u64 dsq_id, in scx_bpf_dispatch_vtime_from_dsq() argument
6518 p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); in scx_bpf_dispatch_vtime_from_dsq()
6552 struct task_struct *p, *n; in scx_bpf_reenqueue_local() local
6565 list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list, in scx_bpf_reenqueue_local()
6578 if (p->migration_pending) in scx_bpf_reenqueue_local()
6581 dispatch_dequeue(rq, p); in scx_bpf_reenqueue_local()
6582 list_add_tail(&p->scx.dsq_list.node, &tasks); in scx_bpf_reenqueue_local()
6585 list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) { in scx_bpf_reenqueue_local()
6586 list_del_init(&p->scx.dsq_list.node); in scx_bpf_reenqueue_local()
6587 do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); in scx_bpf_reenqueue_local()
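scx_bpf_reenqueue_local() above walks a CPU's local DSQ and pushes the queued tasks back through do_enqueue_task() with SCX_ENQ_REENQ. On the BPF side it is typically invoked from ops.cpu_release() when a higher-priority sched class takes the CPU, so the stranded tasks can be re-placed; a sketch assuming the upstream callback signature, with sketch_cpu_release as an illustrative name:

void BPF_STRUCT_OPS(sketch_cpu_release, s32 cpu,
		    struct scx_cpu_release_args *args)
{
	/* The CPU is being taken over by a higher-priority sched class; push
	 * the tasks still sitting in its local DSQ back through ops.enqueue()
	 * so they can be queued somewhere that will actually run them. */
	scx_bpf_reenqueue_local();
}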
6804 struct task_struct *p; in bpf_iter_scx_dsq_next() local
6813 p = NULL; in bpf_iter_scx_dsq_next()
6815 p = container_of(&kit->cursor, struct task_struct, scx.dsq_list); in bpf_iter_scx_dsq_next()
6823 p = nldsq_next_task(kit->dsq, p, rev); in bpf_iter_scx_dsq_next()
6824 } while (p && unlikely(u32_before(kit->cursor.priv, p->scx.dsq_seq))); in bpf_iter_scx_dsq_next()
6826 if (p) { in bpf_iter_scx_dsq_next()
6828 list_move_tail(&kit->cursor.node, &p->scx.dsq_list.node); in bpf_iter_scx_dsq_next()
6830 list_move(&kit->cursor.node, &p->scx.dsq_list.node); in bpf_iter_scx_dsq_next()
6837 return p; in bpf_iter_scx_dsq_next()
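bpf_iter_scx_dsq_next() above backs the open-coded DSQ iterator exposed to BPF schedulers. Combined with scx_bpf_dispatch_from_dsq() from earlier in the listing, it lets a scheduler move tasks out of a user DSQ while walking it. A sketch assuming the iterator kfunc declarations from the scx common headers; move_first_to_local() is an illustrative helper:

static bool move_first_to_local(u64 src_dsq_id)
{
	struct bpf_iter_scx_dsq it;
	struct task_struct *p;
	bool moved = false;

	if (bpf_iter_scx_dsq_new(&it, src_dsq_id, 0))
		return false;

	while ((p = bpf_iter_scx_dsq_next(&it))) {
		/* Transfer the task being iterated to this CPU's local DSQ. */
		if (scx_bpf_dispatch_from_dsq(&it, p, SCX_DSQ_LOCAL, 0)) {
			moved = true;
			break;
		}
	}

	bpf_iter_scx_dsq_destroy(&it);
	return moved;
}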
7264 __bpf_kfunc bool scx_bpf_task_running(const struct task_struct *p) in scx_bpf_task_running() argument
7266 return task_rq(p)->curr == p; in scx_bpf_task_running()
7273 __bpf_kfunc s32 scx_bpf_task_cpu(const struct task_struct *p) in scx_bpf_task_cpu() argument
7275 return task_cpu(p); in scx_bpf_task_cpu()
7302 __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) in scx_bpf_task_cgroup() argument
7304 struct task_group *tg = p->sched_task_group; in scx_bpf_task_cgroup()
7307 if (!scx_kf_allowed_on_arg_tasks(__SCX_KF_RQ_LOCKED, p)) in scx_bpf_task_cgroup()
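scx_bpf_task_cgroup() above hands an acquired cgroup reference to the BPF scheduler; per scx_kf_allowed_on_arg_tasks() near the top of the listing it is only usable on a task that the current ops callback received as an argument, and the reference must be dropped with bpf_cgroup_release(). A sketch assuming cgroup support is enabled; task_cgroup_id() is an illustrative helper:

static u64 task_cgroup_id(struct task_struct *p)
{
	struct cgroup *cgrp = scx_bpf_task_cgroup(p);
	u64 id = cgrp->kn->id;	/* kernfs node id identifies the cgroup */

	bpf_cgroup_release(cgrp);
	return id;
}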