Lines Matching refs:p

277 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq() argument
279 return p->se.cfs_rq; in task_cfs_rq()
462 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq() argument
464 return &task_rq(p)->cfs; in task_cfs_rq()
469 struct task_struct *p = task_of(se); in cfs_rq_of() local
470 struct rq *rq = task_rq(p); in cfs_rq_of()
754 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
755 static unsigned long task_h_load(struct task_struct *p);
805 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
807 struct sched_entity *se = &p->se; in post_init_entity_util_avg()
827 if (p->sched_class != &fair_sched_class) { in post_init_entity_util_avg()
851 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
923 struct task_struct *p; in update_stats_wait_end() local
932 p = task_of(se); in update_stats_wait_end()
933 if (task_on_rq_migrating(p)) { in update_stats_wait_end()
942 trace_sched_stat_wait(p, delta); in update_stats_wait_end()
1121 static struct numa_group *deref_task_numa_group(struct task_struct *p) in deref_task_numa_group() argument
1123 return rcu_dereference_check(p->numa_group, p == current || in deref_task_numa_group()
1124 (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu))); in deref_task_numa_group()
1127 static struct numa_group *deref_curr_numa_group(struct task_struct *p) in deref_curr_numa_group() argument
1129 return rcu_dereference_protected(p->numa_group, p == current); in deref_curr_numa_group()
1135 static unsigned int task_nr_scan_windows(struct task_struct *p) in task_nr_scan_windows() argument
1146 rss = get_mm_rss(p->mm); in task_nr_scan_windows()
1157 static unsigned int task_scan_min(struct task_struct *p) in task_scan_min() argument
1167 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); in task_scan_min()
1171 static unsigned int task_scan_start(struct task_struct *p) in task_scan_start() argument
1173 unsigned long smin = task_scan_min(p); in task_scan_start()
1179 ng = rcu_dereference(p->numa_group); in task_scan_start()
1193 static unsigned int task_scan_max(struct task_struct *p) in task_scan_max() argument
1195 unsigned long smin = task_scan_min(p); in task_scan_max()
1200 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); in task_scan_max()
1203 ng = deref_curr_numa_group(p); in task_scan_max()
1219 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
1221 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_enqueue()
1222 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); in account_numa_enqueue()
1225 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
1227 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_dequeue()
1228 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); in account_numa_dequeue()
1240 pid_t task_numa_group_id(struct task_struct *p) in task_numa_group_id() argument
1246 ng = rcu_dereference(p->numa_group); in task_numa_group_id()
1265 static inline unsigned long task_faults(struct task_struct *p, int nid) in task_faults() argument
1267 if (!p->numa_faults) in task_faults()
1270 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + in task_faults()
1271 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; in task_faults()
1274 static inline unsigned long group_faults(struct task_struct *p, int nid) in group_faults() argument
1276 struct numa_group *ng = deref_task_numa_group(p); in group_faults()
1328 static unsigned long score_nearby_nodes(struct task_struct *p, int nid, in score_nearby_nodes() argument
1369 faults = task_faults(p, node); in score_nearby_nodes()
1371 faults = group_faults(p, node); in score_nearby_nodes()
1398 static inline unsigned long task_weight(struct task_struct *p, int nid, in task_weight() argument
1403 if (!p->numa_faults) in task_weight()
1406 total_faults = p->total_numa_faults; in task_weight()
1411 faults = task_faults(p, nid); in task_weight()
1412 faults += score_nearby_nodes(p, nid, dist, true); in task_weight()
1417 static inline unsigned long group_weight(struct task_struct *p, int nid, in group_weight() argument
1420 struct numa_group *ng = deref_task_numa_group(p); in group_weight()
1431 faults = group_faults(p, nid); in group_weight()
1432 faults += score_nearby_nodes(p, nid, dist, false); in group_weight()
1437 bool should_numa_migrate_memory(struct task_struct *p, struct page * page, in should_numa_migrate_memory() argument
1440 struct numa_group *ng = deref_curr_numa_group(p); in should_numa_migrate_memory()
1453 if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && in should_numa_migrate_memory()
1454 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) in should_numa_migrate_memory()
1479 if (cpupid_match_pid(p, last_cpupid)) in should_numa_migrate_memory()
1502 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > in should_numa_migrate_memory()
1503 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; in should_numa_migrate_memory()
1555 struct task_struct *p; member
1644 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) in update_numa_stats()
1664 struct task_struct *p, long imp) in task_numa_assign() argument
1676 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_assign()
1702 if (p) in task_numa_assign()
1703 get_task_struct(p); in task_numa_assign()
1705 env->best_task = p; in task_numa_assign()
1754 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); in task_numa_compare()
1776 if (cur == env->p) { in task_numa_compare()
1875 load = task_h_load(env->p) - task_h_load(cur); in task_numa_compare()
1970 load = task_h_load(env->p); in task_numa_find_cpu()
1978 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) in task_numa_find_cpu()
1987 static int task_numa_migrate(struct task_struct *p) in task_numa_migrate() argument
1990 .p = p, in task_numa_migrate()
1992 .src_cpu = task_cpu(p), in task_numa_migrate()
1993 .src_nid = task_node(p), in task_numa_migrate()
2029 sched_setnuma(p, task_node(p)); in task_numa_migrate()
2033 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
2035 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2036 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2038 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
2039 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
2052 ng = deref_curr_numa_group(p); in task_numa_migrate()
2055 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
2061 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2062 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2066 taskimp = task_weight(p, nid, dist) - taskweight; in task_numa_migrate()
2067 groupimp = group_weight(p, nid, dist) - groupweight; in task_numa_migrate()
2092 if (nid != p->numa_preferred_nid) in task_numa_migrate()
2093 sched_setnuma(p, nid); in task_numa_migrate()
2098 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); in task_numa_migrate()
2104 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
2107 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); in task_numa_migrate()
2111 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); in task_numa_migrate()
2115 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); in task_numa_migrate()
2121 static void numa_migrate_preferred(struct task_struct *p) in numa_migrate_preferred() argument
2126 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) in numa_migrate_preferred()
2130 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); in numa_migrate_preferred()
2131 p->numa_migrate_retry = jiffies + interval; in numa_migrate_preferred()
2134 if (task_node(p) == p->numa_preferred_nid) in numa_migrate_preferred()
2138 task_numa_migrate(p); in numa_migrate_preferred()
2184 static void update_task_scan_period(struct task_struct *p, in update_task_scan_period() argument
2191 unsigned long remote = p->numa_faults_locality[0]; in update_task_scan_period()
2192 unsigned long local = p->numa_faults_locality[1]; in update_task_scan_period()
2201 if (local + shared == 0 || p->numa_faults_locality[2]) { in update_task_scan_period()
2202 p->numa_scan_period = min(p->numa_scan_period_max, in update_task_scan_period()
2203 p->numa_scan_period << 1); in update_task_scan_period()
2205 p->mm->numa_next_scan = jiffies + in update_task_scan_period()
2206 msecs_to_jiffies(p->numa_scan_period); in update_task_scan_period()
2217 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); in update_task_scan_period()
2250 p->numa_scan_period = clamp(p->numa_scan_period + diff, in update_task_scan_period()
2251 task_scan_min(p), task_scan_max(p)); in update_task_scan_period()
2252 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); in update_task_scan_period()
2262 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) in numa_get_avg_runtime() argument
2266 now = p->se.exec_start; in numa_get_avg_runtime()
2267 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2269 if (p->last_task_numa_placement) { in numa_get_avg_runtime()
2270 delta = runtime - p->last_sum_exec_runtime; in numa_get_avg_runtime()
2271 *period = now - p->last_task_numa_placement; in numa_get_avg_runtime()
2277 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2281 p->last_sum_exec_runtime = runtime; in numa_get_avg_runtime()
2282 p->last_task_numa_placement = now; in numa_get_avg_runtime()
2292 static int preferred_group_nid(struct task_struct *p, int nid) in preferred_group_nid() argument
2313 score = group_weight(p, node, dist); in preferred_group_nid()
2349 faults += group_faults(p, b); in preferred_group_nid()
2375 static void task_numa_placement(struct task_struct *p) in task_numa_placement() argument
2390 seq = READ_ONCE(p->mm->numa_scan_seq); in task_numa_placement()
2391 if (p->numa_scan_seq == seq) in task_numa_placement()
2393 p->numa_scan_seq = seq; in task_numa_placement()
2394 p->numa_scan_period_max = task_scan_max(p); in task_numa_placement()
2396 total_faults = p->numa_faults_locality[0] + in task_numa_placement()
2397 p->numa_faults_locality[1]; in task_numa_placement()
2398 runtime = numa_get_avg_runtime(p, &period); in task_numa_placement()
2401 ng = deref_curr_numa_group(p); in task_numa_placement()
2423 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; in task_numa_placement()
2424 fault_types[priv] += p->numa_faults[membuf_idx]; in task_numa_placement()
2425 p->numa_faults[membuf_idx] = 0; in task_numa_placement()
2435 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / in task_numa_placement()
2437 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; in task_numa_placement()
2438 p->numa_faults[cpubuf_idx] = 0; in task_numa_placement()
2440 p->numa_faults[mem_idx] += diff; in task_numa_placement()
2441 p->numa_faults[cpu_idx] += f_diff; in task_numa_placement()
2442 faults += p->numa_faults[mem_idx]; in task_numa_placement()
2443 p->total_numa_faults += diff; in task_numa_placement()
2473 max_nid = preferred_group_nid(p, max_nid); in task_numa_placement()
2478 if (max_nid != p->numa_preferred_nid) in task_numa_placement()
2479 sched_setnuma(p, max_nid); in task_numa_placement()
2482 update_task_scan_period(p, fault_types[0], fault_types[1]); in task_numa_placement()
2496 static void task_numa_group(struct task_struct *p, int cpupid, int flags, in task_numa_group() argument
2505 if (unlikely(!deref_curr_numa_group(p))) { in task_numa_group()
2517 grp->gid = p->pid; in task_numa_group()
2523 grp->faults[i] = p->numa_faults[i]; in task_numa_group()
2525 grp->total_faults = p->total_numa_faults; in task_numa_group()
2528 rcu_assign_pointer(p->numa_group, grp); in task_numa_group()
2541 my_grp = deref_curr_numa_group(p); in task_numa_group()
2581 my_grp->faults[i] -= p->numa_faults[i]; in task_numa_group()
2582 grp->faults[i] += p->numa_faults[i]; in task_numa_group()
2584 my_grp->total_faults -= p->total_numa_faults; in task_numa_group()
2585 grp->total_faults += p->total_numa_faults; in task_numa_group()
2593 rcu_assign_pointer(p->numa_group, grp); in task_numa_group()
2610 void task_numa_free(struct task_struct *p, bool final) in task_numa_free() argument
2613 struct numa_group *grp = rcu_dereference_raw(p->numa_group); in task_numa_free()
2614 unsigned long *numa_faults = p->numa_faults; in task_numa_free()
2624 grp->faults[i] -= p->numa_faults[i]; in task_numa_free()
2625 grp->total_faults -= p->total_numa_faults; in task_numa_free()
2629 RCU_INIT_POINTER(p->numa_group, NULL); in task_numa_free()
2634 p->numa_faults = NULL; in task_numa_free()
2637 p->total_numa_faults = 0; in task_numa_free()
2648 struct task_struct *p = current; in task_numa_fault() local
2659 if (!p->mm) in task_numa_fault()
2663 if (unlikely(!p->numa_faults)) { in task_numa_fault()
2664 int size = sizeof(*p->numa_faults) * in task_numa_fault()
2667 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); in task_numa_fault()
2668 if (!p->numa_faults) in task_numa_fault()
2671 p->total_numa_faults = 0; in task_numa_fault()
2672 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); in task_numa_fault()
2682 priv = cpupid_match_pid(p, last_cpupid); in task_numa_fault()
2684 task_numa_group(p, last_cpupid, flags, &priv); in task_numa_fault()
2693 ng = deref_curr_numa_group(p); in task_numa_fault()
2703 if (time_after(jiffies, p->numa_migrate_retry)) { in task_numa_fault()
2704 task_numa_placement(p); in task_numa_fault()
2705 numa_migrate_preferred(p); in task_numa_fault()
2709 p->numa_pages_migrated += pages; in task_numa_fault()
2711 p->numa_faults_locality[2] += pages; in task_numa_fault()
2713 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; in task_numa_fault()
2714 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; in task_numa_fault()
2715 p->numa_faults_locality[local] += pages; in task_numa_fault()
2718 static void reset_ptenuma_scan(struct task_struct *p) in reset_ptenuma_scan() argument
2728 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); in reset_ptenuma_scan()
2729 p->mm->numa_scan_offset = 0; in reset_ptenuma_scan()
2739 struct task_struct *p = current; in task_numa_work() local
2740 struct mm_struct *mm = p->mm; in task_numa_work()
2741 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
2747 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); in task_numa_work()
2758 if (p->flags & PF_EXITING) in task_numa_work()
2773 if (p->numa_scan_period == 0) { in task_numa_work()
2774 p->numa_scan_period_max = task_scan_max(p); in task_numa_work()
2775 p->numa_scan_period = task_scan_start(p); in task_numa_work()
2778 next_scan = now + msecs_to_jiffies(p->numa_scan_period); in task_numa_work()
2786 p->node_stamp += 2 * TICK_NSEC; in task_numa_work()
2800 reset_ptenuma_scan(p); in task_numa_work()
2863 reset_ptenuma_scan(p); in task_numa_work()
2872 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
2873 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
2874 p->node_stamp += 32 * diff; in task_numa_work()
2878 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) in init_numa_balancing() argument
2881 struct mm_struct *mm = p->mm; in init_numa_balancing()
2890 p->node_stamp = 0; in init_numa_balancing()
2891 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; in init_numa_balancing()
2892 p->numa_scan_period = sysctl_numa_balancing_scan_delay; in init_numa_balancing()
2894 p->numa_work.next = &p->numa_work; in init_numa_balancing()
2895 p->numa_faults = NULL; in init_numa_balancing()
2896 RCU_INIT_POINTER(p->numa_group, NULL); in init_numa_balancing()
2897 p->last_task_numa_placement = 0; in init_numa_balancing()
2898 p->last_sum_exec_runtime = 0; in init_numa_balancing()
2900 init_task_work(&p->numa_work, task_numa_work); in init_numa_balancing()
2904 p->numa_preferred_nid = NUMA_NO_NODE; in init_numa_balancing()
2918 p->node_stamp = delay; in init_numa_balancing()
2955 static void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
2957 int src_nid = cpu_to_node(task_cpu(p)); in update_scan_period()
2963 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) in update_scan_period()
2974 if (p->numa_scan_seq) { in update_scan_period()
2980 if (dst_nid == p->numa_preferred_nid || in update_scan_period()
2981 (p->numa_preferred_nid != NUMA_NO_NODE && in update_scan_period()
2982 src_nid != p->numa_preferred_nid)) in update_scan_period()
2986 p->numa_scan_period = task_scan_start(p); in update_scan_period()
2994 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
2998 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
3002 static inline void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
3132 void reweight_task(struct task_struct *p, int prio) in reweight_task() argument
3134 struct sched_entity *se = &p->se; in reweight_task()
3923 static inline unsigned long task_util(struct task_struct *p) in task_util() argument
3925 return READ_ONCE(p->se.avg.util_avg); in task_util()
3928 static inline unsigned long _task_util_est(struct task_struct *p) in _task_util_est() argument
3930 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
3935 static inline unsigned long task_util_est(struct task_struct *p) in task_util_est() argument
3937 return max(task_util(p), _task_util_est(p)); in task_util_est()
3941 struct task_struct *p) in util_est_enqueue() argument
3950 enqueued += _task_util_est(p); in util_est_enqueue()
3957 struct task_struct *p) in util_est_dequeue() argument
3966 enqueued -= min_t(unsigned int, enqueued, _task_util_est(p)); in util_est_dequeue()
3988 struct task_struct *p, in util_est_update() argument
3995 trace_android_rvh_util_est_update(cfs_rq, p, task_sleep, &ret); in util_est_update()
4013 ue = p->se.avg.util_est; in util_est_update()
4023 ue.enqueued = task_util(p); in util_est_update()
4048 if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) in util_est_update()
4073 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
4075 trace_sched_util_est_se_tp(&p->se); in util_est_update()
4201 static inline int task_fits_cpu(struct task_struct *p, int cpu) in task_fits_cpu() argument
4203 unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN); in task_fits_cpu()
4204 unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX); in task_fits_cpu()
4205 unsigned long util = task_util_est(p); in task_fits_cpu()
4209 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) in update_misfit_status() argument
4213 trace_android_rvh_update_misfit_status(p, rq, &need_update); in update_misfit_status()
4217 if (!p || p->nr_cpus_allowed == 1) { in update_misfit_status()
4222 if (task_fits_cpu(p, cpu_of(rq))) { in update_misfit_status()
4231 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); in update_misfit_status()
4258 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_enqueue() argument
4261 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_dequeue() argument
4264 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, in util_est_update() argument
4266 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} in update_misfit_status() argument
5624 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
5626 struct sched_entity *se = &p->se; in hrtick_start_fair()
5629 SCHED_WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
5637 if (rq->curr == p) in hrtick_start_fair()
5662 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
5718 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair() argument
5721 struct sched_entity *se = &p->se; in enqueue_task_fair()
5722 int idle_h_nr_running = task_has_idle_policy(p); in enqueue_task_fair()
5732 util_est_enqueue(&rq->cfs, p); in enqueue_task_fair()
5739 should_iowait_boost = p->in_iowait; in enqueue_task_fair()
5740 trace_android_rvh_set_iowait(p, &should_iowait_boost); in enqueue_task_fair()
5760 trace_android_rvh_enqueue_task_fair(rq, p, flags); in enqueue_task_fair()
5831 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair() argument
5834 struct sched_entity *se = &p->se; in dequeue_task_fair()
5836 int idle_h_nr_running = task_has_idle_policy(p); in dequeue_task_fair()
5839 util_est_dequeue(&rq->cfs, p); in dequeue_task_fair()
5867 trace_android_rvh_dequeue_task_fair(rq, p, flags); in dequeue_task_fair()
5892 util_est_update(&rq->cfs, p, task_sleep); in dequeue_task_fair()
5932 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) in cpu_load_without() argument
5938 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
5945 lsub_positive(&load, task_h_load(p)); in cpu_load_without()
5955 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) in cpu_runnable_without() argument
5961 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
5968 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
5978 static void record_wakee(struct task_struct *p) in record_wakee() argument
5989 if (current->last_wakee != p) { in record_wakee()
5990 current->last_wakee = p; in record_wakee()
6012 static int wake_wide(struct task_struct *p) in wake_wide() argument
6015 unsigned int slave = p->wakee_flips; in wake_wide()
6062 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight() argument
6079 task_load = task_h_load(p); in wake_affine_weight()
6104 static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine() argument
6113 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); in wake_affine()
6115 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); in wake_affine()
6120 schedstat_inc(p->se.statistics.nr_wakeups_affine); in wake_affine()
6125 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
6131 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in find_idlest_group_cpu() argument
6145 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { in find_idlest_group_cpu()
6183 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, in find_idlest_cpu() argument
6188 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) in find_idlest_cpu()
6196 sync_entity_load_avg(&p->se); in find_idlest_cpu()
6208 group = find_idlest_group(sd, p, cpu); in find_idlest_cpu()
6214 new_cpu = find_idlest_group_cpu(group, p, cpu); in find_idlest_cpu()
6294 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_core() argument
6305 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_core()
6333 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt() argument
6341 if (!cpumask_test_cpu(cpu, p->cpus_ptr) || in select_idle_smt()
6353 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_core() argument
6358 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_smt() argument
6370 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_cpu() argument
6403 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_cpu()
6424 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_capacity() argument
6431 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_capacity()
6433 task_util = task_util_est(p); in select_idle_capacity()
6434 util_min = uclamp_eff_value(p, UCLAMP_MIN); in select_idle_capacity()
6435 util_max = uclamp_eff_value(p, UCLAMP_MAX); in select_idle_capacity()
6468 static int select_idle_sibling(struct task_struct *p, int prev, int target) in select_idle_sibling() argument
6479 sync_entity_load_avg(&p->se); in select_idle_sibling()
6480 task_util = task_util_est(p); in select_idle_sibling()
6481 util_min = uclamp_eff_value(p, UCLAMP_MIN); in select_idle_sibling()
6482 util_max = uclamp_eff_value(p, UCLAMP_MAX); in select_idle_sibling()
6514 recent_used_cpu = p->recent_used_cpu; in select_idle_sibling()
6519 cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && in select_idle_sibling()
6525 p->recent_used_cpu = prev; in select_idle_sibling()
6544 i = select_idle_capacity(p, sd, target); in select_idle_sibling()
6553 i = select_idle_core(p, sd, target); in select_idle_sibling()
6557 i = select_idle_cpu(p, sd, target); in select_idle_sibling()
6561 i = select_idle_smt(p, sd, target); in select_idle_sibling()
6633 static unsigned long cpu_util_without(int cpu, struct task_struct *p) in cpu_util_without() argument
6639 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
6646 lsub_positive(&util, task_util(p)); in cpu_util_without()
6695 if (unlikely(task_on_rq_queued(p) || current == p)) in cpu_util_without()
6696 lsub_positive(&estimated, _task_util_est(p)); in cpu_util_without()
6713 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) in cpu_util_next() argument
6724 if (task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util_next()
6725 sub_positive(&util, task_util(p)); in cpu_util_next()
6726 else if (task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util_next()
6727 util += task_util(p); in cpu_util_next()
6739 util_est += _task_util_est(p); in cpu_util_next()
6755 compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) in compute_energy() argument
6773 unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu); in compute_energy()
6774 struct task_struct *tsk = cpu == dst_cpu ? p : NULL; in compute_energy()
6843 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync) in find_energy_efficient_cpu() argument
6846 unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0; in find_energy_efficient_cpu()
6847 unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024; in find_energy_efficient_cpu()
6860 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
6861 trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu); in find_energy_efficient_cpu()
6872 cpumask_test_cpu(cpu, p->cpus_ptr) && in find_energy_efficient_cpu()
6873 task_fits_cpu(p, cpu)) { in find_energy_efficient_cpu()
6888 if (!task_util_est(p) && p_util_min == 0) in find_energy_efficient_cpu()
6891 latency_sensitive = uclamp_latency_sensitive(p); in find_energy_efficient_cpu()
6892 boosted = uclamp_boosted(p); in find_energy_efficient_cpu()
6903 base_energy_pd = compute_energy(p, -1, pd); in find_energy_efficient_cpu()
6907 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in find_energy_efficient_cpu()
6910 util = cpu_util_next(cpu, p, cpu); in find_energy_efficient_cpu()
6946 prev_delta = compute_energy(p, prev_cpu, pd); in find_energy_efficient_cpu()
6987 cur_delta = compute_energy(p, max_spare_cap_cpu, pd); in find_energy_efficient_cpu()
7032 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) in select_task_rq_fair() argument
7043 sync_entity_load_avg(&p->se); in select_task_rq_fair()
7044 trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag, in select_task_rq_fair()
7050 record_wakee(p); in select_task_rq_fair()
7053 new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync); in select_task_rq_fair()
7059 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); in select_task_rq_fair()
7071 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); in select_task_rq_fair()
7085 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); in select_task_rq_fair()
7089 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); in select_task_rq_fair()
7106 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) in migrate_task_rq_fair() argument
7114 if (p->state == TASK_WAKING) { in migrate_task_rq_fair()
7115 struct sched_entity *se = &p->se; in migrate_task_rq_fair()
7134 if (p->on_rq == TASK_ON_RQ_MIGRATING) { in migrate_task_rq_fair()
7139 lockdep_assert_held(&task_rq(p)->lock); in migrate_task_rq_fair()
7140 detach_entity_cfs_rq(&p->se); in migrate_task_rq_fair()
7151 remove_entity_load_avg(&p->se); in migrate_task_rq_fair()
7155 p->se.avg.last_update_time = 0; in migrate_task_rq_fair()
7157 update_scan_period(p, new_cpu); in migrate_task_rq_fair()
7160 static void task_dead_fair(struct task_struct *p) in task_dead_fair() argument
7162 remove_entity_load_avg(&p->se); in task_dead_fair()
7257 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup() argument
7260 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup()
7298 likely(!task_has_idle_policy(p))) in check_preempt_wakeup()
7305 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) in check_preempt_wakeup()
7310 trace_android_rvh_check_preempt_wakeup(rq, p, &preempt, &nopreempt, in check_preempt_wakeup()
7352 struct task_struct *p = NULL; in pick_next_task_fair() local
7407 p = task_of(se); in pick_next_task_fair()
7408 trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev); in pick_next_task_fair()
7414 if (prev != p) { in pick_next_task_fair()
7441 trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev); in pick_next_task_fair()
7454 p = task_of(se); in pick_next_task_fair()
7463 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
7467 hrtick_start_fair(rq, p); in pick_next_task_fair()
7469 update_misfit_status(p, rq); in pick_next_task_fair()
7471 return p; in pick_next_task_fair()
7554 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) in yield_to_task_fair() argument
7556 struct sched_entity *se = &p->se; in yield_to_task_fair()
7777 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
7783 if (p->sched_class != &fair_sched_class) in task_hot()
7786 if (unlikely(task_has_idle_policy(p))) in task_hot()
7797 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
7798 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
7806 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
7817 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
7819 struct numa_group *numa_group = rcu_dereference(p->numa_group); in migrate_degrades_locality()
7826 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
7836 if (src_nid == p->numa_preferred_nid) { in migrate_degrades_locality()
7844 if (dst_nid == p->numa_preferred_nid) in migrate_degrades_locality()
7853 src_weight = group_weight(p, src_nid, dist); in migrate_degrades_locality()
7854 dst_weight = group_weight(p, dst_nid, dist); in migrate_degrades_locality()
7856 src_weight = task_weight(p, src_nid, dist); in migrate_degrades_locality()
7857 dst_weight = task_weight(p, dst_nid, dist); in migrate_degrades_locality()
7864 static inline int migrate_degrades_locality(struct task_struct *p, in migrate_degrades_locality() argument
7875 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
7882 trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate); in can_migrate_task()
7893 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
7897 if (kthread_is_per_cpu(p)) in can_migrate_task()
7900 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
7903 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); in can_migrate_task()
7920 if (cpumask_test_cpu(cpu, p->cpus_ptr)) { in can_migrate_task()
7933 if (task_running(env->src_rq, p)) { in can_migrate_task()
7934 schedstat_inc(p->se.statistics.nr_failed_migrations_running); in can_migrate_task()
7944 tsk_cache_hot = migrate_degrades_locality(p, env); in can_migrate_task()
7946 tsk_cache_hot = task_hot(p, env); in can_migrate_task()
7952 schedstat_inc(p->se.statistics.nr_forced_migrations); in can_migrate_task()
7957 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); in can_migrate_task()
7964 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
7975 trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p, in detach_task()
7980 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
7981 set_task_cpu(p, env->dst_cpu); in detach_task()
7992 struct task_struct *p; in detach_one_task() local
7996 list_for_each_entry_reverse(p, in detach_one_task()
7998 if (!can_migrate_task(p, env)) in detach_one_task()
8001 detach_task(p, env); in detach_one_task()
8010 return p; in detach_one_task()
8027 struct task_struct *p; in detach_tasks() local
8043 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
8057 if (!can_migrate_task(p, env)) in detach_tasks()
8069 load = max_t(unsigned long, task_h_load(p), 1); in detach_tasks()
8088 util = task_util_est(p); in detach_tasks()
8102 if (task_fits_cpu(p, env->src_cpu)) in detach_tasks()
8109 detach_task(p, env); in detach_tasks()
8110 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
8133 list_move(&p->se.group_node, tasks); in detach_tasks()
8149 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task() argument
8153 BUG_ON(task_rq(p) != rq); in attach_task()
8154 activate_task(rq, p, ENQUEUE_NOCLOCK); in attach_task()
8155 check_preempt_curr(rq, p, 0); in attach_task()
8162 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task() argument
8168 attach_task(rq, p); in attach_one_task()
8179 struct task_struct *p; in attach_tasks() local
8186 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
8187 list_del_init(&p->se.group_node); in attach_tasks()
8189 attach_task(env->dst_rq, p); in attach_tasks()
8362 static unsigned long task_h_load(struct task_struct *p) in task_h_load() argument
8364 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load()
8367 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
8383 static unsigned long task_h_load(struct task_struct *p) in task_h_load() argument
8385 return p->se.avg.load_avg; in task_h_load()
8989 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p) in task_running_on_cpu() argument
8992 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
8995 if (task_on_rq_queued(p)) in task_running_on_cpu()
9008 static int idle_cpu_without(int cpu, struct task_struct *p) in idle_cpu_without() argument
9012 if (rq->curr != rq->idle && rq->curr != p) in idle_cpu_without()
9039 struct task_struct *p) in update_sg_wakeup_stats() argument
9053 sgs->group_load += cpu_load_without(rq, p); in update_sg_wakeup_stats()
9054 sgs->group_util += cpu_util_without(i, p); in update_sg_wakeup_stats()
9055 sgs->group_runnable += cpu_runnable_without(rq, p); in update_sg_wakeup_stats()
9056 local = task_running_on_cpu(i, p); in update_sg_wakeup_stats()
9065 if (!nr_running && idle_cpu_without(i, p)) in update_sg_wakeup_stats()
9071 task_fits_cpu(p, i)) in update_sg_wakeup_stats()
9150 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) in find_idlest_group() argument
9169 p->cpus_ptr)) in find_idlest_group()
9182 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
9260 if (cpu_to_node(this_cpu) == p->numa_preferred_nid) in find_idlest_group()
9264 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) in find_idlest_group()
10260 struct task_struct *p = NULL; in active_load_balance_cpu_stop() local
10316 p = detach_one_task(&env); in active_load_balance_cpu_stop()
10317 if (p) { in active_load_balance_cpu_stop()
10330 if (p) in active_load_balance_cpu_stop()
10331 attach_one_task(target_rq, p); in active_load_balance_cpu_stop()
11149 static void task_fork_fair(struct task_struct *p) in task_fork_fair() argument
11152 struct sched_entity *se = &p->se, *curr; in task_fork_fair()
11185 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair() argument
11187 if (!task_on_rq_queued(p)) in prio_changed_fair()
11198 if (rq->curr == p) { in prio_changed_fair()
11199 if (p->prio > oldprio) in prio_changed_fair()
11202 check_preempt_curr(rq, p, 0); in prio_changed_fair()
11205 static inline bool vruntime_normalized(struct task_struct *p) in vruntime_normalized() argument
11207 struct sched_entity *se = &p->se; in vruntime_normalized()
11214 if (p->on_rq) in vruntime_normalized()
11227 (p->state == TASK_WAKING && p->sched_remote_wakeup)) in vruntime_normalized()
11294 static void detach_task_cfs_rq(struct task_struct *p) in detach_task_cfs_rq() argument
11296 struct sched_entity *se = &p->se; in detach_task_cfs_rq()
11299 if (!vruntime_normalized(p)) { in detach_task_cfs_rq()
11311 static void attach_task_cfs_rq(struct task_struct *p) in attach_task_cfs_rq() argument
11313 struct sched_entity *se = &p->se; in attach_task_cfs_rq()
11318 if (!vruntime_normalized(p)) in attach_task_cfs_rq()
11322 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair() argument
11324 detach_task_cfs_rq(p); in switched_from_fair()
11327 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair() argument
11329 attach_task_cfs_rq(p); in switched_to_fair()
11331 if (task_on_rq_queued(p)) { in switched_to_fair()
11337 if (rq->curr == p) in switched_to_fair()
11340 check_preempt_curr(rq, p, 0); in switched_to_fair()
11349 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) in set_next_task_fair() argument
11351 struct sched_entity *se = &p->se; in set_next_task_fair()
11354 if (task_on_rq_queued(p)) { in set_next_task_fair()
11385 static void task_set_group_fair(struct task_struct *p) in task_set_group_fair() argument
11387 struct sched_entity *se = &p->se; in task_set_group_fair()
11389 set_task_rq(p, task_cpu(p)); in task_set_group_fair()
11393 static void task_move_group_fair(struct task_struct *p) in task_move_group_fair() argument
11395 detach_task_cfs_rq(p); in task_move_group_fair()
11396 set_task_rq(p, task_cpu(p)); in task_move_group_fair()
11400 p->se.avg.last_update_time = 0; in task_move_group_fair()
11402 attach_task_cfs_rq(p); in task_move_group_fair()
11405 static void task_change_group_fair(struct task_struct *p, int type) in task_change_group_fair() argument
11409 task_set_group_fair(p); in task_change_group_fair()
11413 task_move_group_fair(p); in task_change_group_fair()
11679 void show_numa_stats(struct task_struct *p, struct seq_file *m) in show_numa_stats() argument
11686 ng = rcu_dereference(p->numa_group); in show_numa_stats()
11688 if (p->numa_faults) { in show_numa_stats()
11689 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; in show_numa_stats()
11690 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; in show_numa_stats()