Lines Matching refs: p (references to the struct task_struct pointer p in kernel/sched/fair.c)
789 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
790 static unsigned long task_h_load(struct task_struct *p);
838 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
840 struct sched_entity *se = &p->se; in post_init_entity_util_avg()
846 if (p->sched_class != &fair_sched_class) { in post_init_entity_util_avg()
883 void post_init_entity_util_avg(struct task_struct *p) in post_init_entity_util_avg() argument
943 struct task_struct *p = NULL; in update_stats_wait_start_fair() local
951 p = task_of(se); in update_stats_wait_start_fair()
953 __update_stats_wait_start(rq_of(cfs_rq), p, stats); in update_stats_wait_start_fair()
960 struct task_struct *p = NULL; in update_stats_wait_end_fair() local
977 p = task_of(se); in update_stats_wait_end_fair()
979 __update_stats_wait_end(rq_of(cfs_rq), p, stats); in update_stats_wait_end_fair()
1137 static struct numa_group *deref_task_numa_group(struct task_struct *p) in deref_task_numa_group() argument
1139 return rcu_dereference_check(p->numa_group, p == current || in deref_task_numa_group()
1140 (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu))); in deref_task_numa_group()
1143 static struct numa_group *deref_curr_numa_group(struct task_struct *p) in deref_curr_numa_group() argument
1145 return rcu_dereference_protected(p->numa_group, p == current); in deref_curr_numa_group()
1151 static unsigned int task_nr_scan_windows(struct task_struct *p) in task_nr_scan_windows() argument
1162 rss = get_mm_rss(p->mm); in task_nr_scan_windows()
1173 static unsigned int task_scan_min(struct task_struct *p) in task_scan_min() argument
1183 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); in task_scan_min()
1187 static unsigned int task_scan_start(struct task_struct *p) in task_scan_start() argument
1189 unsigned long smin = task_scan_min(p); in task_scan_start()
1195 ng = rcu_dereference(p->numa_group); in task_scan_start()
1209 static unsigned int task_scan_max(struct task_struct *p) in task_scan_max() argument
1211 unsigned long smin = task_scan_min(p); in task_scan_max()
1216 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p); in task_scan_max()
1219 ng = deref_curr_numa_group(p); in task_scan_max()
1235 static void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
1237 rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_enqueue()
1238 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); in account_numa_enqueue()
1241 static void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
1243 rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); in account_numa_dequeue()
1244 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); in account_numa_dequeue()
1256 pid_t task_numa_group_id(struct task_struct *p) in task_numa_group_id() argument
1262 ng = rcu_dereference(p->numa_group); in task_numa_group_id()
1281 static inline unsigned long task_faults(struct task_struct *p, int nid) in task_faults() argument
1283 if (!p->numa_faults) in task_faults()
1286 return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] + in task_faults()
1287 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)]; in task_faults()
1290 static inline unsigned long group_faults(struct task_struct *p, int nid) in group_faults() argument
1292 struct numa_group *ng = deref_task_numa_group(p); in group_faults()
1344 static unsigned long score_nearby_nodes(struct task_struct *p, int nid, in score_nearby_nodes() argument
1386 faults = task_faults(p, node); in score_nearby_nodes()
1388 faults = group_faults(p, node); in score_nearby_nodes()
1415 static inline unsigned long task_weight(struct task_struct *p, int nid, in task_weight() argument
1420 if (!p->numa_faults) in task_weight()
1423 total_faults = p->total_numa_faults; in task_weight()
1428 faults = task_faults(p, nid); in task_weight()
1429 faults += score_nearby_nodes(p, nid, dist, true); in task_weight()
1434 static inline unsigned long group_weight(struct task_struct *p, int nid, in group_weight() argument
1437 struct numa_group *ng = deref_task_numa_group(p); in group_weight()
1448 faults = group_faults(p, nid); in group_weight()
1449 faults += score_nearby_nodes(p, nid, dist, false); in group_weight()
1568 bool should_numa_migrate_memory(struct task_struct *p, struct page * page, in should_numa_migrate_memory() argument
1571 struct numa_group *ng = deref_curr_numa_group(p); in should_numa_migrate_memory()
1619 if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && in should_numa_migrate_memory()
1620 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) in should_numa_migrate_memory()
1645 if (cpupid_match_pid(p, last_cpupid)) in should_numa_migrate_memory()
1668 return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 > in should_numa_migrate_memory()
1669 group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4; in should_numa_migrate_memory()
1721 struct task_struct *p; member
1809 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) in update_numa_stats()
1829 struct task_struct *p, long imp) in task_numa_assign() argument
1841 !cpumask_test_cpu(cpu, env->p->cpus_ptr)) { in task_numa_assign()
1867 if (p) in task_numa_assign()
1868 get_task_struct(p); in task_numa_assign()
1870 env->best_task = p; in task_numa_assign()
1919 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); in task_numa_compare()
1941 if (cur == env->p) { in task_numa_compare()
2049 load = task_h_load(env->p) - task_h_load(cur); in task_numa_compare()
2145 load = task_h_load(env->p); in task_numa_find_cpu()
2153 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) in task_numa_find_cpu()
2162 static int task_numa_migrate(struct task_struct *p) in task_numa_migrate() argument
2165 .p = p, in task_numa_migrate()
2167 .src_cpu = task_cpu(p), in task_numa_migrate()
2168 .src_nid = task_node(p), in task_numa_migrate()
2206 sched_setnuma(p, task_node(p)); in task_numa_migrate()
2210 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
2212 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2213 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2215 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
2216 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
2229 ng = deref_curr_numa_group(p); in task_numa_migrate()
2232 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
2238 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
2239 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
2243 taskimp = task_weight(p, nid, dist) - taskweight; in task_numa_migrate()
2244 groupimp = group_weight(p, nid, dist) - groupweight; in task_numa_migrate()
2269 if (nid != p->numa_preferred_nid) in task_numa_migrate()
2270 sched_setnuma(p, nid); in task_numa_migrate()
2275 trace_sched_stick_numa(p, env.src_cpu, NULL, -1); in task_numa_migrate()
2281 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
2284 trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu); in task_numa_migrate()
2288 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); in task_numa_migrate()
2292 trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu); in task_numa_migrate()
2298 static void numa_migrate_preferred(struct task_struct *p) in numa_migrate_preferred() argument
2303 if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) in numa_migrate_preferred()
2307 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16); in numa_migrate_preferred()
2308 p->numa_migrate_retry = jiffies + interval; in numa_migrate_preferred()
2311 if (task_node(p) == p->numa_preferred_nid) in numa_migrate_preferred()
2315 task_numa_migrate(p); in numa_migrate_preferred()
2361 static void update_task_scan_period(struct task_struct *p, in update_task_scan_period() argument
2368 unsigned long remote = p->numa_faults_locality[0]; in update_task_scan_period()
2369 unsigned long local = p->numa_faults_locality[1]; in update_task_scan_period()
2378 if (local + shared == 0 || p->numa_faults_locality[2]) { in update_task_scan_period()
2379 p->numa_scan_period = min(p->numa_scan_period_max, in update_task_scan_period()
2380 p->numa_scan_period << 1); in update_task_scan_period()
2382 p->mm->numa_next_scan = jiffies + in update_task_scan_period()
2383 msecs_to_jiffies(p->numa_scan_period); in update_task_scan_period()
2394 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS); in update_task_scan_period()
2427 p->numa_scan_period = clamp(p->numa_scan_period + diff, in update_task_scan_period()
2428 task_scan_min(p), task_scan_max(p)); in update_task_scan_period()
2429 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); in update_task_scan_period()
2439 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) in numa_get_avg_runtime() argument
2443 now = p->se.exec_start; in numa_get_avg_runtime()
2444 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2446 if (p->last_task_numa_placement) { in numa_get_avg_runtime()
2447 delta = runtime - p->last_sum_exec_runtime; in numa_get_avg_runtime()
2448 *period = now - p->last_task_numa_placement; in numa_get_avg_runtime()
2454 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2458 p->last_sum_exec_runtime = runtime; in numa_get_avg_runtime()
2459 p->last_task_numa_placement = now; in numa_get_avg_runtime()
2469 static int preferred_group_nid(struct task_struct *p, int nid) in preferred_group_nid() argument
2490 score = group_weight(p, node, dist); in preferred_group_nid()
2526 faults += group_faults(p, b); in preferred_group_nid()
2552 static void task_numa_placement(struct task_struct *p) in task_numa_placement() argument
2567 seq = READ_ONCE(p->mm->numa_scan_seq); in task_numa_placement()
2568 if (p->numa_scan_seq == seq) in task_numa_placement()
2570 p->numa_scan_seq = seq; in task_numa_placement()
2571 p->numa_scan_period_max = task_scan_max(p); in task_numa_placement()
2573 total_faults = p->numa_faults_locality[0] + in task_numa_placement()
2574 p->numa_faults_locality[1]; in task_numa_placement()
2575 runtime = numa_get_avg_runtime(p, &period); in task_numa_placement()
2578 ng = deref_curr_numa_group(p); in task_numa_placement()
2600 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2; in task_numa_placement()
2601 fault_types[priv] += p->numa_faults[membuf_idx]; in task_numa_placement()
2602 p->numa_faults[membuf_idx] = 0; in task_numa_placement()
2612 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) / in task_numa_placement()
2614 f_diff = f_weight - p->numa_faults[cpu_idx] / 2; in task_numa_placement()
2615 p->numa_faults[cpubuf_idx] = 0; in task_numa_placement()
2617 p->numa_faults[mem_idx] += diff; in task_numa_placement()
2618 p->numa_faults[cpu_idx] += f_diff; in task_numa_placement()
2619 faults += p->numa_faults[mem_idx]; in task_numa_placement()
2620 p->total_numa_faults += diff; in task_numa_placement()
2665 max_nid = preferred_group_nid(p, max_nid); in task_numa_placement()
2670 if (max_nid != p->numa_preferred_nid) in task_numa_placement()
2671 sched_setnuma(p, max_nid); in task_numa_placement()
2674 update_task_scan_period(p, fault_types[0], fault_types[1]); in task_numa_placement()
2688 static void task_numa_group(struct task_struct *p, int cpupid, int flags, in task_numa_group() argument
2697 if (unlikely(!deref_curr_numa_group(p))) { in task_numa_group()
2710 grp->gid = p->pid; in task_numa_group()
2713 grp->faults[i] = p->numa_faults[i]; in task_numa_group()
2715 grp->total_faults = p->total_numa_faults; in task_numa_group()
2718 rcu_assign_pointer(p->numa_group, grp); in task_numa_group()
2731 my_grp = deref_curr_numa_group(p); in task_numa_group()
2771 my_grp->faults[i] -= p->numa_faults[i]; in task_numa_group()
2772 grp->faults[i] += p->numa_faults[i]; in task_numa_group()
2774 my_grp->total_faults -= p->total_numa_faults; in task_numa_group()
2775 grp->total_faults += p->total_numa_faults; in task_numa_group()
2783 rcu_assign_pointer(p->numa_group, grp); in task_numa_group()
2800 void task_numa_free(struct task_struct *p, bool final) in task_numa_free() argument
2803 struct numa_group *grp = rcu_dereference_raw(p->numa_group); in task_numa_free()
2804 unsigned long *numa_faults = p->numa_faults; in task_numa_free()
2814 grp->faults[i] -= p->numa_faults[i]; in task_numa_free()
2815 grp->total_faults -= p->total_numa_faults; in task_numa_free()
2819 RCU_INIT_POINTER(p->numa_group, NULL); in task_numa_free()
2824 p->numa_faults = NULL; in task_numa_free()
2827 p->total_numa_faults = 0; in task_numa_free()
2838 struct task_struct *p = current; in task_numa_fault() local
2849 if (!p->mm) in task_numa_fault()
2862 if (unlikely(!p->numa_faults)) { in task_numa_fault()
2863 int size = sizeof(*p->numa_faults) * in task_numa_fault()
2866 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); in task_numa_fault()
2867 if (!p->numa_faults) in task_numa_fault()
2870 p->total_numa_faults = 0; in task_numa_fault()
2871 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); in task_numa_fault()
2881 priv = cpupid_match_pid(p, last_cpupid); in task_numa_fault()
2883 task_numa_group(p, last_cpupid, flags, &priv); in task_numa_fault()
2892 ng = deref_curr_numa_group(p); in task_numa_fault()
2902 if (time_after(jiffies, p->numa_migrate_retry)) { in task_numa_fault()
2903 task_numa_placement(p); in task_numa_fault()
2904 numa_migrate_preferred(p); in task_numa_fault()
2908 p->numa_pages_migrated += pages; in task_numa_fault()
2910 p->numa_faults_locality[2] += pages; in task_numa_fault()
2912 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; in task_numa_fault()
2913 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; in task_numa_fault()
2914 p->numa_faults_locality[local] += pages; in task_numa_fault()
2917 static void reset_ptenuma_scan(struct task_struct *p) in reset_ptenuma_scan() argument
2927 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); in reset_ptenuma_scan()
2928 p->mm->numa_scan_offset = 0; in reset_ptenuma_scan()
2938 struct task_struct *p = current; in task_numa_work() local
2939 struct mm_struct *mm = p->mm; in task_numa_work()
2940 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
2947 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); in task_numa_work()
2958 if (p->flags & PF_EXITING) in task_numa_work()
2973 if (p->numa_scan_period == 0) { in task_numa_work()
2974 p->numa_scan_period_max = task_scan_max(p); in task_numa_work()
2975 p->numa_scan_period = task_scan_start(p); in task_numa_work()
2978 next_scan = now + msecs_to_jiffies(p->numa_scan_period); in task_numa_work()
2986 p->node_stamp += 2 * TICK_NSEC; in task_numa_work()
3001 reset_ptenuma_scan(p); in task_numa_work()
3066 reset_ptenuma_scan(p); in task_numa_work()
3075 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
3076 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
3077 p->node_stamp += 32 * diff; in task_numa_work()
3081 void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) in init_numa_balancing() argument
3084 struct mm_struct *mm = p->mm; in init_numa_balancing()
3093 p->node_stamp = 0; in init_numa_balancing()
3094 p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; in init_numa_balancing()
3095 p->numa_scan_period = sysctl_numa_balancing_scan_delay; in init_numa_balancing()
3096 p->numa_migrate_retry = 0; in init_numa_balancing()
3098 p->numa_work.next = &p->numa_work; in init_numa_balancing()
3099 p->numa_faults = NULL; in init_numa_balancing()
3100 p->numa_pages_migrated = 0; in init_numa_balancing()
3101 p->total_numa_faults = 0; in init_numa_balancing()
3102 RCU_INIT_POINTER(p->numa_group, NULL); in init_numa_balancing()
3103 p->last_task_numa_placement = 0; in init_numa_balancing()
3104 p->last_sum_exec_runtime = 0; in init_numa_balancing()
3106 init_task_work(&p->numa_work, task_numa_work); in init_numa_balancing()
3110 p->numa_preferred_nid = NUMA_NO_NODE; in init_numa_balancing()
3124 p->node_stamp = delay; in init_numa_balancing()
3161 static void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
3163 int src_nid = cpu_to_node(task_cpu(p)); in update_scan_period()
3169 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) in update_scan_period()
3180 if (p->numa_scan_seq) { in update_scan_period()
3186 if (dst_nid == p->numa_preferred_nid || in update_scan_period()
3187 (p->numa_preferred_nid != NUMA_NO_NODE && in update_scan_period()
3188 src_nid != p->numa_preferred_nid)) in update_scan_period()
3192 p->numa_scan_period = task_scan_start(p); in update_scan_period()
3200 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) in account_numa_enqueue() argument
3204 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) in account_numa_dequeue() argument
3208 static inline void update_scan_period(struct task_struct *p, int new_cpu) in update_scan_period() argument
3345 void reweight_task(struct task_struct *p, int prio) in reweight_task() argument
3347 struct sched_entity *se = &p->se; in reweight_task()
4286 static inline unsigned long task_util(struct task_struct *p) in task_util() argument
4288 return READ_ONCE(p->se.avg.util_avg); in task_util()
4291 static inline unsigned long _task_util_est(struct task_struct *p) in _task_util_est() argument
4293 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4298 static inline unsigned long task_util_est(struct task_struct *p) in task_util_est() argument
4300 return max(task_util(p), _task_util_est(p)); in task_util_est()
4304 struct task_struct *p) in util_est_enqueue() argument
4313 enqueued += _task_util_est(p); in util_est_enqueue()
4320 struct task_struct *p) in util_est_dequeue() argument
4329 enqueued -= min_t(unsigned int, enqueued, _task_util_est(p)); in util_est_dequeue()
4351 struct task_struct *p, in util_est_update() argument
4358 trace_android_rvh_util_est_update(cfs_rq, p, task_sleep, &ret); in util_est_update()
4376 ue = p->se.avg.util_est; in util_est_update()
4386 ue.enqueued = task_util(p); in util_est_update()
4411 if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) in util_est_update()
4436 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
4438 trace_sched_util_est_se_tp(&p->se); in util_est_update()
4565 static inline int task_fits_cpu(struct task_struct *p, int cpu) in task_fits_cpu() argument
4567 unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN); in task_fits_cpu()
4568 unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX); in task_fits_cpu()
4569 unsigned long util = task_util_est(p); in task_fits_cpu()
4577 inline void update_misfit_status(struct task_struct *p, struct rq *rq) in update_misfit_status() argument
4581 trace_android_rvh_update_misfit_status(p, rq, &need_update); in update_misfit_status()
4585 if (!p || p->nr_cpus_allowed == 1) { in update_misfit_status()
4590 if (task_fits_cpu(p, cpu_of(rq))) { in update_misfit_status()
4599 rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); in update_misfit_status()
4633 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_enqueue() argument
4636 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_dequeue() argument
4639 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, in util_est_update() argument
4641 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} in update_misfit_status() argument
6026 static void hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
6028 struct sched_entity *se = &p->se; in hrtick_start_fair()
6031 SCHED_WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
6039 if (task_current(rq, p)) in hrtick_start_fair()
6064 hrtick_start_fair(struct rq *rq, struct task_struct *p) in hrtick_start_fair() argument
6130 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_fair() argument
6133 struct sched_entity *se = &p->se; in enqueue_task_fair()
6134 int idle_h_nr_running = task_has_idle_policy(p); in enqueue_task_fair()
6144 util_est_enqueue(&rq->cfs, p); in enqueue_task_fair()
6151 should_iowait_boost = p->in_iowait; in enqueue_task_fair()
6152 trace_android_rvh_set_iowait(p, rq, &should_iowait_boost); in enqueue_task_fair()
6175 trace_android_rvh_enqueue_task_fair(rq, p, flags); in enqueue_task_fair()
6227 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_fair() argument
6230 struct sched_entity *se = &p->se; in dequeue_task_fair()
6232 int idle_h_nr_running = task_has_idle_policy(p); in dequeue_task_fair()
6235 util_est_dequeue(&rq->cfs, p); in dequeue_task_fair()
6266 trace_android_rvh_dequeue_task_fair(rq, p, flags); in dequeue_task_fair()
6294 util_est_update(&rq->cfs, p, task_sleep); in dequeue_task_fair()
6335 static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) in cpu_load_without() argument
6341 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6348 lsub_positive(&load, task_h_load(p)); in cpu_load_without()
6358 static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) in cpu_runnable_without() argument
6364 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6371 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
6381 static void record_wakee(struct task_struct *p) in record_wakee() argument
6392 if (current->last_wakee != p) { in record_wakee()
6393 current->last_wakee = p; in record_wakee()
6415 static int wake_wide(struct task_struct *p) in wake_wide() argument
6418 unsigned int slave = p->wakee_flips; in wake_wide()
6468 wake_affine_weight(struct sched_domain *sd, struct task_struct *p, in wake_affine_weight() argument
6485 task_load = task_h_load(p); in wake_affine_weight()
6510 static int wake_affine(struct sched_domain *sd, struct task_struct *p, in wake_affine() argument
6519 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); in wake_affine()
6521 schedstat_inc(p->stats.nr_wakeups_affine_attempts); in wake_affine()
6526 schedstat_inc(p->stats.nr_wakeups_affine); in wake_affine()
6531 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
6537 find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) in find_idlest_group_cpu() argument
6551 for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { in find_idlest_group_cpu()
6554 if (!sched_core_cookie_match(rq, p)) in find_idlest_group_cpu()
6593 static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, in find_idlest_cpu() argument
6598 if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) in find_idlest_cpu()
6606 sync_entity_load_avg(&p->se); in find_idlest_cpu()
6618 group = find_idlest_group(sd, p, cpu); in find_idlest_cpu()
6624 new_cpu = find_idlest_group_cpu(group, p, cpu); in find_idlest_cpu()
6646 static inline int __select_idle_cpu(int cpu, struct task_struct *p) in __select_idle_cpu() argument
6649 sched_cpu_cookie_match(cpu_rq(cpu), p)) in __select_idle_cpu()
6713 static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu) in select_idle_core() argument
6722 if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) { in select_idle_core()
6730 if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr)) in select_idle_core()
6744 static int select_idle_smt(struct task_struct *p, int target) in select_idle_smt() argument
6748 for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) { in select_idle_smt()
6769 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle… in select_idle_core() argument
6771 return __select_idle_cpu(core, p); in select_idle_core()
6774 static inline int select_idle_smt(struct task_struct *p, int target) in select_idle_smt() argument
6786 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int … in select_idle_cpu() argument
6796 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_cpu()
6843 i = select_idle_core(p, cpu, cpus, &idle_cpu); in select_idle_cpu()
6850 idle_cpu = __select_idle_cpu(cpu, p); in select_idle_cpu()
6880 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) in select_idle_capacity() argument
6888 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); in select_idle_capacity()
6890 task_util = task_util_est(p); in select_idle_capacity()
6891 util_min = uclamp_eff_value(p, UCLAMP_MIN); in select_idle_capacity()
6892 util_max = uclamp_eff_value(p, UCLAMP_MAX); in select_idle_capacity()
6945 static int select_idle_sibling(struct task_struct *p, int prev, int target) in select_idle_sibling() argument
6957 sync_entity_load_avg(&p->se); in select_idle_sibling()
6958 task_util = task_util_est(p); in select_idle_sibling()
6959 util_min = uclamp_eff_value(p, UCLAMP_MIN); in select_idle_sibling()
6960 util_max = uclamp_eff_value(p, UCLAMP_MAX); in select_idle_sibling()
6997 recent_used_cpu = p->recent_used_cpu; in select_idle_sibling()
6998 p->recent_used_cpu = prev; in select_idle_sibling()
7003 cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) && in select_idle_sibling()
7023 i = select_idle_capacity(p, sd, target); in select_idle_sibling()
7036 i = select_idle_smt(p, prev); in select_idle_sibling()
7042 i = select_idle_cpu(p, sd, has_idle_core, target); in select_idle_sibling()
7053 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) in cpu_util_next() argument
7064 if (task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util_next()
7065 lsub_positive(&util, task_util(p)); in cpu_util_next()
7066 else if (task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util_next()
7067 util += task_util(p); in cpu_util_next()
7101 util_est += _task_util_est(p); in cpu_util_next()
7102 else if (unlikely(task_on_rq_queued(p) || current == p)) in cpu_util_next()
7103 lsub_positive(&util_est, _task_util_est(p)); in cpu_util_next()
7124 static unsigned long cpu_util_without(int cpu, struct task_struct *p) in cpu_util_without() argument
7127 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
7130 return cpu_util_next(cpu, p, -1); in cpu_util_without()
7156 struct task_struct *p, int prev_cpu) in eenv_task_busy_time() argument
7164 busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap); in eenv_task_busy_time()
7192 struct task_struct *p) in eenv_pd_busy_time() argument
7198 unsigned long util = cpu_util_next(cpu, p, -1); in eenv_pd_busy_time()
7215 struct task_struct *p, int dst_cpu) in eenv_pd_max_util() argument
7221 struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL; in eenv_pd_max_util()
7222 unsigned long util = cpu_util_next(cpu, p, dst_cpu); in eenv_pd_max_util()
7246 struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu) in compute_energy() argument
7248 unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu); in compute_energy()
7296 static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync) in find_energy_efficient_cpu() argument
7300 unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0; in find_energy_efficient_cpu()
7301 unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024; in find_energy_efficient_cpu()
7312 trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu); in find_energy_efficient_cpu()
7316 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
7325 cpumask_test_cpu(cpu, p->cpus_ptr) && in find_energy_efficient_cpu()
7326 task_fits_cpu(p, cpu)) { in find_energy_efficient_cpu()
7343 if (!task_util_est(p) && p_util_min == 0) in find_energy_efficient_cpu()
7346 eenv_task_busy_time(&eenv, p, prev_cpu); in find_energy_efficient_cpu()
7378 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in find_energy_efficient_cpu()
7381 util = cpu_util_next(cpu, p, cpu); in find_energy_efficient_cpu()
7432 eenv_pd_busy_time(&eenv, cpus, p); in find_energy_efficient_cpu()
7434 base_energy = compute_energy(&eenv, pd, cpus, p, -1); in find_energy_efficient_cpu()
7438 prev_delta = compute_energy(&eenv, pd, cpus, p, in find_energy_efficient_cpu()
7462 cur_delta = compute_energy(&eenv, pd, cpus, p, in find_energy_efficient_cpu()
7509 select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) in select_task_rq_fair() argument
7522 sync_entity_load_avg(&p->se); in select_task_rq_fair()
7523 trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag, in select_task_rq_fair()
7531 lockdep_assert_held(&p->pi_lock); in select_task_rq_fair()
7533 record_wakee(p); in select_task_rq_fair()
7536 new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync); in select_task_rq_fair()
7542 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); in select_task_rq_fair()
7554 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); in select_task_rq_fair()
7573 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); in select_task_rq_fair()
7576 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); in select_task_rq_fair()
7588 static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) in migrate_task_rq_fair() argument
7590 struct sched_entity *se = &p->se; in migrate_task_rq_fair()
7598 if (READ_ONCE(p->__state) == TASK_WAKING) { in migrate_task_rq_fair()
7604 if (!task_on_rq_migrating(p)) { in migrate_task_rq_fair()
7623 update_scan_period(p, new_cpu); in migrate_task_rq_fair()
7626 static void task_dead_fair(struct task_struct *p) in task_dead_fair() argument
7628 remove_entity_load_avg(&p->se); in task_dead_fair()
7721 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) in check_preempt_wakeup() argument
7724 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup()
7767 likely(!task_has_idle_policy(p))) in check_preempt_wakeup()
7774 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) in check_preempt_wakeup()
7793 trace_android_rvh_check_preempt_wakeup(rq, p, &preempt, &ignore, in check_preempt_wakeup()
7868 struct task_struct *p = NULL; in pick_next_task_fair() local
7923 p = task_of(se); in pick_next_task_fair()
7924 trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev); in pick_next_task_fair()
7930 if (prev != p) { in pick_next_task_fair()
7957 trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev); in pick_next_task_fair()
7967 p = task_of(se); in pick_next_task_fair()
7976 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
7980 hrtick_start_fair(rq, p); in pick_next_task_fair()
7982 update_misfit_status(p, rq); in pick_next_task_fair()
7984 return p; in pick_next_task_fair()
8067 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) in yield_to_task_fair() argument
8069 struct sched_entity *se = &p->se; in yield_to_task_fair()
8289 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
8295 if (p->sched_class != &fair_sched_class) in task_hot()
8298 if (unlikely(task_has_idle_policy(p))) in task_hot()
8309 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
8310 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
8320 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
8326 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
8337 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
8339 struct numa_group *numa_group = rcu_dereference(p->numa_group); in migrate_degrades_locality()
8346 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
8356 if (src_nid == p->numa_preferred_nid) { in migrate_degrades_locality()
8364 if (dst_nid == p->numa_preferred_nid) in migrate_degrades_locality()
8373 src_weight = group_weight(p, src_nid, dist); in migrate_degrades_locality()
8374 dst_weight = group_weight(p, dst_nid, dist); in migrate_degrades_locality()
8376 src_weight = task_weight(p, src_nid, dist); in migrate_degrades_locality()
8377 dst_weight = task_weight(p, dst_nid, dist); in migrate_degrades_locality()
8384 static inline int migrate_degrades_locality(struct task_struct *p, in migrate_degrades_locality() argument
8395 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
8402 trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate); in can_migrate_task()
8413 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
8417 if (kthread_is_per_cpu(p)) in can_migrate_task()
8420 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
8423 schedstat_inc(p->stats.nr_failed_migrations_affine); in can_migrate_task()
8443 if (cpumask_test_cpu(cpu, p->cpus_ptr)) { in can_migrate_task()
8456 if (task_on_cpu(env->src_rq, p)) { in can_migrate_task()
8457 schedstat_inc(p->stats.nr_failed_migrations_running); in can_migrate_task()
8471 tsk_cache_hot = migrate_degrades_locality(p, env); in can_migrate_task()
8473 tsk_cache_hot = task_hot(p, env); in can_migrate_task()
8479 schedstat_inc(p->stats.nr_forced_migrations); in can_migrate_task()
8484 schedstat_inc(p->stats.nr_failed_migrations_hot); in can_migrate_task()
8491 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
8502 trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p, in detach_task()
8507 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
8508 set_task_cpu(p, env->dst_cpu); in detach_task()
8519 struct task_struct *p; in detach_one_task() local
8523 list_for_each_entry_reverse(p, in detach_one_task()
8525 if (!can_migrate_task(p, env)) in detach_one_task()
8528 detach_task(p, env); in detach_one_task()
8537 return p; in detach_one_task()
8552 struct task_struct *p; in detach_tasks() local
8593 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
8595 if (!can_migrate_task(p, env)) in detach_tasks()
8607 load = max_t(unsigned long, task_h_load(p), 1); in detach_tasks()
8626 util = task_util_est(p); in detach_tasks()
8640 if (task_fits_cpu(p, env->src_cpu)) in detach_tasks()
8647 detach_task(p, env); in detach_tasks()
8648 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
8671 list_move(&p->se.group_node, tasks); in detach_tasks()
8687 static void attach_task(struct rq *rq, struct task_struct *p) in attach_task() argument
8691 WARN_ON_ONCE(task_rq(p) != rq); in attach_task()
8692 activate_task(rq, p, ENQUEUE_NOCLOCK); in attach_task()
8693 check_preempt_curr(rq, p, 0); in attach_task()
8700 static void attach_one_task(struct rq *rq, struct task_struct *p) in attach_one_task() argument
8706 attach_task(rq, p); in attach_one_task()
8717 struct task_struct *p; in attach_tasks() local
8724 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
8725 list_del_init(&p->se.group_node); in attach_tasks()
8727 attach_task(env->dst_rq, p); in attach_tasks()
8892 static unsigned long task_h_load(struct task_struct *p) in task_h_load() argument
8894 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load()
8897 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
8913 static unsigned long task_h_load(struct task_struct *p) in task_h_load() argument
8915 return p->se.avg.load_avg; in task_h_load()
9592 static unsigned int task_running_on_cpu(int cpu, struct task_struct *p) in task_running_on_cpu() argument
9595 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
9598 if (task_on_rq_queued(p)) in task_running_on_cpu()
9611 static int idle_cpu_without(int cpu, struct task_struct *p) in idle_cpu_without() argument
9615 if (rq->curr != rq->idle && rq->curr != p) in idle_cpu_without()
9642 struct task_struct *p) in update_sg_wakeup_stats() argument
9656 sgs->group_load += cpu_load_without(rq, p); in update_sg_wakeup_stats()
9657 sgs->group_util += cpu_util_without(i, p); in update_sg_wakeup_stats()
9658 sgs->group_runnable += cpu_runnable_without(rq, p); in update_sg_wakeup_stats()
9659 local = task_running_on_cpu(i, p); in update_sg_wakeup_stats()
9668 if (!nr_running && idle_cpu_without(i, p)) in update_sg_wakeup_stats()
9674 task_fits_cpu(p, i)) in update_sg_wakeup_stats()
9753 find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) in find_idlest_group() argument
9769 p->cpus_ptr)) in find_idlest_group()
9773 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) in find_idlest_group()
9786 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
9871 if (cpu_to_node(this_cpu) == p->numa_preferred_nid) in find_idlest_group()
9875 if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) in find_idlest_group()
9887 if (p->nr_cpus_allowed != NR_CPUS) { in find_idlest_group()
9890 cpumask_and(cpus, sched_group_span(local), p->cpus_ptr); in find_idlest_group()
10966 struct task_struct *p = NULL; in active_load_balance_cpu_stop() local
11016 p = detach_one_task(&env); in active_load_balance_cpu_stop()
11017 if (p) { in active_load_balance_cpu_stop()
11030 if (p) in active_load_balance_cpu_stop()
11031 attach_one_task(target_rq, p); in active_load_balance_cpu_stop()
11918 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) in task_vruntime_update() argument
11920 struct sched_entity *se = &p->se; in task_vruntime_update()
11922 if (p->sched_class != &fair_sched_class) in task_vruntime_update()
12010 static void task_fork_fair(struct task_struct *p) in task_fork_fair() argument
12013 struct sched_entity *se = &p->se, *curr; in task_fork_fair()
12046 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_fair() argument
12048 if (!task_on_rq_queued(p)) in prio_changed_fair()
12059 if (task_current(rq, p)) { in prio_changed_fair()
12060 if (p->prio > oldprio) in prio_changed_fair()
12063 check_preempt_curr(rq, p, 0); in prio_changed_fair()
12066 static inline bool vruntime_normalized(struct task_struct *p) in vruntime_normalized() argument
12068 struct sched_entity *se = &p->se; in vruntime_normalized()
12075 if (p->on_rq) in vruntime_normalized()
12088 (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup)) in vruntime_normalized()
12161 static void detach_task_cfs_rq(struct task_struct *p) in detach_task_cfs_rq() argument
12163 struct sched_entity *se = &p->se; in detach_task_cfs_rq()
12166 if (!vruntime_normalized(p)) { in detach_task_cfs_rq()
12178 static void attach_task_cfs_rq(struct task_struct *p) in attach_task_cfs_rq() argument
12180 struct sched_entity *se = &p->se; in attach_task_cfs_rq()
12185 if (!vruntime_normalized(p)) in attach_task_cfs_rq()
12189 static void switched_from_fair(struct rq *rq, struct task_struct *p) in switched_from_fair() argument
12191 detach_task_cfs_rq(p); in switched_from_fair()
12194 static void switched_to_fair(struct rq *rq, struct task_struct *p) in switched_to_fair() argument
12196 attach_task_cfs_rq(p); in switched_to_fair()
12198 if (task_on_rq_queued(p)) { in switched_to_fair()
12204 if (task_current(rq, p)) in switched_to_fair()
12207 check_preempt_curr(rq, p, 0); in switched_to_fair()
12216 static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) in set_next_task_fair() argument
12218 struct sched_entity *se = &p->se; in set_next_task_fair()
12221 if (task_on_rq_queued(p)) { in set_next_task_fair()
12249 static void task_change_group_fair(struct task_struct *p) in task_change_group_fair() argument
12255 if (READ_ONCE(p->__state) == TASK_NEW) in task_change_group_fair()
12258 detach_task_cfs_rq(p); in task_change_group_fair()
12262 p->se.avg.last_update_time = 0; in task_change_group_fair()
12264 set_task_rq(p, task_cpu(p)); in task_change_group_fair()
12265 attach_task_cfs_rq(p); in task_change_group_fair()
12617 void show_numa_stats(struct task_struct *p, struct seq_file *m) in show_numa_stats() argument
12624 ng = rcu_dereference(p->numa_group); in show_numa_stats()
12626 if (p->numa_faults) { in show_numa_stats()
12627 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; in show_numa_stats()
12628 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; in show_numa_stats()