Lines matching full:cpu, grouped by containing function:
In sched_get_nr_running_avg():

    int cpu;
    ...
    for_each_possible_cpu(cpu) {
        ...
        spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
        ...
        diff = curr_time - per_cpu(last_time, cpu);
        ...
        /* time-weighted nr_running accumulated since the window started */
        tmp_nr = per_cpu(nr_prod_sum, cpu);
        tmp_nr += per_cpu(nr, cpu) * diff;
        ...
        tmp_misfit = per_cpu(nr_big_prod_sum, cpu);
        ...
        /* report the window averages, rounded using NR_THRESHOLD_PCT */
        stats[cpu].nr = (int)div64_u64((tmp_nr + NR_THRESHOLD_PCT), 100);
        stats[cpu].nr_misfit = (int)div64_u64((tmp_misfit + NR_THRESHOLD_PCT), 100);
        stats[cpu].nr_max = per_cpu(nr_max, cpu);

        trace_sched_get_nr_running_avg(cpu, stats[cpu].nr,
                stats[cpu].nr_misfit, stats[cpu].nr_max);

        /* reset the per-CPU accumulators for the next window */
        per_cpu(last_time, cpu) = curr_time;
        per_cpu(nr_prod_sum, cpu) = 0;
        per_cpu(nr_big_prod_sum, cpu) = 0;
        per_cpu(iowait_prod_sum, cpu) = 0;
        per_cpu(nr_max, cpu) = per_cpu(nr, cpu);

        spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
    }
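For orientation, here is a minimal sketch of how a caller might consume these per-CPU window averages. The element type name (sched_avg_stats) and the NR_CPUS-sized storage are assumptions for the example; only the nr, nr_misfit and nr_max fields filled in above are relied on.

    /* Assumed element type name; only the three fields used above matter. */
    static struct sched_avg_stats stats_snapshot[NR_CPUS];

    static void sample_nr_running_avg(void)
    {
        int cpu;

        /* reads the accumulated averages and starts a new window */
        sched_get_nr_running_avg(stats_snapshot);

        for_each_possible_cpu(cpu)
            pr_debug("cpu%d: nr=%d misfit=%d max=%d\n", cpu,
                 stats_snapshot[cpu].nr,
                 stats_snapshot[cpu].nr_misfit,
                 stats_snapshot[cpu].nr_max);
    }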
In update_last_busy_time():

    static inline void update_last_busy_time(int cpu, bool dequeue,
                         unsigned long prev_nr_run, u64 curr_time)
    {
        if (!hmp_capable() || is_min_capacity_cpu(cpu))
            return;

        /* trigger 1: nr_running just dropped below the busy threshold */
        if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
            ...
        /* trigger 2: a dequeue while utilization was near capacity */
        if (dequeue && (cpu_util(cpu) * BUSY_LOAD_FACTOR) >
                capacity_orig_of(cpu))
            ...
        ...
            atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
    }
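Put differently, the second trigger fires on a dequeue whenever cpu_util(cpu) exceeds capacity_orig_of(cpu) / BUSY_LOAD_FACTOR. As a purely illustrative example, if BUSY_LOAD_FACTOR were 10 and the CPU's original capacity were 1024, any utilization above roughly 102 at dequeue time would refresh last_busy_time; the actual factor is defined elsewhere in the file and is not shown in this listing.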
    /**
     * sched_update_nr_prod
     * @cpu: The CPU id whose nr_running product is being updated.
     * ...
     * Update the running average with the latest nr_running value for this CPU.
     */
    void sched_update_nr_prod(int cpu, long delta, bool inc)
    {
        ...
        spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
        nr_running = per_cpu(nr, cpu);
        ...
        diff = curr_time - per_cpu(last_time, cpu);
        ...
        per_cpu(last_time, cpu) = curr_time;
        per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);

        BUG_ON((s64)per_cpu(nr, cpu) < 0);

        /* track the peak nr_running seen in this window */
        if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
            per_cpu(nr_max, cpu) = per_cpu(nr, cpu);

        update_last_busy_time(cpu, !inc, nr_running, curr_time);

        /* accumulate time-weighted sums read by sched_get_nr_running_avg() */
        per_cpu(nr_prod_sum, cpu) += nr_running * diff;
        per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
        spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
    }
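The two product sums above implement a time-weighted average: each nr_running (or iowait) value is weighted by the nanoseconds it remained in effect, and the reader divides the accumulated sum by the window length. A self-contained user-space sketch of that arithmetic, with made-up sample values, is below.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* (nr_running value, nanoseconds it stayed at that value) */
        struct { uint64_t nr, ns; } samples[] = {
            { 2, 4000000 }, { 5, 1000000 }, { 1, 5000000 },
        };
        uint64_t prod_sum = 0, window = 0;

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            prod_sum += samples[i].nr * samples[i].ns;   /* nr_prod_sum analogue */
            window   += samples[i].ns;
        }

        /* average nr_running over the window, expressed as a percentage */
        printf("avg nr_running = %llu%%\n",
               (unsigned long long)(prod_sum * 100 / window));
        return 0;
    }

With these values the program prints 180%, i.e. an average of 1.8 runnable tasks over the 10 ms window; the listing above rounds such a percentage back to a task count using NR_THRESHOLD_PCT.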
    /* Returns the CPU utilization % in the last window. */
    unsigned int sched_get_cpu_util(int cpu)
    {
        struct rq *rq = cpu_rq(cpu);
        ...
        capacity = capacity_orig_of(cpu);
        ...
    }
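The elided middle of this function presumably turns the utilization and capacity into the percentage named in the comment. A sketch of that conversion, offered as an assumption rather than the actual elided code:

    /* Assumption: clamp utilization to capacity, then scale to a percent. */
    static unsigned int util_to_percent(u64 util, unsigned long capacity)
    {
        if (util >= capacity)
            util = capacity;

        return (unsigned int)div64_ul(util * 100, capacity);
    }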
    u64 sched_get_cpu_last_busy_time(int cpu)
    {
        return atomic64_read(&per_cpu(last_busy_time, cpu));
    }
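A typical use for the last-busy timestamp is a recency check before isolating or powering down a CPU. The helper below is illustrative only: the 100 ms threshold is invented, and it assumes last_busy_time is recorded on the same sched_clock() time base.

    /* Illustrative recency check; threshold and time base are assumptions. */
    static bool cpu_recently_busy(int cpu)
    {
        u64 idle_for = sched_clock() - sched_get_cpu_last_busy_time(cpu);

        return idle_for < 100 * NSEC_PER_MSEC;
    }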