Searched refs:load (Results 1 – 9 of 9) sorted by relevance
/kernel/sched/
D | pelt.c |
    111  unsigned long load, unsigned long runnable, int running)    in accumulate_sum() argument
    137  if (load)                                                   in accumulate_sum()
    138  sa->load_sum += load * contrib;                             in accumulate_sum()
    177  unsigned long load, unsigned long runnable, int running)    in ___update_load_sum() argument
    210  if (!load)                                                  in ___update_load_sum()
    220  if (!accumulate_sum(delta, sa, load, runnable, running))    in ___update_load_sum()
    227  ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)    in ___update_load_avg() argument
    234  sa->load_avg = div_u64(load * sa->load_sum, divider);       in ___update_load_avg()
    294  scale_load_down(cfs_rq->load.weight),                       in __update_load_avg_cfs_rq()
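The pelt.c hits above are the heart of per-entity load tracking (PELT): runnable time is chopped into 1024us periods, each period's contribution decays geometrically (y^32 = 1/2), and the average is the decayed sum scaled by the maximum of the series. A minimal, self-contained toy of that shape, not the kernel code; the two constants are the well-known PELT values, and everything with a toy_ prefix is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define LOAD_AVG_MAX_TOY 47742ULL      /* ~ sum of the infinite decayed series */
    #define PELT_Y_FP        0xfa83b2daULL /* y = 0.5^(1/32) as a 32-bit fixed-point fraction */

    struct toy_avg {
        uint64_t load_sum;
        unsigned long load_avg;
    };

    /* Mirrors the accumulate_sum()/___update_load_avg() split seen above:
     * decay one period, add the new load-weighted contribution, then fold
     * the sum into the average. */
    static void toy_accumulate(struct toy_avg *sa, unsigned long load, uint32_t contrib)
    {
        sa->load_sum = (sa->load_sum * PELT_Y_FP) >> 32; /* decay one period by y */
        if (load)
            sa->load_sum += (uint64_t)load * contrib;
        sa->load_avg = (unsigned long)(sa->load_sum / LOAD_AVG_MAX_TOY);
    }

    int main(void)
    {
        struct toy_avg sa = { 0, 0 };

        /* a weight-1024 entity running flat out converges toward load_avg ~= 1024 */
        for (int i = 0; i < 345; i++) /* ~350ms worth of full 1024us periods */
            toy_accumulate(&sa, 1024, 1024);
        printf("load_avg ~= %lu\n", sa.load_avg);
        return 0;
    }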
D | fair.c |
    664  if (unlikely(se->load.weight != NICE_0_LOAD))               in calc_delta_fair()
    665  delta = __calc_delta(delta, NICE_0_LOAD, &se->load);        in calc_delta_fair()
    697  struct load_weight *load;                                   in sched_slice() local
    701  load = &cfs_rq->load;                                       in sched_slice()
    704  lw = cfs_rq->load;                                          in sched_slice()
    706  update_load_add(&lw, se->load.weight);                      in sched_slice()
    707  load = &lw;                                                 in sched_slice()
    709  slice = __calc_delta(slice, se->load.weight, load);         in sched_slice()
    745  sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight);    in init_entity_runnable_average()
    747  se->runnable_weight = se->load.weight;                      in init_entity_runnable_average()
    [all …]
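The fair.c hits show CFS turning weights into time: calc_delta_fair() scales elapsed runtime by NICE_0_LOAD / se->load.weight before adding it to vruntime, and sched_slice() gives each entity a share of the period proportional to its weight within the queue's total load. A sketch of both using plain division (the kernel's __calc_delta() avoids the divide by multiplying with a precomputed inv_weight); the weight 3121 for nice -5 comes from sched_prio_to_weight[], and the toy_ names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define NICE_0_LOAD 1024ULL

    /* vruntime advance: heavier entities accrue vruntime more slowly */
    static uint64_t toy_calc_delta_fair(uint64_t delta_ns, uint64_t se_weight)
    {
        if (se_weight == NICE_0_LOAD) /* common case short-circuits, as at line 664 */
            return delta_ns;
        return delta_ns * NICE_0_LOAD / se_weight;
    }

    /* slice: the entity's weight-proportional share of the period */
    static uint64_t toy_sched_slice(uint64_t period_ns, uint64_t se_weight,
                                    uint64_t total_weight)
    {
        return period_ns * se_weight / total_weight;
    }

    int main(void)
    {
        /* nice 0 (weight 1024) and nice -5 (weight 3121) sharing a 6ms period */
        printf("slice(nice 0)  = %llu ns\n",
               (unsigned long long)toy_sched_slice(6000000, 1024, 1024 + 3121));
        printf("slice(nice -5) = %llu ns\n",
               (unsigned long long)toy_sched_slice(6000000, 3121, 1024 + 3121));
        printf("vruntime for 1ms at weight 3121 = %llu ns\n",
               (unsigned long long)toy_calc_delta_fair(1000000, 3121));
        return 0;
    }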
D | loadavg.c |
    157  calc_load_n(unsigned long load, unsigned long exp,                  in calc_load_n() argument
    160  return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);    in calc_load_n()
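calc_load_n() is the catch-up path for the global load average: the same fixed-point exponential moving average as calc_load(), but with the decay factor raised to the n-th power so n missed 5-second ticks can be applied in one step; fixed_power_int() does that exponentiation by square-and-multiply. A simplified model reusing the kernel's FSHIFT/FIXED_1/EXP_1 constants (the toy_ helpers are illustrative, and the kernel's calc_load() additionally rounds up when load is rising):

    #include <stdio.h>

    #define FSHIFT  11
    #define FIXED_1 (1UL << FSHIFT)
    #define EXP_1   1884UL /* 1/exp(5s/1min) in fixed point */

    /* one EMA step: load and active are both fixed-point values */
    static unsigned long toy_calc_load(unsigned long load, unsigned long exp,
                                       unsigned long active)
    {
        unsigned long newload = load * exp + active * (FIXED_1 - exp);
        return newload >> FSHIFT;
    }

    /* exp^n in fixed point, by square-and-multiply (a la fixed_power_int) */
    static unsigned long toy_fixed_power(unsigned long x, unsigned int n)
    {
        unsigned long result = FIXED_1;

        while (n) {
            if (n & 1)
                result = (result * x) >> FSHIFT;
            x = (x * x) >> FSHIFT;
            n >>= 1;
        }
        return result;
    }

    /* catch up after n missed 5s ticks in a single step */
    static unsigned long toy_calc_load_n(unsigned long load, unsigned long exp,
                                         unsigned long active, unsigned int n)
    {
        return toy_calc_load(load, toy_fixed_power(exp, n), active);
    }

    int main(void)
    {
        unsigned long load = 0, active = 2 * FIXED_1; /* 2 runnable tasks */

        load = toy_calc_load_n(load, EXP_1, active, 12); /* 60s in one shot */
        printf("1-min load after 60s of 2 tasks: %lu.%02lu\n",
               load >> FSHIFT, ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
    }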
D | debug.c |
    404  P(se->load.weight);                                              in print_cfs_group_stats()
    525  SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);    in print_cfs_rq()
    942  P(se.load.weight);                                               in proc_sched_show_task()
D | core.c |
    751  struct load_weight *load = &p->se.load;                     in set_load_weight() local
    757  load->weight = scale_load(WEIGHT_IDLEPRIO);                 in set_load_weight()
    758  load->inv_weight = WMULT_IDLEPRIO;                          in set_load_weight()
    759  p->se.runnable_weight = load->weight;                       in set_load_weight()
    770  load->weight = scale_load(sched_prio_to_weight[prio]);      in set_load_weight()
    771  load->inv_weight = sched_prio_to_wmult[prio];               in set_load_weight()
    772  p->se.runnable_weight = load->weight;                       in set_load_weight()
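set_load_weight() in core.c translates a task's static priority into the (weight, inv_weight) pair used everywhere above: sched_prio_to_weight[] steps by roughly 1.25x per nice level around 1024 at nice 0, and sched_prio_to_wmult[] caches 2^32/weight so later math can multiply instead of divide. A sketch over a five-entry excerpt of that table (the excerpted weights match sched_prio_to_weight[]; toy_ names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* weights for nice -2..+2, excerpted from sched_prio_to_weight[] */
    static const int toy_weight[] = { 1586, 1277, 1024, 820, 655 };

    struct toy_load_weight {
        unsigned long weight;
        uint32_t inv_weight;
    };

    /* mirror of the table lookups at lines 770-771 */
    static void toy_set_load_weight(struct toy_load_weight *lw, int nice)
    {
        lw->weight = toy_weight[nice + 2];
        lw->inv_weight = (uint32_t)(0xffffffffUL / lw->weight); /* ~2^32/weight */
    }

    int main(void)
    {
        struct toy_load_weight lw;

        for (int nice = -2; nice <= 2; nice++) {
            toy_set_load_weight(&lw, nice);
            printf("nice %+d: weight=%lu inv_weight=%u\n",
                   nice, lw.weight, lw.inv_weight);
        }
        return 0;
    }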
D | sched.h |
    493  struct load_weight load;                                    member
    703  return scale_load_down(se->load.weight);                    in se_weight()
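se_weight() strips the extra fixed-point resolution: on 64-bit kernels, scale_load() shifts weights up by SCHED_FIXEDPOINT_SHIFT (10) bits so group-scheduling math has headroom, and scale_load_down() shifts back wherever the user-visible 10-bit scale is expected, as in the pelt.c hit at line 294 (on 32-bit kernels both are no-ops). A minimal illustration assuming the 64-bit definitions:

    #include <stdio.h>

    #define SCHED_FIXEDPOINT_SHIFT 10

    #define scale_load(w)      ((unsigned long)(w) << SCHED_FIXEDPOINT_SHIFT)
    #define scale_load_down(w) ((unsigned long)(w) >> SCHED_FIXEDPOINT_SHIFT)

    struct toy_se {
        struct { unsigned long weight; } load;
    };

    /* mirror of se_weight(): internal high-resolution weight -> 10-bit scale */
    static unsigned long toy_se_weight(const struct toy_se *se)
    {
        return scale_load_down(se->load.weight);
    }

    int main(void)
    {
        struct toy_se se = { .load = { .weight = scale_load(1024) } };

        printf("internal weight: %lu, se_weight: %lu\n",
               se.load.weight, toy_se_weight(&se)); /* 1048576 and 1024 */
        return 0;
    }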
/kernel/
D | Kconfig.preempt |
     34  under load.
     51  system is under load, at the cost of slightly lower throughput
D | kexec_file.c |
     64  if (!image->fops || !image->fops->load)                     in kexec_image_load_default()
     67  return image->fops->load(image, image->kernel_buf,          in kexec_image_load_default()
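The kexec_file.c hit is a different "load" entirely: an ops-table dispatch, where the generic loader checks that the image format registered a load hook before calling through it. A simplified stand-in for that pattern (the toy_ types replace the kernel's struct kexec_file_ops and kimage; the real function returns ERR_PTR(-ENOEXEC) on failure):

    #include <stdio.h>

    struct toy_image;

    struct toy_file_ops {
        void *(*load)(struct toy_image *image, char *kernel_buf,
                      unsigned long kernel_len);
    };

    struct toy_image {
        const struct toy_file_ops *fops;
        char *kernel_buf;
        unsigned long kernel_len;
    };

    /* Mirrors kexec_image_load_default(): refuse unless the image format
     * registered a load hook, then dispatch through the ops table. */
    static void *toy_image_load_default(struct toy_image *image)
    {
        if (!image->fops || !image->fops->load)
            return NULL; /* kernel: ERR_PTR(-ENOEXEC) */

        return image->fops->load(image, image->kernel_buf, image->kernel_len);
    }

    /* one hypothetical image format providing the hook */
    static void *toy_elf_load(struct toy_image *image, char *buf, unsigned long len)
    {
        (void)image;
        printf("parsing %lu bytes of kernel image\n", len);
        return buf;
    }

    int main(void)
    {
        static const struct toy_file_ops toy_elf_ops = { .load = toy_elf_load };
        char buf[64] = { 0 };
        struct toy_image image = { &toy_elf_ops, buf, sizeof(buf) };

        return toy_image_load_default(&image) ? 0 : 1;
    }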
/kernel/trace/
D | Kconfig |
    782  boot up or module load. With this option, they will not be freed, as