Lines matching references to "local" in kernel/sched/fair.c
1942 unsigned long local = p->numa_faults_locality[1]; in update_task_scan_period() local
1951 if (local + shared == 0 || p->numa_faults_locality[2]) { in update_task_scan_period()
1968 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote); in update_task_scan_period()
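
The three update_task_scan_period() hits above measure how local a task's recent NUMA faults were: numa_faults_locality[1] counts local faults, [2] counts failed migrations, and lr_ratio folds the counts into NUMA_PERIOD_SLOTS buckets to steer the scan period. A minimal standalone model of that ratio, assuming fair.c's NUMA_PERIOD_SLOTS value of 10 and dropping the period-adjustment policy built around it:

#include <stdio.h>

#define NUMA_PERIOD_SLOTS 10 /* matches fair.c's define; treat as an assumption */

/* How many of the NUMA_PERIOD_SLOTS "slots" were local faults. */
static unsigned long lr_ratio(unsigned long local, unsigned long remote)
{
	if (local + remote == 0)
		return 0; /* fair.c bails out earlier in this case (line 1951) */
	return (local * NUMA_PERIOD_SLOTS) / (local + remote);
}

int main(void)
{
	/* 600 local vs 200 remote faults -> 7 of 10 slots were local */
	printf("lr_ratio = %lu\n", lr_ratio(600, 200));
	return 0;
}

A high ratio means the task mostly faults on its home node, so scanning can slow down; a low one argues for scanning (and possibly migrating) more aggressively.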
2401 int local = !!(flags & TNF_FAULT_LOCAL); in task_numa_fault() local
2444 if (!priv && !local && ng && ng->active_nodes > 1 && in task_numa_fault()
2447 local = 1; in task_numa_fault()
2465 p->numa_faults_locality[local] += pages; in task_numa_fault()
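
task_numa_fault() (lines 2401-2465 above) classifies each fault as local or remote via TNF_FAULT_LOCAL, promotes shared faults to "local" when the task's numa_group is already spread over more than one active node, and then bumps numa_faults_locality[local]. A compilable sketch of just that accounting; the struct is a stand-in, and the TNF_FAULT_LOCAL value (0x08, matching include/linux/sched.h of this era) should be treated as an assumption:

#include <stdio.h>

#define TNF_FAULT_LOCAL 0x08 /* assumed from include/linux/sched.h */

struct task_model {
	/* [0] remote, [1] local, [2] failed migrations (not shown above) */
	unsigned long numa_faults_locality[3];
};

static void account_fault(struct task_model *p, int flags, int pages,
			  int priv, int group_active_nodes)
{
	int local = !!(flags & TNF_FAULT_LOCAL);

	/* Shared faults in a numa_group already spread over several nodes
	 * count as local, mirroring the "active_nodes > 1" promotion. */
	if (!priv && !local && group_active_nodes > 1)
		local = 1;

	p->numa_faults_locality[local] += pages;
}

int main(void)
{
	struct task_model t = { {0, 0, 0} };

	account_fault(&t, TNF_FAULT_LOCAL, 1, 1, 1); /* private local fault */
	account_fault(&t, 0, 1, 0, 2);               /* shared fault, promoted */
	printf("remote=%lu local=%lu\n",
	       t.numa_faults_locality[0], t.numa_faults_locality[1]);
	return 0;
}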
7980 struct sched_group *local; /* Local group in this sd */ member
8000 .local = NULL, in init_sd_lb_stats()
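
Lines 7980 and 8000 introduce the load-balancing side of "local": struct sd_lb_stats caches a pointer to the local group (the one containing the destination CPU) alongside the busiest one, both starting out NULL. A trimmed, compilable model of the structure, with the field list taken on trust from fair.c of this era and stub types added so it stands alone:

struct sched_group;                  /* opaque stub for this sketch */

struct sg_lb_stats {                 /* trimmed per-group statistics */
	unsigned long avg_load;
	unsigned long group_capacity;
	unsigned int sum_nr_running;
};

struct sd_lb_stats {
	struct sched_group *busiest; /* busiest group in this sd */
	struct sched_group *local;   /* local group in this sd */
	unsigned long total_load;
	unsigned long total_capacity;
	unsigned long avg_load;      /* average load across all groups */
	struct sg_lb_stats busiest_stat;
	struct sg_lb_stats local_stat;
};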
8410 (!group_smaller_max_cpu_capacity(sg, sds->local) || in update_sd_pick_busiest()
8433 group_smaller_min_cpu_capacity(sds->local, sg)) in update_sd_pick_busiest()
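
The two update_sd_pick_busiest() hits compare a candidate group's CPU capacity against the local group's, which matters on asymmetric (e.g. big.LITTLE) systems: a group whose biggest CPU is smaller than ours is normally a poor busiest pick, unless a misfit task needs rescuing. A hedged model of the group_smaller_*_cpu_capacity() predicates behind these checks, with the 1280/1024 margin assumed from fair.c's capacity_margin:

#define CAPACITY_MARGIN 1280 /* ~1.25x headroom, assumed from capacity_margin */

/* True if sg's largest CPU is meaningfully smaller than ref's (line 8410). */
static int group_smaller_max_capacity(unsigned long sg_max, unsigned long ref_max)
{
	return sg_max * CAPACITY_MARGIN < ref_max * 1024;
}

/* Same comparison on the groups' smallest CPUs (line 8433). */
static int group_smaller_min_capacity(unsigned long sg_min, unsigned long ref_min)
{
	return sg_min * CAPACITY_MARGIN < ref_min * 1024;
}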
8509 struct sg_lb_stats *local = &sds->local_stat; in update_sd_lb_stats() local
8525 sds->local = sg; in update_sd_lb_stats()
8526 sgs = local; in update_sd_lb_stats()
8548 if (prefer_sibling && sds->local && in update_sd_lb_stats()
8549 group_has_capacity(env, local) && in update_sd_lb_stats()
8550 (sgs->sum_nr_running > local->sum_nr_running + 1)) { in update_sd_lb_stats()
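
update_sd_lb_stats() (lines 8509-8550) records which group is local as it walks the domain, then applies the SD_PREFER_SIBLING bias: if the local group still has spare capacity and a sibling group runs at least two more tasks, the sibling is marked as having no capacity so tasks get spread. A simplified model of that decision, with group_has_capacity() reduced to a flag:

struct sg_stats_model {
	unsigned int sum_nr_running;
	int group_no_capacity;
};

/* SD_PREFER_SIBLING bias: spread tasks once a sibling group is
 * noticeably busier than the (non-full) local group. */
static void apply_prefer_sibling(struct sg_stats_model *sgs,
				 const struct sg_stats_model *local,
				 int local_has_capacity)
{
	if (local_has_capacity &&
	    sgs->sum_nr_running > local->sum_nr_running + 1)
		sgs->group_no_capacity = 1; /* fair.c also reclassifies the group */
}

The "+ 1" keeps the balancer from ping-ponging a single task between two groups whose counts differ by one.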
8658 struct sg_lb_stats *local, *busiest; in fix_small_imbalance() local
8660 local = &sds->local_stat; in fix_small_imbalance()
8663 if (!local->sum_nr_running) in fix_small_imbalance()
8664 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); in fix_small_imbalance()
8665 else if (busiest->load_per_task > local->load_per_task) in fix_small_imbalance()
8673 local->avg_load + (scaled_busy_load_per_task * imbn)) { in fix_small_imbalance()
8686 capa_now += local->group_capacity * in fix_small_imbalance()
8687 min(local->load_per_task, local->avg_load); in fix_small_imbalance()
8701 local->group_capacity; in fix_small_imbalance()
8704 local->group_capacity; in fix_small_imbalance()
8706 capa_move += local->group_capacity * in fix_small_imbalance()
8707 min(local->load_per_task, local->avg_load + tmp); in fix_small_imbalance()
8725 local->sum_nr_running < local->group_weight && in fix_small_imbalance()
8726 local->group_capacity < busiest->group_capacity) in fix_small_imbalance()
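
fix_small_imbalance() (lines 8658-8726) covers the case where the computed imbalance is too small to justify a migration on load terms alone: it estimates total capacity usage now (capa_now) versus after moving one busiest-group task (capa_move), each side weighted by group_capacity and capped by min(load_per_task, avg_load), and only sets an imbalance if throughput would rise. A standalone, trimmed model of that comparison, assuming SCHED_CAPACITY_SCALE = 1024 and dropping the scaled_busy_load_per_task refinement and overflow guards:

#define SCHED_CAPACITY_SCALE 1024UL

struct grp_model {
	unsigned long load_per_task;
	unsigned long avg_load;
	unsigned long group_capacity;
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Would moving one busiest-group task raise total capacity usage? */
static int one_task_move_helps(const struct grp_model *local,
			       const struct grp_model *busiest)
{
	unsigned long capa_now = 0, capa_move = 0, tmp;

	capa_now += busiest->group_capacity *
		    min_ul(busiest->load_per_task, busiest->avg_load);
	capa_now += local->group_capacity *
		    min_ul(local->load_per_task, local->avg_load);
	capa_now /= SCHED_CAPACITY_SCALE;

	/* load the busiest group would shed */
	if (busiest->avg_load > busiest->load_per_task)
		capa_move += busiest->group_capacity *
			     min_ul(busiest->load_per_task,
				    busiest->avg_load - busiest->load_per_task);

	/* load the local group would absorb, rescaled to its capacity */
	tmp = busiest->load_per_task * SCHED_CAPACITY_SCALE /
	      local->group_capacity;
	capa_move += local->group_capacity *
		     min_ul(local->load_per_task, local->avg_load + tmp);
	capa_move /= SCHED_CAPACITY_SCALE;

	return capa_move > capa_now;
}

When this returns true, fair.c sets env->imbalance to one task's worth of load (busiest->load_per_task) so exactly one task moves.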
8739 struct sg_lb_stats *local, *busiest; in calculate_imbalance() local
8741 local = &sds->local_stat; in calculate_imbalance()
8761 local->avg_load >= sds->avg_load)) { in calculate_imbalance()
8770 local->group_type == group_overloaded) { in calculate_imbalance()
8792 (sds->avg_load - local->avg_load) * local->group_capacity in calculate_imbalance()
8806 local->sum_nr_running < local->group_weight)) { in calculate_imbalance()
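
calculate_imbalance() (lines 8739-8806) sizes env->imbalance so the pull neither drags the busiest group below the domain average nor pushes the local group above it; line 8792 is the local-group half of that min(). A minimal model of the core formula, again assuming SCHED_CAPACITY_SCALE = 1024:

#define SCHED_CAPACITY_SCALE 1024UL

/* Pull the smaller of: the busiest group's excess over the domain
 * average, and the local group's room below it (both capacity-scaled).
 * Assumes busiest_avg >= sd_avg >= local_avg, which the callers'
 * earlier checks guarantee. */
static unsigned long imbalance_model(unsigned long busiest_avg,
				     unsigned long busiest_cap,
				     unsigned long local_avg,
				     unsigned long local_cap,
				     unsigned long sd_avg)
{
	unsigned long excess = (busiest_avg - sd_avg) * busiest_cap;
	unsigned long room   = (sd_avg - local_avg) * local_cap;

	return (excess < room ? excess : room) / SCHED_CAPACITY_SCALE;
}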
8836 struct sg_lb_stats *local, *busiest; in find_busiest_group() local
8858 local = &sds.local_stat; in find_busiest_group()
8885 if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) && in find_busiest_group()
8897 if (local->avg_load >= busiest->avg_load) in find_busiest_group()
8904 if (local->avg_load >= sds.avg_load) in find_busiest_group()
8916 (local->idle_cpus <= (busiest->idle_cpus + 1))) in find_busiest_group()
8924 env->sd->imbalance_pct * local->avg_load) in find_busiest_group()
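
find_busiest_group() (lines 8836-8924) ends with a ladder of bail-outs before committing to a balance: nothing to do if the local group is already at least as loaded as the busiest one or as the domain average; when this CPU is idle, an idle-CPU gap of at most one is not worth chasing; otherwise the busiest group must exceed the local load by the domain's imbalance_pct. A condensed model of that ladder, with the force_balance path of line 8885 omitted and imbalance_pct values (typically 110-125 by domain level) taken as an assumption:

/* Returns 1 if load balancing should proceed, 0 if out_balanced. */
static int should_balance(unsigned long local_avg, unsigned long busiest_avg,
			  unsigned long sd_avg, unsigned int imbalance_pct,
			  int this_cpu_idle, unsigned int local_idle_cpus,
			  unsigned int busiest_idle_cpus,
			  int busiest_overloaded)
{
	if (local_avg >= busiest_avg)	/* nothing worth pulling */
		return 0;
	if (local_avg >= sd_avg)	/* already at or above the average */
		return 0;
	if (this_cpu_idle) {
		/* an idle-CPU gap of one would just move the imbalance */
		if (!busiest_overloaded &&
		    local_idle_cpus <= busiest_idle_cpus + 1)
			return 0;
	} else if (100 * busiest_avg <= imbalance_pct * local_avg) {
		return 0;		/* below the conservative threshold */
	}
	return 1;
}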