Lines matching refs: j — references to the local variable j in the scheduler topology code (kernel/sched/topology.c). Each entry gives the source line, the code fragment, and the enclosing function.
1460 int i,j; in sched_numa_warn() local
1471 for (j = 0; j < nr_node_ids; j++) in sched_numa_warn()
1472 printk(KERN_CONT "%02d ", node_distance(i,j)); in sched_numa_warn()
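
The two references above (source lines 1460-1472) sit in the diagnostic that dumps the full node-distance matrix when a NUMA sanity check fails. A minimal sketch reconstructed from these fragments, assuming the mainline kernel/sched/topology.c context; the real function's one-shot "done" guard is elided:

    static void sched_numa_warn(const char *str)
    {
        int i, j;

        printk(KERN_WARNING "ERROR: %s\n\n", str);

        /* Dump the nr_node_ids x nr_node_ids node-distance matrix. */
        for (i = 0; i < nr_node_ids; i++) {
            printk(KERN_WARNING "  ");
            for (j = 0; j < nr_node_ids; j++)
                printk(KERN_CONT "%02d ", node_distance(i, j));
            printk(KERN_CONT "\n");
        }
    }
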
1553 int i, j; in sched_init_numa() local
1565 for (j = 0; j < nr_node_ids; j++) { in sched_init_numa()
1566 int distance = node_distance(i, j); in sched_init_numa()
1588 for (i = 0, j = 0; i < nr_levels; i++, j++) { in sched_init_numa()
1589 j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j); in sched_init_numa()
1590 sched_domains_numa_distance[i] = j; in sched_init_numa()
1627 for (j = 0; j < nr_node_ids; j++) { in sched_init_numa()
1634 sched_domains_numa_masks[i][j] = mask; in sched_init_numa()
1637 if (sched_debug() && (node_distance(j, k) != node_distance(k, j))) in sched_init_numa()
1640 if (node_distance(j, k) > sched_domains_numa_distance[i]) in sched_init_numa()
1674 for (j = 1; j < nr_levels; i++, j++) { in sched_init_numa()
1679 .numa_level = j, in sched_init_numa()
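
In sched_init_numa() (source lines 1553-1679), j plays several roles: inner node index while collecting distances, cursor over set bits while recording the unique distances, node index while building the per-level cpumasks, and finally the NUMA level number when appending topology levels. A sketch of those loops, reconstructed from the fragments above plus the surrounding mainline code; allocation-failure paths and the remaining topology-level fields (sd_flags, SDTL_OVERLAP, SD_INIT_NAME) are elided:

    int i, j, k, nr_levels;

    /* Collect every distinct node_distance() value in a bitmap. */
    for (i = 0; i < nr_node_ids; i++)
        for (j = 0; j < nr_node_ids; j++)
            bitmap_set(distance_map, node_distance(i, j), 1);

    nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);

    /* Record each unique distance in ascending order: j walks the set bits. */
    for (i = 0, j = 0; i < nr_levels; i++, j++) {
        j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
        sched_domains_numa_distance[i] = j;
    }

    /* For level i and node j: mask of all CPUs on nodes within distance. */
    for (i = 0; i < nr_levels; i++) {
        for (j = 0; j < nr_node_ids; j++) {
            struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);

            sched_domains_numa_masks[i][j] = mask;

            for_each_node(k) {
                if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
                    sched_numa_warn("Node-distance not symmetric");

                if (node_distance(j, k) > sched_domains_numa_distance[i])
                    continue;

                cpumask_or(mask, mask, cpumask_of_node(k));
            }
        }
    }

    /* Append one topology level per remote distance; i keeps counting
     * past the default levels, j is the NUMA level (0 = node-local). */
    for (j = 1; j < nr_levels; i++, j++) {
        tl[i] = (struct sched_domain_topology_level){
            .mask = sd_numa_mask,
            .numa_level = j,
        };
    }
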
1695 int i, j; in sched_domains_numa_masks_set() local
1698 for (j = 0; j < nr_node_ids; j++) { in sched_domains_numa_masks_set()
1699 if (node_distance(j, node) <= sched_domains_numa_distance[i]) in sched_domains_numa_masks_set()
1700 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_set()
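
sched_domains_numa_masks_set() (source lines 1695-1700) runs when a CPU comes online: the CPU is added to the mask of every (level i, node j) pair whose distance threshold covers the CPU's own node. A reconstructed sketch, assuming the mainline definition around these fragments; sched_domains_numa_levels is the level count set up by sched_init_numa():

    void sched_domains_numa_masks_set(unsigned int cpu)
    {
        int node = cpu_to_node(cpu);
        int i, j;

        for (i = 0; i < sched_domains_numa_levels; i++) {
            for (j = 0; j < nr_node_ids; j++) {
                /* The CPU joins every mask whose radius reaches its node. */
                if (node_distance(j, node) <= sched_domains_numa_distance[i])
                    cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
            }
        }
    }
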
1707 int i, j; in sched_domains_numa_masks_clear() local
1710 for (j = 0; j < nr_node_ids; j++) in sched_domains_numa_masks_clear()
1711 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_clear()
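
The inverse operation on hotplug-out is unconditional: the departing CPU is cleared from every per-level, per-node mask. Sketch under the same assumptions:

    void sched_domains_numa_masks_clear(unsigned int cpu)
    {
        int i, j;

        for (i = 0; i < sched_domains_numa_levels; i++) {
            for (j = 0; j < nr_node_ids; j++)
                cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
        }
    }
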
1725 int i, j = cpu_to_node(cpu); in sched_numa_find_closest() local
1728 cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]); in sched_numa_find_closest()
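
In sched_numa_find_closest() (source lines 1725-1728), j stays fixed to the starting CPU's node while i widens the search one distance level at a time; since sched_domains_numa_masks[i][j] only grows with i, the first hit is a closest match. Reconstructed sketch, same assumptions as above:

    int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
    {
        int i, j = cpu_to_node(cpu);

        for (i = 0; i < sched_domains_numa_levels; i++) {
            /* Any candidate CPU within the current distance radius? */
            cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
            if (cpu < nr_cpu_ids)
                return cpu;
        }
        return nr_cpu_ids;
    }
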
1740 int j; in __sdt_alloc() local
1761 for_each_cpu(j, cpu_map) { in __sdt_alloc()
1768 GFP_KERNEL, cpu_to_node(j)); in __sdt_alloc()
1772 *per_cpu_ptr(sdd->sd, j) = sd; in __sdt_alloc()
1775 GFP_KERNEL, cpu_to_node(j)); in __sdt_alloc()
1779 *per_cpu_ptr(sdd->sds, j) = sds; in __sdt_alloc()
1782 GFP_KERNEL, cpu_to_node(j)); in __sdt_alloc()
1788 *per_cpu_ptr(sdd->sg, j) = sg; in __sdt_alloc()
1791 GFP_KERNEL, cpu_to_node(j)); in __sdt_alloc()
1796 sgc->id = j; in __sdt_alloc()
1799 *per_cpu_ptr(sdd->sgc, j) = sgc; in __sdt_alloc()
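
__sdt_alloc() (source lines 1740-1799) uses j as the CPU being set up: for each CPU in cpu_map it allocates the per-CPU sched_domain, sched_domain_shared, sched_group, and sched_group_capacity objects on that CPU's own node (cpu_to_node(j)) and stores them via per_cpu_ptr(). A condensed sketch of one loop body, reconstructed from the fragments above; the sds and sg allocations follow the same kzalloc_node/per_cpu_ptr pattern and are elided, as is cleanup on failure:

    for_each_cpu(j, cpu_map) {
        struct sched_domain *sd;
        struct sched_group_capacity *sgc;

        /* Node-local allocation: the object lives on CPU j's NUMA node. */
        sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                          GFP_KERNEL, cpu_to_node(j));
        if (!sd)
            return -ENOMEM;

        *per_cpu_ptr(sdd->sd, j) = sd;

        /* ... sds and sg are allocated the same way (elided) ... */

        sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
                           GFP_KERNEL, cpu_to_node(j));
        if (!sgc)
            return -ENOMEM;

        sgc->id = j; /* debug tag: which CPU this capacity struct belongs to */

        *per_cpu_ptr(sdd->sgc, j) = sgc;
    }
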
1809 int j; in __sdt_free() local
1814 for_each_cpu(j, cpu_map) { in __sdt_free()
1818 sd = *per_cpu_ptr(sdd->sd, j); in __sdt_free()
1821 kfree(*per_cpu_ptr(sdd->sd, j)); in __sdt_free()
1825 kfree(*per_cpu_ptr(sdd->sds, j)); in __sdt_free()
1827 kfree(*per_cpu_ptr(sdd->sg, j)); in __sdt_free()
1829 kfree(*per_cpu_ptr(sdd->sgc, j)); in __sdt_free()
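
__sdt_free() walks the same per-CPU slots in reverse: for each CPU j it frees whatever __sdt_alloc() (or a partial, failed allocation pass) left behind, hence the NULL check on each sdd array. Sketch, with free_sched_groups() and SD_OVERLAP taken from the surrounding mainline code rather than from the fragments above:

    for_each_cpu(j, cpu_map) {
        struct sched_domain *sd;

        if (sdd->sd) {
            sd = *per_cpu_ptr(sdd->sd, j);
            /* Overlapping domains own their groups; free those first. */
            if (sd && (sd->flags & SD_OVERLAP))
                free_sched_groups(sd->groups, 0);
            kfree(*per_cpu_ptr(sdd->sd, j));
        }

        if (sdd->sds)
            kfree(*per_cpu_ptr(sdd->sds, j));
        if (sdd->sg)
            kfree(*per_cpu_ptr(sdd->sg, j));
        if (sdd->sgc)
            kfree(*per_cpu_ptr(sdd->sgc, j));
    }
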
1915 int i, j, asym_level = 0; in asym_cpu_capacity_level() local
1946 for_each_cpu_and(j, tl->mask(i), cpu_map) { in asym_cpu_capacity_level()
1949 capacity = arch_scale_cpu_capacity(j); in asym_cpu_capacity_level()
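
In asym_cpu_capacity_level() (source lines 1915-1949), j scans the CPUs that share topology level tl with CPU i, looking for the highest arch_scale_cpu_capacity() value; the function returns the lowest level at which the full capacity range is visible, which is roughly where the scheduler starts setting SD_ASYM_CPUCAPACITY. A sketch of the inner scan; tl_id, max_capacity, asym_level and asym_tl are locals of the surrounding function, reconstructed here from mainline:

    for_each_cpu_and(j, tl->mask(i), cpu_map) {
        unsigned long capacity = arch_scale_cpu_capacity(j);

        /* Remember the level at which a bigger CPU first shows up. */
        if (capacity > max_capacity) {
            max_capacity = capacity;
            asym_level = tl_id;
            asym_tl = tl;
        }
    }
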
2212 int i, j, n; in partition_sched_domains_locked() local
2238 for (j = 0; j < n && !new_topology; j++) { in partition_sched_domains_locked()
2239 if (cpumask_equal(doms_cur[i], doms_new[j]) && in partition_sched_domains_locked()
2240 dattrs_equal(dattr_cur, i, dattr_new, j)) { in partition_sched_domains_locked()
2270 for (j = 0; j < n && !new_topology; j++) { in partition_sched_domains_locked()
2271 if (cpumask_equal(doms_new[i], doms_cur[j]) && in partition_sched_domains_locked()
2272 dattrs_equal(dattr_new, i, dattr_cur, j)) in partition_sched_domains_locked()
2284 for (j = 0; j < n && !sched_energy_update; j++) { in partition_sched_domains_locked()
2285 if (cpumask_equal(doms_new[i], doms_cur[j]) && in partition_sched_domains_locked()
2286 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
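
partition_sched_domains_locked() (source lines 2212-2286) uses i and j for an O(n*m) diff between the current and new domain partitions, so unchanged domains are neither destroyed nor rebuilt; a third pass applies the same idea to EAS performance domains. A sketch of the three match loops, reconstructed from the fragments above; the bookkeeping around n, new_topology, sched_energy_update and has_eas follows mainline and is assumed here:

    /* Destroy current domains with no equal set in doms_new[];
     * at this point n is the number of entries in doms_new[]. */
    for (i = 0; i < ndoms_cur; i++) {
        for (j = 0; j < n && !new_topology; j++) {
            if (cpumask_equal(doms_cur[i], doms_new[j]) &&
                dattrs_equal(dattr_cur, i, dattr_new, j))
                goto match1;
        }
        detach_destroy_domains(doms_cur[i]);
    match1:
        ;
    }

    n = ndoms_cur;

    /* Build only the doms_new[] entries without a current equivalent. */
    for (i = 0; i < ndoms_new; i++) {
        for (j = 0; j < n && !new_topology; j++) {
            if (cpumask_equal(doms_new[i], doms_cur[j]) &&
                dattrs_equal(dattr_new, i, dattr_cur, j))
                goto match2;
        }
        build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
    match2:
        ;
    }

    /* Same diff for EAS performance domains (CONFIG_ENERGY_MODEL). */
    for (i = 0; i < ndoms_new; i++) {
        for (j = 0; j < n && !sched_energy_update; j++) {
            if (cpumask_equal(doms_new[i], doms_cur[j]) &&
                cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
                has_eas = true;
                goto match3;
            }
        }
        has_eas |= build_perf_domains(doms_new[i]);
    match3:
        ;
    }
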