Lines matching refs: i (local loop variable, kernel/sched/topology.c)

344 int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map); in build_perf_domains() local
363 for_each_cpu(i, cpu_map) { in build_perf_domains()
365 if (find_pd(pd, i)) in build_perf_domains()
369 policy = cpufreq_cpu_get(i); in build_perf_domains()
382 tmp = pd_init(i); in build_perf_domains()
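
The build_perf_domains() lines above iterate the online CPUs and start a new performance domain for each CPU not yet covered by one (the find_pd() check at line 365). A minimal userspace sketch of that walk, with a toy 64-bit bitmap standing in for struct cpumask and an invented freq_domain[] array in place of cpufreq_cpu_get():

    /* Hypothetical analogue of the build_perf_domains() walk: skip CPUs
     * already covered, and cover a whole frequency domain per new domain. */
    #include <stdio.h>
    #include <stdint.h>

    #define NR_CPUS 8

    /* crude stand-in for for_each_cpu(i, mask) on a 64-bit bitmap */
    #define for_each_cpu(i, mask) \
            for ((i) = 0; (i) < NR_CPUS; (i)++) \
                    if ((mask) & (1ULL << (i)))

    int main(void)
    {
            uint64_t cpu_map = 0xFF;                      /* CPUs 0-7 online */
            uint64_t covered = 0;                         /* CPUs already in a domain */
            int freq_domain[NR_CPUS] = {0,0,0,0,1,1,1,1}; /* two invented policies */
            int i, j, nr_pd = 0;

            for_each_cpu(i, cpu_map) {
                    if (covered & (1ULL << i))            /* like find_pd(pd, i) */
                            continue;
                    /* new domain: cover every CPU sharing i's frequency domain */
                    for_each_cpu(j, cpu_map)
                            if (freq_domain[j] == freq_domain[i])
                                    covered |= 1ULL << j;
                    printf("perf domain %d starts at CPU %d\n", nr_pd++, i);
            }
            return 0;
    }
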
848 int i; in build_balance_mask() local
852 for_each_cpu(i, sg_span) { in build_balance_mask()
853 sibling = *per_cpu_ptr(sdd->sd, i); in build_balance_mask()
867 cpumask_set_cpu(i, mask); in build_balance_mask()
937 int i; in build_overlap_sched_groups() local
941 for_each_cpu_wrap(i, span, cpu) { in build_overlap_sched_groups()
944 if (cpumask_test_cpu(i, covered)) in build_overlap_sched_groups()
947 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
959 if (!cpumask_test_cpu(i, sched_domain_span(sibling))) in build_overlap_sched_groups()
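
build_balance_mask() and build_overlap_sched_groups() above both walk a CPU span while consulting per-CPU sibling domains; the overlap builder additionally starts at a given CPU and wraps around the span (for_each_cpu_wrap(), line 941), skipping CPUs already covered. build_sched_groups() below uses the same pattern. A runnable sketch of just that wrap-and-skip idiom, on a plain bitmap:

    /* Minimal sketch of the for_each_cpu_wrap() + covered-check idiom. */
    #include <stdio.h>
    #include <stdint.h>

    #define NR_CPUS 8

    int main(void)
    {
            uint64_t span = 0xF0;     /* CPUs 4-7 */
            uint64_t covered = 0x20;  /* CPU 5 already placed in a group */
            int start = 6, n, i;

            for (n = 0; n < NR_CPUS; n++) {
                    i = (start + n) % NR_CPUS;        /* wrap around the id space */
                    if (!(span & (1ULL << i)))        /* only CPUs in the span */
                            continue;
                    if (covered & (1ULL << i))        /* cpumask_test_cpu(i, covered) */
                            continue;
                    covered |= 1ULL << i;             /* this CPU seeds a new group */
                    printf("group built for CPU %d\n", i);
            }
            return 0;
    }
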
1111 int i; in build_sched_groups() local
1118 for_each_cpu_wrap(i, span, cpu) { in build_sched_groups()
1121 if (cpumask_test_cpu(i, covered)) in build_sched_groups()
1124 sg = get_group(i, sdd); in build_sched_groups()
1473 int i,j; in sched_numa_warn() local
1482 for (i = 0; i < nr_node_ids; i++) { in sched_numa_warn()
1485 printk(KERN_CONT "%02d ", node_distance(i,j)); in sched_numa_warn()
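
sched_numa_warn() dumps the full node-distance table when it meets a distance it cannot handle. The double loop at lines 1482-1485 reduces to a plain matrix print, reproduced here with made-up distances:

    /* Runnable sketch of the matrix dump in sched_numa_warn(). */
    #include <stdio.h>

    #define NR_NODES 4

    static int node_distance(int i, int j)
    {
            static const int dist[NR_NODES][NR_NODES] = {
                    { 10, 20, 30, 30 },
                    { 20, 10, 30, 30 },
                    { 30, 30, 10, 20 },
                    { 30, 30, 20, 10 },
            };
            return dist[i][j];
    }

    int main(void)
    {
            int i, j;

            for (i = 0; i < NR_NODES; i++) {
                    printf("  ");
                    for (j = 0; j < NR_NODES; j++)
                            printf("%02d ", node_distance(i, j));
                    printf("\n");
            }
            return 0;
    }
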
1493 int i; in find_numa_distance() local
1498 for (i = 0; i < sched_domains_numa_levels; i++) { in find_numa_distance()
1499 if (sched_domains_numa_distance[i] == distance) in find_numa_distance()
1563 int i, j, k; in sched_init_numa() local
1581 for (i = 0; i < nr_node_ids; i++) { in sched_init_numa()
1584 int distance = node_distance(i, k); in sched_init_numa()
1596 if (sched_debug() && node_distance(k, i) != distance) in sched_init_numa()
1599 if (sched_debug() && i && !find_numa_distance(distance)) in sched_init_numa()
1642 for (i = 0; i < level; i++) { in sched_init_numa()
1643 sched_domains_numa_masks[i] = in sched_init_numa()
1645 if (!sched_domains_numa_masks[i]) in sched_init_numa()
1653 sched_domains_numa_masks[i][j] = mask; in sched_init_numa()
1656 if (node_distance(j, k) > sched_domains_numa_distance[i]) in sched_init_numa()
1665 for (i = 0; sched_domain_topology[i].mask; i++); in sched_init_numa()
1667 tl = kzalloc((i + level + 1) * in sched_init_numa()
1675 for (i = 0; sched_domain_topology[i].mask; i++) in sched_init_numa()
1676 tl[i] = sched_domain_topology[i]; in sched_init_numa()
1681 tl[i++] = (struct sched_domain_topology_level){ in sched_init_numa()
1690 for (j = 1; j < level; i++, j++) { in sched_init_numa()
1691 tl[i] = (struct sched_domain_topology_level){ in sched_init_numa()
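
sched_init_numa() first collects the unique node distances into sorted levels, then builds, for each level i and node j, a mask of the nodes within that level's distance (the node_distance(j, k) test at line 1656), and finally appends one NUMA topology level per distance to sched_domain_topology. A self-contained sketch of the first two steps, with invented distances:

    /* Sketch of the two core loops of sched_init_numa(); the topology-array
     * rebuild at lines 1665-1691 is omitted. */
    #include <stdio.h>
    #include <stdbool.h>

    #define NR_NODES 4

    static const int dist[NR_NODES][NR_NODES] = {
            { 10, 20, 30, 30 },
            { 20, 10, 30, 30 },
            { 30, 30, 10, 20 },
            { 30, 30, 20, 10 },
    };

    int main(void)
    {
            int levels[NR_NODES * NR_NODES];
            int nr_levels = 0, i, j, k;

            /* collect unique distances, insertion-sorted */
            for (i = 0; i < NR_NODES; i++)
                    for (k = 0; k < NR_NODES; k++) {
                            int d = dist[i][k];
                            bool seen = false;

                            for (j = 0; j < nr_levels; j++)
                                    seen |= (levels[j] == d);
                            if (seen)
                                    continue;
                            for (j = nr_levels++; j > 0 && levels[j - 1] > d; j--)
                                    levels[j] = levels[j - 1];
                            levels[j] = d;
                    }

            /* per-level, per-node masks of "close enough" nodes */
            for (i = 0; i < nr_levels; i++)
                    for (j = 0; j < NR_NODES; j++) {
                            printf("level %d (<= %d), node %d:", i, levels[i], j);
                            for (k = 0; k < NR_NODES; k++)
                                    if (dist[j][k] <= levels[i])
                                            printf(" %d", k);
                            printf("\n");
                    }
            return 0;
    }
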
1711 int i, j; in sched_domains_numa_masks_set() local
1713 for (i = 0; i < sched_domains_numa_levels; i++) { in sched_domains_numa_masks_set()
1715 if (node_distance(j, node) <= sched_domains_numa_distance[i]) in sched_domains_numa_masks_set()
1716 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_set()
1723 int i, j; in sched_domains_numa_masks_clear() local
1725 for (i = 0; i < sched_domains_numa_levels; i++) { in sched_domains_numa_masks_clear()
1727 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); in sched_domains_numa_masks_clear()
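
The two hotplug helpers keep those masks current: sched_domains_numa_masks_set() sets an onlined CPU in every (level, node) mask whose node is close enough to the CPU's node, and sched_domains_numa_masks_clear() removes an offlined CPU everywhere. A userspace analogue with toy distance and level tables:

    /* Hedged analogue of the NUMA mask set/clear hotplug updates. */
    #include <stdio.h>
    #include <stdint.h>

    #define NR_NODES  2
    #define NR_LEVELS 2

    static const int dist[NR_NODES][NR_NODES] = { { 10, 20 }, { 20, 10 } };
    static const int level_dist[NR_LEVELS] = { 10, 20 };
    static uint64_t masks[NR_LEVELS][NR_NODES];   /* CPU bitmaps */

    static void numa_masks_set(int cpu, int node)
    {
            int i, j;

            for (i = 0; i < NR_LEVELS; i++)
                    for (j = 0; j < NR_NODES; j++)
                            if (dist[j][node] <= level_dist[i])
                                    masks[i][j] |= 1ULL << cpu;
    }

    static void numa_masks_clear(int cpu)
    {
            int i, j;

            for (i = 0; i < NR_LEVELS; i++)
                    for (j = 0; j < NR_NODES; j++)
                            masks[i][j] &= ~(1ULL << cpu);
    }

    int main(void)
    {
            numa_masks_set(3, 1);     /* CPU 3 on node 1 comes online */
            printf("level 1, node 0 mask: %#llx\n",
                   (unsigned long long)masks[1][0]);
            numa_masks_clear(3);      /* and goes offline again */
            printf("level 1, node 0 mask: %#llx\n",
                   (unsigned long long)masks[1][0]);
            return 0;
    }
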
1741 int i, j = cpu_to_node(cpu); in sched_numa_find_closest() local
1743 for (i = 0; i < sched_domains_numa_levels; i++) { in sched_numa_find_closest()
1744 cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]); in sched_numa_find_closest()
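
sched_numa_find_closest() scans the levels from nearest to farthest and returns the first allowed CPU it finds at a level (cpumask_any_and(), line 1744). Sketched here with invented per-level masks:

    /* Sketch: nearest allowed CPU by increasing NUMA level. */
    #include <stdio.h>
    #include <stdint.h>

    #define NR_LEVELS 2

    /* masks[i] = CPUs within level i's distance of our node (made up) */
    static const uint64_t masks[NR_LEVELS] = { 0x03, 0x0F };

    static int find_closest(uint64_t allowed)
    {
            int i, cpu;

            for (i = 0; i < NR_LEVELS; i++) {
                    uint64_t hit = allowed & masks[i];  /* cpumask_any_and() */

                    if (hit) {
                            for (cpu = 0; !(hit & (1ULL << cpu)); cpu++)
                                    ;
                            return cpu;
                    }
            }
            return -1;    /* the kernel returns nr_cpu_ids here */
    }

    int main(void)
    {
            printf("closest allowed CPU: %d\n", find_closest(0x0C)); /* -> 2 */
            return 0;
    }
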
1895 int i, j, asym_level = 0; in asym_cpu_capacity_level() local
1903 for_each_cpu(i, cpu_map) { in asym_cpu_capacity_level()
1904 if (arch_scale_cpu_capacity(i) != cap) { in asym_cpu_capacity_level()
1918 for_each_cpu(i, cpu_map) { in asym_cpu_capacity_level()
1919 unsigned long max_capacity = arch_scale_cpu_capacity(i); in asym_cpu_capacity_level()
1926 for_each_cpu_and(j, tl->mask(i), cpu_map) { in asym_cpu_capacity_level()
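
asym_cpu_capacity_level() first checks whether any CPU capacity differs from the first CPU's (lines 1903-1904), then looks for the lowest topology level at which a CPU's span already contains a higher-capacity CPU. A simplified take on that search, with invented capacities and spans:

    /* Simplified sketch: where does capacity asymmetry become visible? */
    #include <stdio.h>

    #define NR_CPUS   4
    #define NR_LEVELS 2

    /* two little CPUs, two big ones (invented values) */
    static const int capacity[NR_CPUS] = { 446, 446, 1024, 1024 };

    /* span[l][i] = bitmap of CPUs sharing CPU i's domain at level l */
    static const unsigned span[NR_LEVELS][NR_CPUS] = {
            { 0x3, 0x3, 0xC, 0xC },   /* level 0: per-cluster */
            { 0xF, 0xF, 0xF, 0xF },   /* level 1: whole system */
    };

    int main(void)
    {
            int i, j, l, asym_level = 0;

            for (i = 0; i < NR_CPUS; i++) {
                    int max_cap = capacity[i];

                    for (l = 0; l < NR_LEVELS; l++)
                            for (j = 0; j < NR_CPUS; j++) {
                                    if (!(span[l][i] & (1u << j)))
                                            continue;
                                    if (capacity[j] <= max_cap)
                                            continue;
                                    max_cap = capacity[j];  /* bigger CPU appears */
                                    if (l > asym_level)
                                            asym_level = l;
                            }
            }
            printf("asymmetry first fully visible at level %d\n", asym_level);
            return 0;
    }
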
1957 int i, ret = -ENOMEM; in build_sched_domains() local
1971 for_each_cpu(i, cpu_map) { in build_sched_domains()
1983 sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i); in build_sched_domains()
1986 *per_cpu_ptr(d.sd, i) = sd; in build_sched_domains()
1995 for_each_cpu(i, cpu_map) { in build_sched_domains()
1996 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
1999 if (build_overlap_sched_groups(sd, i)) in build_sched_domains()
2002 if (build_sched_groups(sd, i)) in build_sched_domains()
2009 for (i = nr_cpumask_bits-1; i >= 0; i--) { in build_sched_domains()
2010 if (!cpumask_test_cpu(i, cpu_map)) in build_sched_domains()
2013 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { in build_sched_domains()
2014 claim_allocations(i, sd); in build_sched_domains()
2015 init_sched_groups_capacity(i, sd); in build_sched_domains()
2021 for_each_cpu(i, cpu_map) { in build_sched_domains()
2022 sd = *per_cpu_ptr(d.sd, i); in build_sched_domains()
2023 cpu_attach_domain(sd, d.rd, i); in build_sched_domains()
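
build_sched_domains() runs in phases: build a parent-linked domain per CPU and topology level (lines 1971-1986), build the groups (1995-2002), initialize capacities bottom-up in reverse CPU order (2009-2015), and attach the result (2021-2023). A toy skeleton of the per-CPU parent chains and the reverse-order walk; groups and capacities are elided, and allocations are intentionally leaked at exit:

    /* Skeleton of the build_sched_domains() phases with toy data. */
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS   4
    #define NR_LEVELS 2

    struct sd {
            struct sd *parent;
            int level;
    };

    int main(void)
    {
            struct sd *per_cpu_sd[NR_CPUS];
            struct sd *sd, *child;
            int i, l;

            /* phase 1: one domain per (cpu, level); each child points up */
            for (i = 0; i < NR_CPUS; i++) {
                    child = NULL;
                    for (l = 0; l < NR_LEVELS; l++) {
                            sd = calloc(1, sizeof(*sd));
                            sd->level = l;
                            if (child)
                                    child->parent = sd;
                            else
                                    per_cpu_sd[i] = sd;   /* lowest level */
                            child = sd;
                    }
            }

            /* the capacity pass walks chains in reverse CPU order (line 2009) */
            for (i = NR_CPUS - 1; i >= 0; i--)
                    for (sd = per_cpu_sd[i]; sd; sd = sd->parent)
                            printf("cpu %d: level %d ready\n", i, sd->level);

            return 0;
    }
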
2065 int i; in alloc_sched_domains() local
2071 for (i = 0; i < ndoms; i++) { in alloc_sched_domains()
2072 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { in alloc_sched_domains()
2073 free_sched_domains(doms, i); in alloc_sched_domains()
2082 unsigned int i; in free_sched_domains() local
2083 for (i = 0; i < ndoms; i++) in free_sched_domains()
2084 free_cpumask_var(doms[i]); in free_sched_domains()
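
alloc_sched_domains() and free_sched_domains() show the classic cleanup-on-partial-failure pattern: when the i-th allocation fails, free exactly the i entries allocated so far (line 2073). The same shape in plain C, with a toy type in place of cpumask_var_t:

    /* Allocate an array of masks; on mid-loop failure, unwind cleanly. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long cpumask_t;   /* toy stand-in */

    static void free_doms(cpumask_t **doms, unsigned int ndoms)
    {
            unsigned int i;

            for (i = 0; i < ndoms; i++)
                    free(doms[i]);
            free(doms);
    }

    static cpumask_t **alloc_doms(unsigned int ndoms)
    {
            cpumask_t **doms;
            unsigned int i;

            doms = calloc(ndoms, sizeof(*doms));
            if (!doms)
                    return NULL;
            for (i = 0; i < ndoms; i++) {
                    doms[i] = calloc(1, sizeof(*doms[i]));
                    if (!doms[i]) {
                            free_doms(doms, i);   /* free only what we got */
                            return NULL;
                    }
            }
            return doms;
    }

    int main(void)
    {
            cpumask_t **doms = alloc_doms(4);

            printf("%s\n", doms ? "allocated 4 domains" : "allocation failed");
            if (doms)
                    free_doms(doms, 4);
            return 0;
    }
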
2119 int i; in detach_destroy_domains() local
2125 for_each_cpu(i, cpu_map) in detach_destroy_domains()
2126 cpu_attach_domain(NULL, &def_root_domain, i); in detach_destroy_domains()
2177 int i, j, n; in partition_sched_domains_locked() local
2202 for (i = 0; i < ndoms_cur; i++) { in partition_sched_domains_locked()
2204 if (cpumask_equal(doms_cur[i], doms_new[j]) && in partition_sched_domains_locked()
2205 dattrs_equal(dattr_cur, i, dattr_new, j)) { in partition_sched_domains_locked()
2214 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; in partition_sched_domains_locked()
2220 detach_destroy_domains(doms_cur[i]); in partition_sched_domains_locked()
2234 for (i = 0; i < ndoms_new; i++) { in partition_sched_domains_locked()
2236 if (cpumask_equal(doms_new[i], doms_cur[j]) && in partition_sched_domains_locked()
2237 dattrs_equal(dattr_new, i, dattr_cur, j)) in partition_sched_domains_locked()
2241 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); in partition_sched_domains_locked()
2248 for (i = 0; i < ndoms_new; i++) { in partition_sched_domains_locked()
2250 if (cpumask_equal(doms_new[i], doms_cur[j]) && in partition_sched_domains_locked()
2257 has_eas |= build_perf_domains(doms_new[i]); in partition_sched_domains_locked()
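
partition_sched_domains_locked() diffs the current and new domain sets: every current domain without an exact match in the new set is destroyed (lines 2202-2220), every new domain without a match in the old set is built (2234-2241), and a final pass wires up EAS performance domains (2248-2257). The destroy/build diff, with integers standing in for cpumasks and the kernel's goto match1/match2 shape kept:

    /* Sketch of the domain-set diff; attributes are ignored here. */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            unsigned doms_cur[] = { 0x0F, 0xF0 };
            unsigned doms_new[] = { 0x0F, 0xC0 };
            unsigned i, j;

            /* destroy current domains that no longer appear in the new set */
            for (i = 0; i < ARRAY_SIZE(doms_cur); i++) {
                    for (j = 0; j < ARRAY_SIZE(doms_new); j++)
                            if (doms_cur[i] == doms_new[j])
                                    goto match1;
                    printf("detach_destroy_domains(%#x)\n", doms_cur[i]);
    match1:
                    ;
            }

            /* build new domains that were not already present */
            for (i = 0; i < ARRAY_SIZE(doms_new); i++) {
                    for (j = 0; j < ARRAY_SIZE(doms_cur); j++)
                            if (doms_new[i] == doms_cur[j])
                                    goto match2;
                    printf("build_sched_domains(%#x)\n", doms_new[i]);
    match2:
                    ;
            }
            return 0;
    }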