/kernel/linux/linux-5.10/arch/powerpc/kernel/ |
D | paca.c |
     63  size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);  in alloc_shared_lppaca()
    274  paca_nr_cpu_ids = nr_cpu_ids;  in allocate_paca_ptrs()
    276  paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;  in allocate_paca_ptrs()
    323  new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;  in free_unused_pacas()
    328  paca_nr_cpu_ids = nr_cpu_ids;  in free_unused_pacas()
    341  paca_ptrs_size + paca_struct_size, nr_cpu_ids);  in free_unused_pacas()
|
D | setup-common.c |
    335  if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)  in show_cpuinfo()
    347  if ((*pos) < nr_cpu_ids)  in c_start()
    451  cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),  in smp_setup_cpu_maps()
    455  __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));  in smp_setup_cpu_maps()
    482  for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {  in smp_setup_cpu_maps()
    499  if (cpu >= nr_cpu_ids) {  in smp_setup_cpu_maps()
    535  if (maxcpus > nr_cpu_ids) {  in smp_setup_cpu_maps()
    539  maxcpus, nr_cpu_ids);  in smp_setup_cpu_maps()
    540  maxcpus = nr_cpu_ids;  in smp_setup_cpu_maps()
    847  memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));  in smp_setup_pacas()
|
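The paca.c and setup-common.c hits above share one pattern: early boot tables are sized by nr_cpu_ids rather than the compile-time NR_CPUS, so memory scales with the CPU ids that can actually exist. A minimal sketch of that pattern, assuming a hypothetical cpu_to_phys table (the names here are illustrative, not the kernel's):

    #include <linux/kernel.h>
    #include <linux/memblock.h>
    #include <linux/cpumask.h>
    #include <linux/init.h>

    static u32 *cpu_to_phys;

    /* One u32 per usable CPU id: bounding by nr_cpu_ids (not NR_CPUS)
     * means an nr_cpus= boot limit shrinks this allocation too. */
    static void __init alloc_cpu_map(void)
    {
            cpu_to_phys = memblock_alloc(nr_cpu_ids * sizeof(u32),
                                         __alignof__(u32));
            if (!cpu_to_phys)
                    panic("%s: failed to allocate CPU map\n", __func__);
    }

setup-common.c later releases the same table (line 847) with a memblock_free() sized by the identical nr_cpu_ids expression, which is why allocation and free must agree on the bound.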
/kernel/linux/linux-5.10/arch/arm/mach-spear/ |
D | platsmp.c |
    102  if (ncores > nr_cpu_ids) {  in spear13xx_smp_init_cpus()
    104  ncores, nr_cpu_ids);  in spear13xx_smp_init_cpus()
    105  ncores = nr_cpu_ids;  in spear13xx_smp_init_cpus()
|
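This clamp is a recurring SMP-init idiom (it appears again below in bcm63xx_smp.c, devtree.c and omap-smp.c): the hardware may report more cores than the kernel has CPU ids for, so the count is capped at nr_cpu_ids before CPUs are marked possible. A minimal sketch of the shape, with a hypothetical helper name:

    #include <linux/cpumask.h>
    #include <linux/printk.h>
    #include <linux/init.h>

    /* Hypothetical init helper: never trust a hardware core count
     * beyond nr_cpu_ids, which CONFIG_NR_CPUS or nr_cpus= may have
     * lowered below what the SoC actually contains. */
    static void __init example_smp_init_cpus(unsigned int ncores)
    {
            unsigned int i;

            if (ncores > nr_cpu_ids) {
                    pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
                            ncores, nr_cpu_ids);
                    ncores = nr_cpu_ids;
            }

            for (i = 0; i < ncores; i++)
                    set_cpu_possible(i, true);
    }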
/kernel/linux/linux-5.10/arch/arm/mach-bcm/ |
D | bcm63xx_smp.c |
     64  if (ncores > nr_cpu_ids) {  in scu_a9_enable()
     66  ncores, nr_cpu_ids);  in scu_a9_enable()
     67  ncores = nr_cpu_ids;  in scu_a9_enable()
|
/kernel/linux/linux-5.10/kernel/ |
D | smp.c |
    170  if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))  in csd_lock_wait_toolong()
    299  if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {  in generic_exec_single()
    596  for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;  in smp_call_function_any()
    641  if (cpu >= nr_cpu_ids)  in smp_call_function_many_cond()
    650  if (next_cpu >= nr_cpu_ids) {  in smp_call_function_many_cond()
    775  if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)  in nrcpus()
    776  nr_cpu_ids = nr_cpus;  in nrcpus()
    795  unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
    796  EXPORT_SYMBOL(nr_cpu_ids);
    801  nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;  in setup_nr_cpu_ids()
    [all …]
|
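kernel/smp.c is where nr_cpu_ids is defined and trimmed, and the hits at lines 775-801 above contain its whole life cycle: it starts at the compile-time NR_CPUS, an early nr_cpus= boot option may lower it, and setup_nr_cpu_ids() finally trims it to the highest set bit in cpu_possible_mask plus one. Reassembled from those fragments (lightly reformatted excerpt of kernel/smp.c, 5.10):

    unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
    EXPORT_SYMBOL(nr_cpu_ids);

    /* "nr_cpus=N" on the command line can only lower the limit. */
    static int __init nrcpus(char *str)
    {
            int nr_cpus;

            if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                    nr_cpu_ids = nr_cpus;

            return 0;
    }
    early_param("nr_cpus", nrcpus);

    /* Trim to the actual extent of cpu_possible_mask. */
    void __init setup_nr_cpu_ids(void)
    {
            nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
    }

After this point a logical CPU number is valid only if it is below nr_cpu_ids, which is exactly what the bounds checks throughout the rest of this listing test.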
/kernel/linux/linux-5.10/arch/riscv/kernel/ |
D | smpboot.c |
     98  if (cpuid > nr_cpu_ids)  in setup_smp()
    100  cpuid, nr_cpu_ids);  in setup_smp()
    102  for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {  in setup_smp()
|
/kernel/linux/linux-5.10/include/trace/events/ |
D | walt.h |
     18  memcpy(dst, src, nr_cpu_ids * sizeof(u32));  in __window_data()
     20  memset(dst, 0, nr_cpu_ids * sizeof(u32));  in __window_data()
    154  __dynamic_array(u32, curr_sum, nr_cpu_ids)
    155  __dynamic_array(u32, prev_sum, nr_cpu_ids)
    195  __window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids),
    197  __window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids),
|
/kernel/linux/linux-5.10/include/linux/ |
D | cpumask.h |
     34  #define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
     37  #define nr_cpu_ids 1U  (macro; UP stub when NR_CPUS == 1)
     39  extern unsigned int nr_cpu_ids;
     45  #define nr_cpumask_bits nr_cpu_ids
    288  (cpu) < nr_cpu_ids;)
    300  (cpu) < nr_cpu_ids;)
    336  (cpu) < nr_cpu_ids;)
    957  nr_cpu_ids);  in cpumap_print_to_pagebuf()
|
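The three hits at lines 288-336 are the loop-termination tests inside the for_each_cpu() family of iterator macros: every walk runs until the mask search returns a value >= nr_cpu_ids, and on NR_CPUS == 1 builds the whole mechanism collapses because nr_cpu_ids is the constant 1U. A minimal sketch of code consuming that contract (the function name is illustrative):

    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* cpumask_next() returns >= nr_cpu_ids once the mask is exhausted;
     * that sentinel, not NR_CPUS, is what ends the walk. */
    static void print_online_cpus(void)
    {
            unsigned int cpu;

            for (cpu = cpumask_first(cpu_online_mask);
                 cpu < nr_cpu_ids;
                 cpu = cpumask_next(cpu, cpu_online_mask))
                    pr_info("cpu%u is online\n", cpu);
    }

The same bound is what cpumask_pr_args() at line 34 supplies to the %*pb/%*pbl printf formats, so printed masks are clipped to nr_cpu_ids as well.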
/kernel/linux/linux-5.10/arch/arm/kernel/ |
D | devtree.c |
    148  if (WARN(cpuidx > nr_cpu_ids, "DT /cpu %u nodes greater than "  in arm_dt_init_cpu_maps()
    150  cpuidx, nr_cpu_ids)) {  in arm_dt_init_cpu_maps()
    151  cpuidx = nr_cpu_ids;  in arm_dt_init_cpu_maps()
|
/kernel/linux/linux-5.10/kernel/irq/ |
D | ipi.c |
     70  if (next < nr_cpu_ids)  in irq_reserve_ipi()
     72  if (next < nr_cpu_ids) {  in irq_reserve_ipi()
    167  if (!data || !ipimask || cpu >= nr_cpu_ids)  in ipi_get_hwirq()
    197  if (cpu >= nr_cpu_ids)  in ipi_send_verify()
|
D | cpuhotplug.c |
     40  if (cpumask_any_but(m, cpu) < nr_cpu_ids &&  in irq_needs_fixup()
     41  cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {  in irq_needs_fixup()
    123  if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {  in migrate_one_irq()
|
D | migration.c |
     29  if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {  in irq_fixup_move_pending()
     77  if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {  in irq_move_masked_irq()
|
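All three irq files lean on the same convention: cpumask search helpers such as cpumask_any_and() and cpumask_any_but() return some matching CPU, or a value >= nr_cpu_ids when nothing matches. Comparing the result against nr_cpu_ids is therefore the kernel's idiom for "is this intersection empty?". A minimal sketch of the test migrate_one_irq() and irq_fixup_move_pending() perform (hypothetical wrapper name):

    #include <linux/cpumask.h>

    /* True when the affinity mask no longer contains any online CPU,
     * i.e. cpumask_any_and() found nothing and returned >= nr_cpu_ids. */
    static bool affinity_lost_all_cpus(const struct cpumask *affinity)
    {
            return cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids;
    }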
/kernel/linux/linux-5.10/scripts/gdb/linux/ |
D | timerlist.py |
    152  nr_cpu_ids = 1
    154  nr_cpu_ids = gdb.parse_and_eval("nr_cpu_ids")
    158  num_bytes = (nr_cpu_ids + 7) / 8
    172  extra = nr_cpu_ids % 8
|
/kernel/linux/linux-5.10/arch/powerpc/include/asm/ |
D | cputhreads.h |
     56  if (cpu < nr_cpu_ids)  in cpu_thread_mask_to_cores()
     65  return nr_cpu_ids >> threads_shift;  in cpu_nr_cores()
|
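powerpc lays out its CPU-id space so that each core owns a power-of-two block of thread ids, which lets cpu_nr_cores() derive the core count with a single shift. Worked example under that layout: with nr_cpu_ids == 128 and threads_shift == 3 (8 SMT threads per core), cpu_nr_cores() evaluates to 128 >> 3 == 16 cores.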
/kernel/linux/linux-5.10/arch/arm/mach-omap2/ |
D | omap-smp.c |
    278  if (ncores > nr_cpu_ids) {  in omap4_smp_init_cpus()
    280  ncores, nr_cpu_ids);  in omap4_smp_init_cpus()
    281  ncores = nr_cpu_ids;  in omap4_smp_init_cpus()
|
/kernel/linux/linux-5.10/drivers/perf/ |
D | arm_pmu_platform.c |
     89  cpu = nr_cpu_ids;  in pmu_parse_irq_affinity()
    124  if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {  in pmu_parse_irqs()
    144  if (cpu >= nr_cpu_ids)  in pmu_parse_irqs()
|
/kernel/linux/linux-5.10/kernel/bpf/ |
D | percpu_freelist.c |
     72  if (cpu >= nr_cpu_ids)  in ___pcpu_freelist_push_nmi()
    141  if (cpu >= nr_cpu_ids)  in ___pcpu_freelist_pop()
    176  if (cpu >= nr_cpu_ids)  in ___pcpu_freelist_pop_nmi()
|
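In the BPF per-CPU freelist these checks implement a wrap-around scan: the code starts at the current CPU's list, walks cpu_possible_mask, and when cpumask_next() runs past nr_cpu_ids it restarts from CPU 0 until it is back where it began. A minimal sketch of the traversal shape (simplified: no locking, and a plain bool array stands in for the per-CPU list heads):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Visit every possible CPU once, starting at the local one and
     * wrapping at nr_cpu_ids, in the style of ___pcpu_freelist_pop(). */
    static int find_first_nonempty_cpu(const bool *nonempty)
    {
            int orig_cpu, cpu;

            orig_cpu = cpu = raw_smp_processor_id();
            while (1) {
                    if (nonempty[cpu])
                            return cpu;
                    cpu = cpumask_next(cpu, cpu_possible_mask);
                    if (cpu >= nr_cpu_ids)
                            cpu = 0;
                    if (cpu == orig_cpu)
                            return -1;  /* every list was empty */
            }
    }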
/kernel/linux/linux-5.10/arch/x86/xen/ |
D | smp_pv.c |
    162  for (i = 0; i < nr_cpu_ids; i++) {  in _get_smp_config()
    183  nr_cpu_ids = nr_cpu_ids - subtract;  in _get_smp_config()
    250  for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)  in xen_pv_smp_prepare_cpus()
|
/kernel/linux/linux-5.10/arch/x86/kernel/ |
D | tsc_sync.c |
    101  if (next_cpu >= nr_cpu_ids)  in tsc_sync_check_timer_fn()
    209  refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;  in tsc_store_and_check_tsc_adjust()
    211  if (refcpu >= nr_cpu_ids) {  in tsc_store_and_check_tsc_adjust()
|
/kernel/linux/linux-5.10/lib/ |
D | cpumask.c |
    255  if (next >= nr_cpu_ids)  in cpumask_any_and_distribute()
    258  if (next < nr_cpu_ids)  in cpumask_any_and_distribute()
|
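cpumask_any_and_distribute() spreads successive picks across a mask instead of always returning the first CPU: it resumes the search after the previous result and wraps around when the search runs past nr_cpu_ids. A minimal sketch of that pattern (the in-tree version keeps its cursor in a per-CPU variable; a static suffices for the sketch and is not concurrency-safe):

    #include <linux/cpumask.h>

    /* Pick CPUs from src1 & src2 round-robin; a return value
     * >= nr_cpu_ids means the intersection is empty. */
    static int pick_spread_cpu(const struct cpumask *src1,
                               const struct cpumask *src2)
    {
            static int prev;
            int next;

            next = cpumask_next_and(prev, src1, src2);
            if (next >= nr_cpu_ids)
                    next = cpumask_first_and(src1, src2);
            if (next < nr_cpu_ids)
                    prev = next;

            return next;
    }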
/kernel/linux/linux-5.10/kernel/sched/ |
D | cpupri.c |
     76  if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)  in __cpupri_find()
    269  cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);  in cpupri_init()
|
D | isolation.c |
     30  if (cpu < nr_cpu_ids)  in housekeeping_any_cpu()
     88  if (err < 0 || cpumask_last(non_housekeeping_mask) >= nr_cpu_ids) {  in housekeeping_setup()
|
/kernel/linux/linux-5.10/drivers/base/ |
D | cpu.c |
    287  if (total_cpus && nr_cpu_ids < total_cpus) {  in print_cpus_offline()
    290  if (nr_cpu_ids == total_cpus-1)  in print_cpus_offline()
    291  len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids);  in print_cpus_offline()
    294  nr_cpu_ids, total_cpus - 1);  in print_cpus_offline()
    428  if (cpu < nr_cpu_ids && cpu_possible(cpu))  in get_cpu_device()
|
/kernel/linux/linux-5.10/arch/x86/events/intel/ |
D | cstate.c |
    341  if (cpu >= nr_cpu_ids)  in cstate_pmu_event_init()
    410  if (target < nr_cpu_ids) {  in cstate_cpu_exit()
    421  if (target < nr_cpu_ids) {  in cstate_cpu_exit()
    440  if (has_cstate_core && target >= nr_cpu_ids)  in cstate_cpu_init()
    449  if (has_cstate_pkg && target >= nr_cpu_ids)  in cstate_cpu_init()
|
/kernel/linux/linux-5.10/arch/arm64/include/asm/ |
D | smp_plat.h |
     37  for (cpu = 0; cpu < nr_cpu_ids; cpu++)  in get_logical_index()
|
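get_logical_index() is the reverse of the usual lookup: given a hardware id, it linearly scans the logical id space with nr_cpu_ids as the bound (the in-tree function compares an MPIDR against cpu_logical_map()). A minimal sketch of the same shape, with a hypothetical lookup table standing in for the real map:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Map a hardware CPU id back to its logical number, or -EINVAL.
     * nr_cpu_ids bounds the walk, so ids trimmed away at boot are
     * never reported as valid. */
    static int hwid_to_logical_cpu(const u64 *cpu_hwid, u64 hwid)
    {
            int cpu;

            for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                    if (cpu_hwid[cpu] == hwid)
                            return cpu;

            return -EINVAL;
    }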