Search results for refs:NR_CPUS (showing 1 – 25 of 326, sorted by relevance)
/kernel/linux/linux-5.10/include/linux/
  rcu_node_tree.h
     52: #if NR_CPUS <= RCU_FANOUT_1
     59: #elif NR_CPUS <= RCU_FANOUT_2
     62: # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
     67: #elif NR_CPUS <= RCU_FANOUT_3
     70: # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
     71: # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
     76: #elif NR_CPUS <= RCU_FANOUT_4
     79: # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
     80: # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
     81: # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
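For orientation, a small userspace sketch (not kernel code) that mirrors the level arithmetic in the hits above; the fanout of 16 and the NR_CPUS stand-in of 64 are assumptions for illustration, the real values come from Kconfig (RCU_FANOUT, RCU_FANOUT_LEAF).

    /* Userspace sketch of the rcu_node_tree.h level selection.
     * Assumed: RCU_FANOUT = RCU_FANOUT_LEAF = 16, NR_CPUS stand-in = 64. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    #define RCU_FANOUT_LEAF 16
    #define RCU_FANOUT      16
    #define RCU_FANOUT_1    (RCU_FANOUT_LEAF)
    #define RCU_FANOUT_2    (RCU_FANOUT_1 * RCU_FANOUT)
    #define RCU_FANOUT_3    (RCU_FANOUT_2 * RCU_FANOUT)

    int main(void)
    {
        int nr_cpus = 64;                    /* stand-in for NR_CPUS */

        if (nr_cpus <= RCU_FANOUT_1) {
            printf("1 level: a single leaf/root node\n");
        } else if (nr_cpus <= RCU_FANOUT_2) {
            /* NUM_RCU_LVL_1 = DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) */
            printf("2 levels: 1 root, %d leaves\n",
                   DIV_ROUND_UP(nr_cpus, RCU_FANOUT_1));   /* 64/16 = 4 */
        } else if (nr_cpus <= RCU_FANOUT_3) {
            printf("3 levels: 1 root, %d inner, %d leaves\n",
                   DIV_ROUND_UP(nr_cpus, RCU_FANOUT_2),
                   DIV_ROUND_UP(nr_cpus, RCU_FANOUT_1));
        }
        return 0;
    }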
  cpumask.h
     17: typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
     36: #if NR_CPUS == 1
     47: #define nr_cpumask_bits ((unsigned int)NR_CPUS)
    107: #if NR_CPUS > 1
    138: #if defined(CONFIG_CPU_ISOLATION_OPT) && NR_CPUS > 1
    170: #if NR_CPUS == 1
    341: [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
    827: extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
    847: bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);  (in reset_cpu_possible_mask())
    917: cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
    [all …]
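The hit at line 17 shows that a cpumask is nothing more than a fixed-size bitmap of NR_CPUS bits. A minimal userspace re-creation of that idea follows; NR_CPUS is hard-coded to 8 purely for the sketch, and unlike the kernel's set_bit()-based helpers these are neither atomic nor range-checked.

    #include <stdio.h>
    #include <limits.h>

    #define NR_CPUS           8
    #define BITS_PER_LONG     (CHAR_BIT * sizeof(long))
    #define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

    typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

    /* Non-atomic stand-ins for the kernel helpers of the same name. */
    static void cpumask_set_cpu(int cpu, cpumask_t *m)
    {
        m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
    }

    static int cpumask_test_cpu(int cpu, const cpumask_t *m)
    {
        return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        cpumask_t mask = { { 0 } };

        cpumask_set_cpu(3, &mask);
        printf("cpu3 set: %d, cpu4 set: %d\n",
               cpumask_test_cpu(3, &mask), cpumask_test_cpu(4, &mask));
        return 0;
    }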
  blockgroup_lock.h
     14: #define NR_BG_LOCKS (4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
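The single hit sizes the block-group lock table as 4 << ilog2(min(NR_CPUS, 32)), i.e. it scales with the CPU count but caps out at 32 CPUs (128 locks). A quick sketch of the same arithmetic, with ilog2() approximated by a compiler builtin:

    #include <stdio.h>

    /* floor(log2(n)) for n > 0, standing in for the kernel's ilog2() */
    static unsigned int ilog2(unsigned int n)
    {
        return 31 - __builtin_clz(n);
    }

    int main(void)
    {
        unsigned int nr_cpus[] = { 1, 4, 32, 256 };

        for (unsigned int i = 0; i < 4; i++) {
            unsigned int capped = nr_cpus[i] < 32 ? nr_cpus[i] : 32;
            printf("NR_CPUS=%-3u -> NR_BG_LOCKS=%u\n",
                   nr_cpus[i], 4u << ilog2(capped));   /* 4, 16, 128, 128 */
        }
        return 0;
    }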
/kernel/linux/linux-5.10/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/ |
  preempt.c
     26: struct lock_impl cpu_preemption_locks[NR_CPUS] = {
     28: #if (NR_CPUS - 1) & 1
     31: #if (NR_CPUS - 1) & 2
     34: #if (NR_CPUS - 1) & 4
     37: #if (NR_CPUS - 1) & 8
     40: #if (NR_CPUS - 1) & 16
     43: #if (NR_CPUS - 1) & 32
     67: assume(thread_cpu_id < NR_CPUS);  (in preempt_disable())
/kernel/linux/linux-5.10/arch/powerpc/include/asm/ |
  irq.h
     47: extern void *critirq_ctx[NR_CPUS];
     48: extern void *dbgirq_ctx[NR_CPUS];
     49: extern void *mcheckirq_ctx[NR_CPUS];
     55: extern void *hardirq_ctx[NR_CPUS];
     56: extern void *softirq_ctx[NR_CPUS];
/kernel/linux/linux-5.10/tools/testing/selftests/fpu/ |
  run_test_fpu.sh
     23: NR_CPUS=$(getconf _NPROCESSORS_ONLN)
     24: if [ ! $NR_CPUS ]; then
     25: NR_CPUS=1
     41: for c in $(seq 1 $NR_CPUS); do
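The script probes the online CPU count with getconf _NPROCESSORS_ONLN and falls back to 1 when the probe fails. The same probe from C can be sketched with sysconf(_SC_NPROCESSORS_ONLN), a widely supported glibc/musl extension rather than strict POSIX:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);

        if (nr_cpus < 1)        /* sysconf() returns -1 on failure */
            nr_cpus = 1;

        printf("running %ld worker(s)\n", nr_cpus);
        return 0;
    }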
/kernel/linux/linux-5.10/arch/ia64/kernel/ |
  err_inject.c
     43: static u64 call_start[NR_CPUS];
     44: static u64 phys_addr[NR_CPUS];
     45: static u64 err_type_info[NR_CPUS];
     46: static u64 err_struct_info[NR_CPUS];
     51: } __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS];
     52: static s64 status[NR_CPUS];
     53: static u64 capabilities[NR_CPUS];
     54: static u64 resources[NR_CPUS];
/kernel/linux/linux-5.10/arch/riscv/kernel/ |
  cpu_ops.c
     16: const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
     18: void *__cpu_up_stack_pointer[NR_CPUS] __section(".data");
     19: void *__cpu_up_task_pointer[NR_CPUS] __section(".data");
  smp.c
     33: unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
     34: [0 ... NR_CPUS-1] = INVALID_HARTID
     46: } ipi_data[NR_CPUS] __cacheline_aligned;
     52: for (i = 0; i < NR_CPUS; i++)  (in riscv_hartid_to_cpuid())
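Line 34 relies on the GCC/Clang range-designator extension ([first ... last] = value) to mark every slot of the map invalid at build time. A self-contained sketch of the same idiom; INVALID_HARTID is assumed to be -1UL here, its real value lives in the riscv headers.

    #include <stdio.h>

    #define NR_CPUS        4
    #define INVALID_HARTID (-1UL)              /* assumed value for the sketch */

    static unsigned long cpuid_to_hartid_map[NR_CPUS] = {
        [0 ... NR_CPUS - 1] = INVALID_HARTID   /* GCC/Clang extension */
    };

    int main(void)
    {
        cpuid_to_hartid_map[0] = 0;            /* pretend the boot CPU is hart 0 */

        for (int i = 0; i < NR_CPUS; i++) {
            if (cpuid_to_hartid_map[i] == INVALID_HARTID)
                printf("cpu%d -> no hart assigned\n", i);
            else
                printf("cpu%d -> hartid %lu\n", i, cpuid_to_hartid_map[i]);
        }
        return 0;
    }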
/kernel/linux/linux-5.10/arch/sparc/include/asm/ |
  topology_64.h
     53: extern cpumask_t cpu_core_map[NR_CPUS];
     54: extern cpumask_t cpu_core_sib_map[NR_CPUS];
     55: extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
/kernel/linux/linux-5.10/arch/ia64/include/asm/ |
  smp.h
     50: int cpu_phys_id[NR_CPUS];
     55: extern cpumask_t cpu_core_map[NR_CPUS];
     74: for (i = 0; i < NR_CPUS; ++i)  (in cpu_logical_id())
  numa.h
     25: extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
     51: extern struct node_cpuid_s node_cpuid[NR_CPUS];
/kernel/linux/linux-5.10/arch/ia64/include/asm/native/ |
  irq.h
     14: #if (NR_VECTORS + 32 * NR_CPUS) < 1024
     15: #define IA64_NATIVE_NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
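Lines 14-15 size the IRQ space as NR_VECTORS + 32 * NR_CPUS, but only when the sum stays below 1024. The same compile-time guard, sketched with made-up NR_VECTORS/NR_CPUS values:

    #include <stdio.h>

    #define NR_VECTORS 256   /* assumed for the sketch */
    #define NR_CPUS    16    /* assumed for the sketch */

    #if (NR_VECTORS + 32 * NR_CPUS) < 1024
    #define NR_IRQS (NR_VECTORS + 32 * NR_CPUS)
    #else
    #define NR_IRQS 1024
    #endif

    int main(void)
    {
        printf("NR_IRQS = %d\n", NR_IRQS);   /* 256 + 32*16 = 768 */
        return 0;
    }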
/kernel/linux/linux-5.10/arch/arm/mach-shmobile/ |
  headsmp.S
    118: cmp r1, #NR_CPUS
    141: .space NR_CPUS * 4
    144: .space NR_CPUS * 4
    147: .space NR_CPUS * 4
/kernel/linux/linux-5.10/arch/sparc/kernel/ |
  leon_smp.c
     54: extern volatile unsigned long cpu_callin_map[NR_CPUS];
    167: (unsigned int)nrcpu, (unsigned int)NR_CPUS,  (in leon_boot_cpus())
    237: for (i = 0; i < NR_CPUS; i++) {  (in leon_smp_done())
    368: unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
    369: unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
    380: register int high = NR_CPUS - 1;  (in leon_cross_call())
  smp_32.c
     42: volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
    190: if (cpuid >= NR_CPUS)  (in smp_prepare_cpus())
    234: if (mid < NR_CPUS) {  (in smp_setup_cpu_possible_map())
    246: if (cpuid >= NR_CPUS) {  (in smp_prepare_boot_cpu())
/kernel/linux/linux-5.10/arch/sh/kernel/ |
  irq.c
     65: static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
     66: static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
     68: static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
     69: static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
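Lines 68-69 reserve a single flat buffer of NR_CPUS * THREAD_SIZE bytes for the IRQ stacks. One plausible way such a block is carved into per-CPU slices is sketched below with toy sizes; the actual arch/sh indexing is not shown in these hits, so take this only as the obvious arithmetic.

    #include <stdio.h>
    #include <stddef.h>

    #define NR_CPUS     4
    #define THREAD_SIZE 8192                 /* toy value for the sketch */

    static char softirq_stack[NR_CPUS * THREAD_SIZE];

    /* Hypothetical helper: slice of the flat buffer belonging to one CPU. */
    static char *softirq_stack_for(int cpu)
    {
        return softirq_stack + (size_t)cpu * THREAD_SIZE;
    }

    int main(void)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            printf("cpu%d softirq stack at offset %zu\n",
                   cpu, (size_t)(softirq_stack_for(cpu) - softirq_stack));
        return 0;
    }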
/kernel/linux/linux-5.10/arch/sh/include/asm/ |
  fixmap.h
     54: FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
     58: FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
  smp.h
     18: extern int __cpu_number_map[NR_CPUS];
     22: extern int __cpu_logical_map[NR_CPUS];
/kernel/linux/linux-5.10/tools/testing/selftests/arm64/fp/ |
  sve-stress
      8: NR_CPUS=`nproc`
     42: for x in `seq 0 $((NR_CPUS * 4))`; do
  fpsimd-stress
      8: NR_CPUS=`nproc`
     43: for x in `seq 0 $((NR_CPUS * 4))`; do
/kernel/linux/linux-5.10/arch/powerpc/lib/ |
  locks.c
     29: BUG_ON(holder_cpu >= NR_CPUS);  (in splpar_spin_yield())
     55: BUG_ON(holder_cpu >= NR_CPUS);  (in splpar_rw_yield())
/kernel/linux/linux-5.10/tools/testing/selftests/rseq/ |
  run_param_test.sh
      4: NR_CPUS=`grep '^processor' /proc/cpuinfo | wc -l`
     35: NR_THREADS=$((6*${NR_CPUS}))
/kernel/linux/linux-5.10/arch/x86/kernel/ |
  cpuid.c
    152: if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,  (in cpuid_init())
    176: __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");  (in cpuid_init())
    185: __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");  (in cpuid_exit())
/kernel/linux/linux-5.10/arch/alpha/kernel/ |
  smp.c
     54: struct cpuinfo_alpha cpu_data[NR_CPUS];
     60: } ipi_data[NR_CPUS] __cacheline_aligned;
    246: for (i = 0; i < NR_CPUS; i++) {  (in recv_secondary_console_msg())
    489: for(cpu = 0; cpu < NR_CPUS; cpu++)  (in smp_cpus_done())
    653: for (cpu = 0; cpu < NR_CPUS; cpu++) {  (in flush_tlb_mm())
    700: for (cpu = 0; cpu < NR_CPUS; cpu++) {  (in flush_tlb_page())
    754: for (cpu = 0; cpu < NR_CPUS; cpu++) {  (in flush_icache_user_page())