Searched refs:threads_per_core (Results 1 – 20 of 20) sorted by relevance
23   extern int threads_per_core;
28   #define threads_per_core 1   macro
52   for (i = 0; i < NR_CPUS; i += threads_per_core) {   in cpu_thread_mask_to_cores()
83   return cpu & (threads_per_core - 1);   in cpu_thread_in_core()
93   return cpu & ~(threads_per_core - 1);   in cpu_first_thread_sibling()
98   return cpu | (threads_per_core - 1);   in cpu_last_thread_sibling()
109  if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))   in cpu_first_tlb_thread_sibling()
117  if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))   in cpu_last_tlb_thread_sibling()
125  if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))   in cpu_tlb_thread_sibling_step()
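The helpers in the result block above (cpu_thread_in_core(), cpu_first_thread_sibling(), cpu_last_thread_sibling()) work because threads_per_core is a power of two, so thread and core boundaries reduce to bit masks instead of division. The sketch below is a standalone user-space illustration of that arithmetic, not kernel code; the fixed SMT8 value and the example CPU id are assumptions.

#include <stdio.h>

/* Demo value; in the kernel this is set at boot and assumed here to be a
 * power of two, which is what makes the mask arithmetic valid. */
static int threads_per_core = 8;

static int cpu_thread_in_core(int cpu)       { return cpu & (threads_per_core - 1); }
static int cpu_first_thread_sibling(int cpu) { return cpu & ~(threads_per_core - 1); }
static int cpu_last_thread_sibling(int cpu)  { return cpu | (threads_per_core - 1); }

int main(void)
{
	int cpu = 13;	/* arbitrary example CPU id */

	/* With threads_per_core == 8: CPU 13 is thread 5 of the core
	 * spanning CPUs 8..15. */
	printf("cpu %d: thread %d, siblings %d..%d\n",
	       cpu,
	       cpu_thread_in_core(cpu),
	       cpu_first_thread_sibling(cpu),
	       cpu_last_thread_sibling(cpu));
	return 0;
}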
152  for (i = cpu + 1; i < cpu + threads_per_core; i++)   in wait_for_sync_step()
207  for (i = cpu + 1; i < cpu + threads_per_core; i++)   in unsplit_core()
326  threads_per_subcore = threads_per_core / subcores_per_core;   in cpu_update_split_mode()
431  if (setup_max_cpus % threads_per_core)   in subcore_init()
319  unsigned long core_thread_mask = (1UL << threads_per_core) - 1;   in power7_idle_insn()
370  == threads_per_core)   in power7_idle_insn()
612  unsigned long core_thread_mask = (1UL << threads_per_core) - 1;   in power9_idle_stop()
822  int need_awake = threads_per_core;   in pnv_power9_force_smt4_catch()
825  cpu0 = cpu & ~(threads_per_core - 1);   in pnv_power9_force_smt4_catch()
826  for (thr = 0; thr < threads_per_core; ++thr) {   in pnv_power9_force_smt4_catch()
832  for (thr = 0; thr < threads_per_core; ++thr) {   in pnv_power9_force_smt4_catch()
842  for (thr = 0; thr < threads_per_core; ++thr) {   in pnv_power9_force_smt4_catch()
851  for (thr = 0; thr < threads_per_core; ++thr) {   in pnv_power9_force_smt4_catch()
868  cpu0 = cpu & ~(threads_per_core - 1);   in pnv_power9_force_smt4_release()
[all …]
594   int nid, rc = 0, core_id = (cpu / threads_per_core);   in core_imc_mem_init()
631   int core_id = (cpu / threads_per_core);   in is_core_imc_mem_inited()
709   core_id = cpu / threads_per_core;   in ppc_core_imc_cpu_offline()
767   core_id = event->cpu / threads_per_core;   in core_imc_counters_release()
835   core_id = event->cpu / threads_per_core;   in core_imc_event_init()
1127  core_id = smp_processor_id() / threads_per_core;   in thread_imc_event_add()
1162  core_id = smp_processor_id() / threads_per_core;   in thread_imc_event_del()
1201  int core_id = (cpu_id / threads_per_core);   in trace_imc_mem_alloc()
1352  int core_id = smp_processor_id() / threads_per_core;   in trace_imc_event_add()
1402  int core_id = smp_processor_id() / threads_per_core;   in trace_imc_event_del()
[all …]
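Every match in the result block above derives a per-core index the same way: integer division of a CPU number by threads_per_core, so all SMT siblings of a core share one core_id and can index the same per-core counter memory. A minimal standalone sketch of that mapping follows; the CPU count and the SMT8 value are illustrative assumptions, not the driver's data.

#include <stdio.h>

#define NR_CPUS_DEMO 32		/* assumed CPU count for the demo */
static int threads_per_core = 8;	/* SMT8, matching the == 8 checks above */

int main(void)
{
	/* Every thread of a core maps to the same core_id, so per-core
	 * resources can be allocated once and looked up by that index. */
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
		int core_id = cpu / threads_per_core;

		if (cpu % threads_per_core == 0)
			printf("core %d owns cpus %d..%d\n",
			       core_id, cpu, cpu + threads_per_core - 1);
	}
	return 0;
}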
1730 if (threads_per_core == 8) in hv_24x7_init()
288  if (threads_per_core == 2) {   in smp_85xx_kick_cpu()
319  } else if (threads_per_core == 1) {   in smp_85xx_kick_cpu()
326  } else if (threads_per_core > 2) {   in smp_85xx_kick_cpu()
134  smt_enabled_at_boot = threads_per_core;   in check_smt_enabled()
139  smt_enabled_at_boot = threads_per_core;   in check_smt_enabled()
149  min(threads_per_core, smt);   in check_smt_enabled()
159  smt_enabled_at_boot = threads_per_core;   in check_smt_enabled()
388  int threads_per_core, threads_per_subcore, threads_shift __read_mostly;   variable
390  EXPORT_SYMBOL_GPL(threads_per_core);
399  threads_per_core = tpc;   in cpu_init_thread_core_maps()
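The result block above is where the global is defined and exported, and where cpu_init_thread_core_maps() stores the boot-time thread count. The sketch below shows the kind of initialization the assignment at line 399 belongs to; it is an illustrative reconstruction rather than the kernel's code (in the real kernel the thread count is derived from the device tree), and the shift computation is an assumption standing in for ilog2().

#include <stdio.h>

/* Globals mirroring the variables defined at line 388 above. */
static int threads_per_core, threads_per_subcore, threads_shift;

/* Sketch of an initializer in the spirit of cpu_init_thread_core_maps():
 * tpc is the number of hardware threads per core reported by firmware. */
static void init_thread_core_maps(int tpc)
{
	threads_per_core = tpc;
	threads_per_subcore = tpc;	/* no core splitting by default */

	threads_shift = 0;
	while ((1 << threads_shift) < tpc)	/* shift such that 1 << shift == tpc */
		threads_shift++;
}

int main(void)
{
	init_thread_core_maps(8);
	printf("threads_per_core=%d threads_shift=%d\n",
	       threads_per_core, threads_shift);
	return 0;
}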
946   for (i = first_thread; i < first_thread + threads_per_core; i++) {   in update_mask_from_threadgroup()
1152  int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);   in smp_prepare_cpus()
1277  if (threads_per_core > 1 && secondaries_inhibited() &&   in __cpu_up()
1575  for (i = first_thread; i < first_thread + threads_per_core; i++)   in add_cpu_to_masks()
928 idx = cpu / threads_per_core; in cpu_to_chip_id()
148  if (threads_per_core == 2) {   in rcpm_v2_cpu_die()
160  if (threads_per_core == 1)   in rcpm_v2_cpu_die()
353  if (threads_per_core == 2)   in kvmppc_core_init_vm_e500mc()
364  if (threads_per_core == 2)   in kvmppc_core_destroy_vm_e500mc()
400  kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);   in kvmppc_e500mc_init()
299 if (threads_per_core == 2) in get_thread_specific_lpid()
5150  for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {   in kvmppc_alloc_host_rm_ops()
6024  int first_cpu = i * threads_per_core;   in kvm_init_subcore_bitmap()
6038  for (j = 0; j < threads_per_core; j++) {   in kvm_init_subcore_bitmap()
726  if (c->threads_per_core > 1 || c->cores_per_socket > 1)   in show_cpuinfo()
838  c->threads_per_core = c->cores_per_socket = c->num_log = 1;   in identify_cpu()
843  if (c->threads_per_core > smp_num_siblings)   in identify_cpu()
844  smp_num_siblings = c->threads_per_core;   in identify_cpu()
581  if (cpu_data(cpu)->threads_per_core == 1 &&   in remove_siblinginfo()
739  if (cpu_data(cpu)->threads_per_core == 1 &&   in __cpu_up()
814  c->threads_per_core = info.overview_tpc;   in identify_siblings()
144 if (cpu_data(cpu)->threads_per_core <= 1 && in cache_shared_cpu_map_setup()
207  vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,   in init_cpu_associativity()
209  pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,   in init_cpu_associativity()
232  assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];   in __get_cpu_associativity()
214 unsigned char threads_per_core; /* Threads per core */ member
774 for (i = 0; i < threads_per_core; i++) { in verify_cpu_node_mapping()