/arch/x86/kernel/cpu/ |
D | cacheinfo.c |
    137  } split;  member
    146  } split;  member
    153  } split;  member
    285  eax->split.is_self_initializing = 1;  in amd_cpuid4()
    286  eax->split.type = types[leaf];  in amd_cpuid4()
    287  eax->split.level = levels[leaf];  in amd_cpuid4()
    288  eax->split.num_threads_sharing = 0;  in amd_cpuid4()
    289  eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;  in amd_cpuid4()
    293  eax->split.is_fully_associative = 1;  in amd_cpuid4()
    294  ebx->split.coherency_line_size = line_size - 1;  in amd_cpuid4()
    [all …]
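The amd_cpuid4() hits above synthesize an Intel-style CPUID leaf 4 from AMD data; note that the line size and core count are stored biased by one, as the leaf-4 encoding requires. A minimal sketch of that encode/decode convention (the helper names are hypothetical, for illustration only):

#include <stdio.h>

/*
 * CPUID leaf 4 stores several fields biased by one, which is why
 * amd_cpuid4() writes line_size - 1 and x86_max_cores - 1 above.
 * These helpers are illustrative, not kernel API.
 */
static unsigned int encode_line_size(unsigned int line_size)
{
	return line_size - 1;	/* value placed in ebx->split.coherency_line_size */
}

static unsigned int decode_line_size(unsigned int field)
{
	return field + 1;	/* value a leaf-4 consumer recovers */
}

int main(void)
{
	unsigned int raw = encode_line_size(64);

	printf("encoded %u, decoded %u\n", raw, decode_line_size(raw));
	return 0;
}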
|
/arch/x86/include/asm/ |
D | perf_event.h |
    119  } split;  member
    132  } split;  member
    143  } split;  member
    159  } split;  member
    171  } split;  member
    183  } split;  member
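These `} split; member` hits are the closing braces of perf_event.h's CPUID-register unions: each union overlays the raw 32-bit register (`full`) with named bitfields (`split`). A self-contained sketch of the pattern using the leaf-0xA EAX field layout; the raw value below is made up, not from real hardware:

#include <stdint.h>
#include <stdio.h>

/* Overlay a raw CPUID register with named bitfields, as perf_event.h does. */
union cpuid10_eax {
	struct {
		uint32_t version_id:8;
		uint32_t num_counters:8;
		uint32_t bit_width:8;
		uint32_t mask_length:8;
	} split;
	uint32_t full;
};

int main(void)
{
	union cpuid10_eax eax;

	eax.full = 0x08300805;	/* example raw value only */
	printf("version %u, %u counters, %u bits wide\n",
	       eax.split.version_id, eax.split.num_counters,
	       eax.split.bit_width);
	return 0;
}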
|
/arch/x86/kvm/vmx/ |
D | pmu_intel.c |
    497  pmu->version = eax.split.version_id;  in intel_pmu_refresh()
    503  pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,  in intel_pmu_refresh()
    505  eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);  in intel_pmu_refresh()
    506  pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;  in intel_pmu_refresh()
    507  eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);  in intel_pmu_refresh()
    509  ((1ull << eax.split.mask_length) - 1);  in intel_pmu_refresh()
    515  min_t(int, edx.split.num_counters_fixed,  in intel_pmu_refresh()
    517  edx.split.bit_width_fixed = min_t(int,  in intel_pmu_refresh()
    518  edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);  in intel_pmu_refresh()
    520  ((u64)1 << edx.split.bit_width_fixed) - 1;  in intel_pmu_refresh()
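intel_pmu_refresh() clamps every guest-supplied CPUID 0xA field against the host's x86_pmu limits before deriving counter masks, so a guest can never claim more or wider counters than the host actually has. A minimal sketch of that clamp-then-mask idiom, with illustrative values rather than the real KVM structures:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Illustrative host limits and guest-requested values. */
	unsigned int host_counters = 8, host_bit_width = 48;
	unsigned int guest_counters = 32, guest_bit_width = 64;
	uint64_t counter_mask;

	/* Clamp guest claims to host capability, as intel_pmu_refresh() does. */
	guest_counters = MIN(guest_counters, host_counters);
	guest_bit_width = MIN(guest_bit_width, host_bit_width);

	/* Then derive the counter value mask from the clamped width. */
	counter_mask = ((uint64_t)1 << guest_bit_width) - 1;

	printf("%u counters, mask %#llx\n", guest_counters,
	       (unsigned long long)counter_mask);
	return 0;
}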
|
/arch/x86/events/zhaoxin/ |
D | core.c |
    522  if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT - 1)  in zhaoxin_pmu_init()
    525  version = eax.split.version_id;  in zhaoxin_pmu_init()
    533  x86_pmu.num_counters = eax.split.num_counters;  in zhaoxin_pmu_init()
    534  x86_pmu.cntval_bits = eax.split.bit_width;  in zhaoxin_pmu_init()
    535  x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;  in zhaoxin_pmu_init()
    537  x86_pmu.events_mask_len = eax.split.mask_length;  in zhaoxin_pmu_init()
    539  x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;  in zhaoxin_pmu_init()
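zhaoxin_pmu_init() copies the CPUID 0xA geometry into x86_pmu, including events_mask_len, which bounds how many bits of the EBX event-unavailability bitmap are architecturally defined. A sketch of honoring that bound when testing one event (values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/*
 * CPUID 0xA: EBX bit N set means architectural event N is unavailable,
 * but only the low mask_length bits of EBX are defined.
 */
static bool arch_event_available(uint32_t ebx, unsigned int mask_length,
				 unsigned int event)
{
	if (event >= mask_length)
		return false;	/* not architecturally enumerated */
	return !(ebx & (1u << event));
}

int main(void)
{
	uint32_t ebx = 0x00000040;	/* example: event 6 unavailable */

	printf("event 0 available: %d\n", arch_event_available(ebx, 7, 0));
	printf("event 6 available: %d\n", arch_event_available(ebx, 7, 6));
	return 0;
}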
|
/arch/x86/tools/ |
D | chkobjdump.awk | 23 split(verstr, ver, ".");
|
D | objdump_reformat.awk | 25 if (split($0, field, "\t") < 3) {
|
D | gen-insn-attr-x86.awk | 295 count = split($(i++), opnds, ",")
|
/arch/nds32/ |
D | Kconfig.cpu |
    169  Depending on the selected kernel/user memory split, minimum
    192  prompt "Memory split"
    196  Select the desired split between kernel and user memory.
    202  bool "3G/1G user/kernel split"
    204  bool "3G/1G user/kernel split (for full 1G low memory)"
    206  bool "2G/2G user/kernel split"
    208  bool "1G/3G user/kernel split"
|
/arch/riscv/boot/dts/microchip/ |
D | microchip-mpfs.dtsi |
    51  tlb-split;
    78  tlb-split;
    105  tlb-split;
    132  tlb-split;
|
/arch/riscv/boot/dts/sifive/ |
D | fu540-c000.dtsi |
    56  tlb-split;
    80  tlb-split;
    104  tlb-split;
    128  tlb-split;
|
D | fu740-c000.dtsi |
    58  tlb-split;
    82  tlb-split;
    106  tlb-split;
    130  tlb-split;
|
/arch/x86/events/intel/ |
D | lbr.c |
    1495  lbr_nr = fls(eax.split.lbr_depth_mask) * 8;  in intel_pmu_arch_lbr_init()
    1503  x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;  in intel_pmu_arch_lbr_init()
    1504  x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;  in intel_pmu_arch_lbr_init()
    1505  x86_pmu.lbr_lip = eax.split.lbr_lip;  in intel_pmu_arch_lbr_init()
    1506  x86_pmu.lbr_cpl = ebx.split.lbr_cpl;  in intel_pmu_arch_lbr_init()
    1507  x86_pmu.lbr_filter = ebx.split.lbr_filter;  in intel_pmu_arch_lbr_init()
    1508  x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;  in intel_pmu_arch_lbr_init()
    1509  x86_pmu.lbr_mispred = ecx.split.lbr_mispred;  in intel_pmu_arch_lbr_init()
    1510  x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;  in intel_pmu_arch_lbr_init()
    1511  x86_pmu.lbr_br_type = ecx.split.lbr_br_type;  in intel_pmu_arch_lbr_init()
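The line 1495 hit computes the maximum architectural LBR depth: CPUID 0x1C reports supported depths as a bitmap where bit N means 8 * (N + 1) entries, so the deepest option is fls(mask) * 8. A standalone sketch (fls_() is a portable stand-in for the kernel's fls()):

#include <stdio.h>

/* Stand-in for the kernel's fls(): index of the highest set bit, 1-based. */
static int fls_(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* Arch LBR depth mask: bit N set means depth 8 * (N + 1) supported. */
	unsigned int depth_mask = 0x7;	/* example: depths 8, 16, 24 */

	printf("max LBR depth: %d\n", fls_(depth_mask) * 8);
	return 0;
}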
|
D | core.c |
    4867  if (ebx.split.no_branch_misses_retired) {  in intel_nehalem_quirk()
    4875  ebx.split.no_branch_misses_retired = 0;  in intel_nehalem_quirk()
    5566  if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)  in intel_pmu_init()
    5569  version = eax.split.version_id;  in intel_pmu_init()
    5576  x86_pmu.num_counters = eax.split.num_counters;  in intel_pmu_init()
    5577  x86_pmu.cntval_bits = eax.split.bit_width;  in intel_pmu_init()
    5578  x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;  in intel_pmu_init()
    5581  x86_pmu.events_mask_len = eax.split.mask_length;  in intel_pmu_init()
    5593  max((int)edx.split.num_counters_fixed, assume);  in intel_pmu_init()
    5619  x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;  in intel_pmu_init()
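The intel_nehalem_quirk() hits clear the no_branch_misses_retired bit that the CPU erroneously sets: the event does work on Nehalem, so the quirk scrubs the flag before generic init consumes EBX. A reduced sketch of flipping one bit through the union overlay (only the bit of interest is named; the position follows the architectural leaf-0xA EBX layout):

#include <stdint.h>
#include <stdio.h>

/* Same overlay idiom as perf_event.h, reduced to the one bit of interest. */
union cpuid10_ebx {
	struct {
		uint32_t reserved_lo:6;
		uint32_t no_branch_misses_retired:1;
		uint32_t reserved_hi:25;
	} split;
	uint32_t full;
};

int main(void)
{
	union cpuid10_ebx ebx = { .full = 0x40 };	/* bit 6 set */

	/* The quirk overrides the erratum: the event does work on Nehalem. */
	if (ebx.split.no_branch_misses_retired)
		ebx.split.no_branch_misses_retired = 0;

	printf("ebx now %#x\n", ebx.full);
	return 0;
}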
|
/arch/x86/kernel/cpu/resctrl/ |
D | internal.h |
    474  } split;  member
    482  } split;  member
    490  } split;  member
|
D | core.c |
    186  hw_res->num_closid = edx.split.cos_max + 1;  in __get_mem_config_intel()
    187  max_delay = eax.split.max_delay + 1;  in __get_mem_config_intel()
    221  hw_res->num_closid = edx.split.cos_max + 1;  in __rdt_get_mem_config_amd()
    252  hw_res->num_closid = edx.split.cos_max + 1;  in rdt_get_cache_alloc_cfg()
    253  r->cache.cbm_len = eax.split.cbm_len + 1;  in rdt_get_cache_alloc_cfg()
    254  r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;  in rdt_get_cache_alloc_cfg()
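The resctrl hits show the same minus-one bias as leaf 4: CPUID 0x10 reports cos_max and cbm_len biased by one, and the default control value is a full mask of cbm_len + 1 one-bits. A short sketch of that derivation (plain C in place of the kernel's BIT_MASK()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * CPUID 0x10 reports the capacity bitmask length biased by one:
	 * cbm_len == 19 means a 20-bit CBM. The all-ways-allowed default
	 * is a mask of cbm_len + 1 one-bits, as rdt_get_cache_alloc_cfg()
	 * computes with BIT_MASK(cbm_len + 1) - 1.
	 */
	unsigned int cbm_len = 19;	/* example: a 20-bit CBM */
	uint32_t default_ctrl = (1u << (cbm_len + 1)) - 1;

	printf("cbm bits: %u, default_ctrl: %#x\n", cbm_len + 1, default_ctrl);
	return 0;
}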
|
/arch/x86/kvm/ |
D | cpuid.c |
    749  eax.split.version_id = min(cap.version, 2);  in __do_cpuid_func()
    750  eax.split.num_counters = cap.num_counters_gp;  in __do_cpuid_func()
    751  eax.split.bit_width = cap.bit_width_gp;  in __do_cpuid_func()
    752  eax.split.mask_length = cap.events_mask_len;  in __do_cpuid_func()
    754  edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);  in __do_cpuid_func()
    755  edx.split.bit_width_fixed = cap.bit_width_fixed;  in __do_cpuid_func()
    757  edx.split.anythread_deprecated = 1;  in __do_cpuid_func()
    758  edx.split.reserved1 = 0;  in __do_cpuid_func()
    759  edx.split.reserved2 = 0;  in __do_cpuid_func()
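__do_cpuid_func() builds the guest's CPUID 0xA view field by field, capping what the host exposes (e.g. version at 2) and zeroing reserved bits, then hands the packed register back. A sketch of composing a register that way through the union (host values are illustrative; KVM takes them from its PMU capability struct):

#include <stdint.h>
#include <stdio.h>

/* Leaf-0xA EAX layout, as in the perf_event.h unions. */
union cpuid10_eax {
	struct {
		uint32_t version_id:8;
		uint32_t num_counters:8;
		uint32_t bit_width:8;
		uint32_t mask_length:8;
	} split;
	uint32_t full;
};

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Illustrative host capabilities only. */
	uint32_t host_version = 5, host_gp = 8, host_width = 48, host_mask_len = 8;
	union cpuid10_eax eax = { .full = 0 };

	/* Fill the fields, then hand the packed .full value to the guest. */
	eax.split.version_id = min_u32(host_version, 2);
	eax.split.num_counters = host_gp;
	eax.split.bit_width = host_width;
	eax.split.mask_length = host_mask_len;

	printf("guest CPUID.0xA EAX = %#x\n", eax.full);
	return 0;
}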
|
/arch/sh/cchips/ |
D | Kconfig | 28 # These will also be split into the Kconfig's below
|
/arch/mips/mm/ |
D | tlbex.c |
    1419  u32 *split;  in build_r4000_tlb_refill_handler() local
    1426  split = labels[i].addr;  in build_r4000_tlb_refill_handler()
    1431  if (split > tlb_handler + MIPS64_REFILL_INSNS ||  in build_r4000_tlb_refill_handler()
    1432  split < p - MIPS64_REFILL_INSNS)  in build_r4000_tlb_refill_handler()
    1441  split = tlb_handler + MIPS64_REFILL_INSNS - 2;  in build_r4000_tlb_refill_handler()
    1448  if (uasm_insn_has_bdelay(relocs, split - 1))  in build_r4000_tlb_refill_handler()
    1449  split--;  in build_r4000_tlb_refill_handler()
    1452  uasm_copy_handler(relocs, labels, tlb_handler, split, f);  in build_r4000_tlb_refill_handler()
    1453  f += split - tlb_handler;  in build_r4000_tlb_refill_handler()
    1459  if (uasm_insn_has_bdelay(relocs, split))  in build_r4000_tlb_refill_handler()
    [all …]
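build_r4000_tlb_refill_handler() copies an oversized handler in two halves and must pick a split point that does not separate a branch from its delay slot, which is what the uasm_insn_has_bdelay() checks at 1448 and 1459 guard. A simplified sketch of that adjustment; the opcode test in insn_has_bdelay() is purely illustrative, not the real uasm predicate:

#include <stdio.h>
#include <stdbool.h>

typedef unsigned int u32;

/*
 * Hypothetical stand-in for uasm_insn_has_bdelay(): says whether the
 * instruction at *p is a branch whose delay slot must stay with it.
 */
static bool insn_has_bdelay(const u32 *p)
{
	return (*p >> 26) == 0x04;	/* illustrative opcode test only */
}

/*
 * Pick a split point for copying a too-large handler in two halves:
 * never split between a branch and its delay slot.
 */
static u32 *adjust_split(u32 *split)
{
	if (insn_has_bdelay(split - 1))
		split--;
	return split;
}

int main(void)
{
	u32 handler[8] = { 0, 0, 0x10000000, 0, 0, 0, 0, 0 };
	u32 *split = adjust_split(&handler[3]);

	printf("split moved back by %td insn(s)\n", &handler[3] - split);
	return 0;
}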
|
/arch/arm/boot/dts/ |
D | intel-ixp4xx-reference-design.dtsi | 70 intel,ixp4xx-eb-ahb-split-transfers = <0>;
|
D | intel-ixp42x-arcom-vulcan.dts | 73 intel,ixp4xx-eb-ahb-split-transfers = <1>;
|
D | intel-ixp42x-gateworks-gw2348.dts | 97 intel,ixp4xx-eb-ahb-split-transfers = <0>;
|
D | intel-ixp43x-gateworks-gw2358.dts | 113 intel,ixp4xx-eb-ahb-split-transfers = <0>;
|
/arch/powerpc/boot/dts/fsl/ |
D | bsc9132si-post.dtsi | 39 /* FIXME: Test whether interrupts are split */
|
/arch/arm64/boot/dts/mediatek/ |
D | mt8173.dtsi |
    1194  split0: split@14018000 {
    1195  compatible = "mediatek,mt8173-disp-split";
    1201  split1: split@14019000 {
    1202  compatible = "mediatek,mt8173-disp-split";
|
/arch/arm/ |
D | Kconfig |
    1249  prompt "Memory split"
    1253  Select the desired split between kernel and user memory.
    1259  bool "3G/1G user/kernel split"
    1262  bool "3G/1G user/kernel split (for full 1G low memory)"
    1264  bool "2G/2G user/kernel split"
    1266  bool "1G/3G user/kernel split"
    1470  Depending on the selected kernel/user memory split, minimum
|