Lines Matching refs:this_leaf (each entry: source line, matched code, enclosing function; declarations are tagged argument or local)
329 static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) in amd_init_l3_cache() argument
338 this_leaf->nb = node_to_amd_nb(node); in amd_init_l3_cache()
339 if (this_leaf->nb && !this_leaf->nb->l3_cache.indices) in amd_init_l3_cache()
340 amd_calc_l3_indices(this_leaf->nb); in amd_init_l3_cache()
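
The three hits at 338-340 show a lazy-init pattern: the leaf caches a pointer to its node's northbridge descriptor, and the L3 index count is computed only on first use. Below is a minimal userspace sketch of that pattern; the types and helpers are stand-ins, not the kernel's amd_northbridge, node_to_amd_nb(), or amd_calc_l3_indices().

    #include <stdio.h>

    struct l3_cache { unsigned indices; };          /* stub */
    struct amd_northbridge { struct l3_cache l3_cache; };

    static struct amd_northbridge nb_for_node0;     /* pretend node 0's NB */

    static struct amd_northbridge *node_to_nb(int node)
    {
        return node == 0 ? &nb_for_node0 : NULL;    /* stubbed lookup */
    }

    static void calc_l3_indices(struct amd_northbridge *nb)
    {
        nb->l3_cache.indices = 4096;                /* placeholder result */
        puts("indices computed");                   /* runs only once */
    }

    static void init_l3(struct amd_northbridge **nbp, int node)
    {
        *nbp = node_to_nb(node);
        if (*nbp && !(*nbp)->l3_cache.indices)      /* guard: first call only */
            calc_l3_indices(*nbp);
    }

    int main(void)
    {
        struct amd_northbridge *nb;
        init_l3(&nb, 0);   /* prints once */
        init_l3(&nb, 0);   /* guard skips recomputation */
        return 0;
    }
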
363 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, in show_cache_disable() argument
368 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) in show_cache_disable()
371 index = amd_get_l3_disable_slot(this_leaf->base.nb, slot); in show_cache_disable()
380 show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
383 return show_cache_disable(this_leaf, buf, slot); \
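
Lines 363-383 pair a worker that takes the slot number with a macro that stamps out a thin per-slot wrapper for each sysfs attribute. A self-contained sketch of the same token-pasting pattern, with userspace stand-ins rather than the kernel's sysfs types:

    #include <stdio.h>

    static int show_cache_disable(char *buf, unsigned int slot)
    {
        return sprintf(buf, "slot %u\n", slot);     /* common worker */
    }

    #define SHOW_CACHE_DISABLE(slot)                                \
    static int show_cache_disable_##slot(char *buf)                 \
    {                                                               \
        return show_cache_disable(buf, slot);                       \
    }
    SHOW_CACHE_DISABLE(0)   /* defines show_cache_disable_0() */
    SHOW_CACHE_DISABLE(1)   /* defines show_cache_disable_1() */

    int main(void)
    {
        char buf[32];
        show_cache_disable_0(buf); fputs(buf, stdout);
        show_cache_disable_1(buf); fputs(buf, stdout);
        return 0;
    }
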
450 static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, in store_cache_disable() argument
460 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) in store_cache_disable()
463 cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); in store_cache_disable()
468 err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); in store_cache_disable()
480 store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
484 return store_cache_disable(this_leaf, buf, count, slot); \
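
The store side (450-484) mirrors it: gate on the northbridge feature, parse the user buffer, and return either a negative errno from the setter or the count of bytes consumed on success. A hedged model with a stubbed gate and a hypothetical setter:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int nb_present = 1;                      /* stub feature gate */

    static int set_l3_disable_slot(unsigned slot, unsigned long val)
    {
        if (slot > 1)
            return -EINVAL;                         /* bad slot */
        printf("slot %u <- index %lu\n", slot, val);
        return 0;
    }

    static long store_cache_disable(const char *buf, unsigned slot)
    {
        unsigned long val;
        char *end;

        if (!nb_present)
            return -EINVAL;                         /* no L3 disable support */
        val = strtoul(buf, &end, 10);
        if (end == buf)
            return -EINVAL;                         /* not a number */
        int err = set_l3_disable_slot(slot, val);
        return err ? err : (long)(end - buf);       /* bytes consumed on success */
    }

    int main(void)
    {
        printf("%ld\n", store_cache_disable("123", 0));
        printf("%ld\n", store_cache_disable("oops", 1));
        return 0;
    }
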
495 show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) in show_subcaches() argument
497 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) in show_subcaches()
504 store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, in store_subcaches() argument
512 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) in store_subcaches()
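
show_subcaches()/store_subcaches() open with the same guard seen throughout: no northbridge descriptor, or the feature bit absent, means an early failure. A tiny model of that gate, with stub flag values in place of the kernel's AMD_NB_* constants:

    #include <errno.h>
    #include <stdio.h>

    #define NB_L3_INDEX_DISABLE  (1u << 0)   /* stub feature flags */
    #define NB_L3_PARTITIONING   (1u << 1)

    struct northbridge { unsigned features; };

    static int check_gate(const struct northbridge *nb, unsigned feature)
    {
        if (!nb || !(nb->features & feature))
            return -EINVAL;                  /* attribute access fails early */
        return 0;
    }

    int main(void)
    {
        struct northbridge nb = { .features = NB_L3_PARTITIONING };
        printf("%d\n", check_gate(&nb, NB_L3_PARTITIONING));   /* 0 */
        printf("%d\n", check_gate(&nb, NB_L3_INDEX_DISABLE));  /* -EINVAL */
        printf("%d\n", check_gate(NULL, NB_L3_PARTITIONING));  /* -EINVAL */
        return 0;
    }
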
533 struct _cpuid4_info_regs *this_leaf) in cpuid4_cache_lookup_regs() argument
542 amd_init_l3_cache(this_leaf, index); in cpuid4_cache_lookup_regs()
550 this_leaf->eax = eax; in cpuid4_cache_lookup_regs()
551 this_leaf->ebx = ebx; in cpuid4_cache_lookup_regs()
552 this_leaf->ecx = ecx; in cpuid4_cache_lookup_regs()
553 this_leaf->size = (ecx.split.number_of_sets + 1) * in cpuid4_cache_lookup_regs()
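
Line 553 is truncated by the match; in context it multiplies four CPUID leaf-4 fields, each architecturally stored as (value - 1), hence the repeated "+ 1". A standalone model of that computation; the struct is a stand-in for the kernel's register unions, with field comments reflecting the CPUID layout:

    #include <stdio.h>

    struct leaf4 {
        unsigned number_of_sets;          /* ECX[31:0], stored minus one */
        unsigned coherency_line_size;     /* EBX[11:0], stored minus one */
        unsigned physical_line_partition; /* EBX[21:12], stored minus one */
        unsigned ways_of_associativity;   /* EBX[31:22], stored minus one */
    };

    static unsigned long cache_size_bytes(const struct leaf4 *r)
    {
        return (unsigned long)(r->number_of_sets + 1) *
               (r->coherency_line_size + 1) *
               (r->physical_line_partition + 1) *
               (r->ways_of_associativity + 1);
    }

    int main(void)
    {
        /* Example: 8192 sets, 64-byte lines, 1 partition, 16 ways -> 8 MiB */
        struct leaf4 l3 = { 8191, 63, 0, 15 };
        printf("%lu KB\n", cache_size_bytes(&l3) / 1024);
        return 0;
    }
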
600 struct _cpuid4_info_regs this_leaf; in init_intel_cacheinfo() local
603 retval = cpuid4_cache_lookup_regs(i, &this_leaf); in init_intel_cacheinfo()
605 switch (this_leaf.eax.split.level) { in init_intel_cacheinfo()
607 if (this_leaf.eax.split.type == in init_intel_cacheinfo()
609 new_l1d = this_leaf.size/1024; in init_intel_cacheinfo()
610 else if (this_leaf.eax.split.type == in init_intel_cacheinfo()
612 new_l1i = this_leaf.size/1024; in init_intel_cacheinfo()
615 new_l2 = this_leaf.size/1024; in init_intel_cacheinfo()
616 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; in init_intel_cacheinfo()
621 new_l3 = this_leaf.size/1024; in init_intel_cacheinfo()
622 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; in init_intel_cacheinfo()
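
The switch at 605-622 buckets each enumerated leaf by cache level, splitting level 1 by type, and records sizes in KB. A compact standalone rendering of that dispatch, with a stub enum in place of the kernel's cache-type values:

    #include <stdio.h>

    enum cache_type { CTYPE_DATA = 1, CTYPE_INST = 2, CTYPE_UNIFIED = 3 };

    struct leaf { int level; enum cache_type type; unsigned size; };

    int main(void)
    {
        struct leaf leaves[] = {
            { 1, CTYPE_DATA,    32 * 1024 },
            { 1, CTYPE_INST,    32 * 1024 },
            { 2, CTYPE_UNIFIED, 256 * 1024 },
            { 3, CTYPE_UNIFIED, 8192 * 1024 },
        };
        unsigned l1d = 0, l1i = 0, l2 = 0, l3 = 0;

        for (unsigned i = 0; i < sizeof(leaves) / sizeof(leaves[0]); i++) {
            struct leaf *lf = &leaves[i];
            switch (lf->level) {
            case 1:
                if (lf->type == CTYPE_DATA)
                    l1d = lf->size / 1024;      /* KB, as in lines 609/612 */
                else if (lf->type == CTYPE_INST)
                    l1i = lf->size / 1024;
                break;
            case 2:
                l2 = lf->size / 1024;
                break;
            case 3:
                l3 = lf->size / 1024;
                break;
            }
        }
        printf("L1d %uK L1i %uK L2 %uK L3 %uK\n", l1d, l1i, l2, l3);
        return 0;
    }
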
730 struct _cpuid4_info *this_leaf; in cache_shared_amd_cpu_map_setup() local
740 this_leaf = CPUID4_INFO_IDX(i, index); in cache_shared_amd_cpu_map_setup()
744 set_bit(sibling, this_leaf->shared_cpu_map); in cache_shared_amd_cpu_map_setup()
752 this_leaf = CPUID4_INFO_IDX(i, index); in cache_shared_amd_cpu_map_setup()
756 set_bit(sibling, this_leaf->shared_cpu_map); in cache_shared_amd_cpu_map_setup()
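
In the AMD path (730-756), every CPU that shares the cache gets every sibling's bit set in its shared_cpu_map. A sketch using a flat bitmap in place of a cpumask, with set_cpu() modeling set_bit():

    #include <stdio.h>

    #define NCPUS 8

    struct leaf { unsigned long shared_cpu_map; };

    static void set_cpu(unsigned long *map, int cpu)
    {
        *map |= 1UL << cpu;                  /* models set_bit() */
    }

    int main(void)
    {
        struct leaf l3[NCPUS] = { { 0 } };
        int sharers[] = { 0, 1, 2, 3 };      /* CPUs on one L3, say */

        for (int i = 0; i < 4; i++)          /* every sharer ... */
            for (int s = 0; s < 4; s++)      /* ... learns every sibling */
                set_cpu(&l3[sharers[i]].shared_cpu_map, sharers[s]);

        printf("cpu0 map: %#lx\n", l3[0].shared_cpu_map);   /* 0xf */
        return 0;
    }
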
766 struct _cpuid4_info *this_leaf, *sibling_leaf; in cache_shared_cpu_map_setup() local
776 this_leaf = CPUID4_INFO_IDX(cpu, index); in cache_shared_cpu_map_setup()
777 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; in cache_shared_cpu_map_setup()
780 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); in cache_shared_cpu_map_setup()
788 to_cpumask(this_leaf->shared_cpu_map)); in cache_shared_cpu_map_setup()
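
The generic path (766-788) derives the sharer count from CPUID's num_threads_sharing field (stored minus one, hence line 777's "+ 1"); a single-thread leaf maps only to itself, otherwise CPUs whose APIC IDs agree above log2(count) are grouped together. A sketch of that shift-and-compare grouping, where count_order() is a hypothetical stand-in for the kernel's get_count_order():

    #include <stdio.h>

    static int count_order(unsigned n)          /* ceil(log2(n)) */
    {
        int order = 0;
        while ((1u << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned apicid[4] = { 0, 1, 2, 3 };    /* toy topology */
        unsigned field = 1;                     /* CPUID value: 2 threads share */
        unsigned sharers = 1 + field;           /* line 777's "+ 1" */
        int shift = count_order(sharers);

        for (int cpu = 0; cpu < 4; cpu++) {
            unsigned long map = 0;
            if (sharers == 1) {
                map |= 1UL << cpu;              /* line 780: self only */
            } else {
                for (int s = 0; s < 4; s++)
                    if ((apicid[cpu] >> shift) == (apicid[s] >> shift))
                        map |= 1UL << s;
            }
            printf("cpu%d map %#lx\n", cpu, map);   /* pairs: 0x3, 0x3, 0xc, 0xc */
        }
        return 0;
    }
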
801 struct _cpuid4_info *this_leaf, *sibling_leaf; in cache_remove_shared_cpu_map() local
804 this_leaf = CPUID4_INFO_IDX(cpu, index); in cache_remove_shared_cpu_map()
805 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) { in cache_remove_shared_cpu_map()
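
cache_remove_shared_cpu_map() (801-805) is the teardown: walk the departing CPU's own map and clear its bit from every sibling's copy. Modeled with flat bitmaps standing in for cpumasks:

    #include <stdio.h>

    int main(void)
    {
        unsigned long map[4] = { 0xf, 0xf, 0xf, 0xf };  /* 4 CPUs share a leaf */
        int dying = 2;

        for (int s = 0; s < 4; s++)
            if (map[dying] & (1UL << s))        /* for_each_cpu(sibling, ...) */
                map[s] &= ~(1UL << dying);      /* drop the dying CPU's bit */

        for (int s = 0; s < 4; s++)
            printf("cpu%d map %#lx\n", s, map[s]);      /* 0xb everywhere */
        return 0;
    }
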
838 struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j); in get_cpu_leaves() local
840 *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base); in get_cpu_leaves()
891 static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
894 return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
903 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, in show_size() argument
906 return sprintf(buf, "%luK\n", this_leaf->base.size / 1024); in show_size()
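
Lines 891-906 show the second macro family: token-pasting generates one show_<field>() per struct member, each printing the member plus a constant bias (several CPUID fields are stored minus one), while show_size() formats in KB with a K suffix. A compilable stand-in:

    #include <stdio.h>

    struct leaf {
        unsigned long ways;     /* stored minus one, hence the "+ 1" bias */
        unsigned long sets;
        unsigned long size;
    };

    #define SHOW_FIELD(name, member, bias)                          \
    static int show_##name(const struct leaf *l, char *buf)         \
    {                                                               \
        return sprintf(buf, "%lu\n", l->member + (bias));           \
    }
    SHOW_FIELD(ways_of_associativity, ways, 1)
    SHOW_FIELD(number_of_sets,        sets, 1)

    static int show_size(const struct leaf *l, char *buf)
    {
        return sprintf(buf, "%luK\n", l->size / 1024);  /* line 906 */
    }

    int main(void)
    {
        struct leaf l = { .ways = 15, .sets = 8191, .size = 8192 * 1024 };
        char buf[32];
        show_ways_of_associativity(&l, buf); fputs(buf, stdout);  /* 16 */
        show_number_of_sets(&l, buf);        fputs(buf, stdout);  /* 8192 */
        show_size(&l, buf);                  fputs(buf, stdout);  /* 8192K */
        return 0;
    }
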
909 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, in show_shared_cpu_map_func() argument
918 mask = to_cpumask(this_leaf->shared_cpu_map); in show_shared_cpu_map_func()
940 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, in show_type() argument
943 switch (this_leaf->base.eax.split.type) { in show_type()
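
The switch at 943 translates the CPUID type field into the strings the sysfs type attribute exposes: Data, Instruction, and Unified. A sketch with stand-in enum values:

    #include <stdio.h>

    enum cache_type { CTYPE_DATA = 1, CTYPE_INST = 2, CTYPE_UNIFIED = 3 };

    static const char *type_name(enum cache_type t)
    {
        switch (t) {
        case CTYPE_DATA:    return "Data";
        case CTYPE_INST:    return "Instruction";
        case CTYPE_UNIFIED: return "Unified";
        default:            return "Unknown";
        }
    }

    int main(void)
    {
        printf("%s\n", type_name(CTYPE_UNIFIED));
        return 0;
    }
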
1024 struct _index_kobject *this_leaf = to_object(kobj); in show() local
1028 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), in show()
1029 buf, this_leaf->cpu) : in show()
1038 struct _index_kobject *this_leaf = to_object(kobj); in store() local
1042 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), in store()
1043 buf, count, this_leaf->cpu) : in store()
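
The show()/store() pair at 1024-1043 is the kobject glue: to_object() recovers the per-index wrapper from the embedded kobject (a container_of cast), and the attribute's hook is invoked only if present, the ternary otherwise yielding 0. A userspace model of the show side, with stand-in types and container_of spelled out:

    #include <stddef.h>
    #include <stdio.h>

    struct kobject { int dummy; };

    struct index_kobject {
        struct kobject kobj;
        unsigned int cpu, index;
    };

    struct attr {
        int (*show)(unsigned cpu, unsigned index, char *buf);
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static int show_level(unsigned cpu, unsigned index, char *buf)
    {
        return sprintf(buf, "cpu%u leaf%u\n", cpu, index);
    }

    static int do_show(struct kobject *kobj, struct attr *fattr, char *buf)
    {
        struct index_kobject *this_leaf =
            container_of(kobj, struct index_kobject, kobj);   /* to_object() */
        return fattr->show ?
            fattr->show(this_leaf->cpu, this_leaf->index, buf) :
            0;                                                /* no handler */
    }

    int main(void)
    {
        struct index_kobject ik = { .cpu = 3, .index = 2 };
        struct attr a = { .show = show_level };
        char buf[32];
        do_show(&ik.kobj, &a, buf);
        fputs(buf, stdout);
        return 0;
    }
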
1108 struct _cpuid4_info *this_leaf; in cache_add_dev() local
1128 this_leaf = CPUID4_INFO_IDX(cpu, i); in cache_add_dev()
1132 if (this_leaf->base.nb) in cache_add_dev()