Searched refs:cu_info (Results 1 – 22 of 22) sorted by relevance

/drivers/gpu/drm/amd/amdkfd/
kfd_mqd_manager.c  100 struct kfd_cu_info cu_info; in mqd_symmetrically_map_cu_mask() local
104 amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info); in mqd_symmetrically_map_cu_mask()
106 if (cu_mask_count > cu_info.cu_active_number) in mqd_symmetrically_map_cu_mask()
107 cu_mask_count = cu_info.cu_active_number; in mqd_symmetrically_map_cu_mask()
109 for (se = 0; se < cu_info.num_shader_engines; se++) in mqd_symmetrically_map_cu_mask()
110 for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) in mqd_symmetrically_map_cu_mask()
111 cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]); in mqd_symmetrically_map_cu_mask()
127 if (se == cu_info.num_shader_engines) { in mqd_symmetrically_map_cu_mask()
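The kfd_mqd_manager.c hits above show mqd_symmetrically_map_cu_mask() counting the active CUs per shader engine by popcounting one cu_bitmap word per shader array. A minimal user-space sketch of that counting step, with struct kfd_cu_info reduced to the fields visible above and __builtin_popcount standing in for the kernel's hweight32 (a sketch, not the driver code itself):

#include <stdint.h>
#include <stdio.h>

/* Reduced stand-in for struct kfd_cu_info, limited to the fields that
 * appear in the hits above; the real definition lives in the KFD headers. */
struct kfd_cu_info_sketch {
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t cu_bitmap[4][4];	/* one 32-bit CU mask per shader array */
};

/* Count active CUs per shader engine, mirroring the loop shown in
 * mqd_symmetrically_map_cu_mask(); the [se % 4][sh + se / 4] indexing
 * follows how the bitmap is packed for chips with more than 4 SEs. */
static void count_cu_per_se(const struct kfd_cu_info_sketch *info,
			    uint32_t cu_per_se[4])
{
	for (uint32_t se = 0; se < info->num_shader_engines; se++)
		for (uint32_t sh = 0; sh < info->num_shader_arrays_per_engine; sh++)
			cu_per_se[se] += __builtin_popcount(
				info->cu_bitmap[se % 4][sh + se / 4]);
}

int main(void)
{
	/* Made-up example geometry: 2 SEs, 2 shader arrays each. */
	struct kfd_cu_info_sketch info = {
		.num_shader_engines = 2,
		.num_shader_arrays_per_engine = 2,
		.cu_bitmap = { { 0x3ff, 0x3ff }, { 0x1ff, 0x3ff } },
	};
	uint32_t cu_per_se[4] = { 0 };

	count_cu_per_se(&info, cu_per_se);
	for (uint32_t se = 0; se < info.num_shader_engines; se++)
		printf("SE%u: %u active CUs\n", (unsigned)se, (unsigned)cu_per_se[se]);
	return 0;
}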
kfd_crat.c  546 struct kfd_cu_info *cu_info, in fill_in_pcache() argument
612 struct kfd_cu_info *cu_info, in kfd_fill_gpu_cache_info() argument
696 for (i = 0; i < cu_info->num_shader_engines; i++) { in kfd_fill_gpu_cache_info()
697 for (j = 0; j < cu_info->num_shader_arrays_per_engine; in kfd_fill_gpu_cache_info()
699 for (k = 0; k < cu_info->num_cu_per_sh; in kfd_fill_gpu_cache_info()
704 cu_info, in kfd_fill_gpu_cache_info()
706 cu_info->cu_bitmap[i][j], in kfd_fill_gpu_cache_info()
1146 struct kfd_cu_info cu_info; in kfd_create_vcrat_image_gpu() local
1192 amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info); in kfd_create_vcrat_image_gpu()
1193 cu->num_simd_per_cu = cu_info.simd_per_cu; in kfd_create_vcrat_image_gpu()
[all …]
kfd_topology.c  1194 struct kfd_cu_info cu_info; in kfd_topology_add_device() local
1268 amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info); in kfd_topology_add_device()
1274 cu_info.num_shader_arrays_per_engine; in kfd_topology_add_device()
1347 cu_info.simd_per_cu * cu_info.cu_active_number; in kfd_topology_add_device()
/drivers/gpu/drm/amd/amdgpu/
amdgpu_amdkfd.c  479 void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) in amdgpu_amdkfd_get_cu_info() argument
482 struct amdgpu_cu_info acu_info = adev->gfx.cu_info; in amdgpu_amdkfd_get_cu_info()
484 memset(cu_info, 0, sizeof(*cu_info)); in amdgpu_amdkfd_get_cu_info()
485 if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap)) in amdgpu_amdkfd_get_cu_info()
488 cu_info->cu_active_number = acu_info.number; in amdgpu_amdkfd_get_cu_info()
489 cu_info->cu_ao_mask = acu_info.ao_cu_mask; in amdgpu_amdkfd_get_cu_info()
490 memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0], in amdgpu_amdkfd_get_cu_info()
492 cu_info->num_shader_engines = adev->gfx.config.max_shader_engines; in amdgpu_amdkfd_get_cu_info()
493 cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; in amdgpu_amdkfd_get_cu_info()
494 cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh; in amdgpu_amdkfd_get_cu_info()
[all …]
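Those hits show amdgpu_amdkfd_get_cu_info() translating the driver-wide adev->gfx.cu_info into the KFD-facing struct kfd_cu_info. A hedged sketch of that copy pattern, with both structs reduced to the fields visible above (the real definitions carry more members, and the SE/SA/CU geometry comes from adev->gfx.config):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reduced stand-ins for struct amdgpu_cu_info and struct kfd_cu_info. */
struct amdgpu_cu_info_sketch {
	uint32_t number;	/* active CU count */
	uint32_t ao_cu_mask;	/* always-on CU mask */
	uint32_t bitmap[4][4];
};

struct kfd_cu_info_sketch {
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t cu_bitmap[4][4];
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t num_cu_per_sh;
};

/* Mirror of the copy in amdgpu_amdkfd_get_cu_info(): zero the destination,
 * then translate each GFX-side field into its KFD-side name. */
static void get_cu_info_sketch(const struct amdgpu_cu_info_sketch *acu,
			       uint32_t max_se, uint32_t max_sh_per_se,
			       uint32_t max_cu_per_sh,
			       struct kfd_cu_info_sketch *cu_info)
{
	memset(cu_info, 0, sizeof(*cu_info));
	cu_info->cu_active_number = acu->number;
	cu_info->cu_ao_mask = acu->ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu->bitmap[0], sizeof(acu->bitmap));
	cu_info->num_shader_engines = max_se;
	cu_info->num_shader_arrays_per_engine = max_sh_per_se;
	cu_info->num_cu_per_sh = max_cu_per_sh;
}

int main(void)
{
	/* Made-up example values. */
	struct amdgpu_cu_info_sketch acu = {
		.number = 36,
		.ao_cu_mask = 0xffffffffu,
		.bitmap = { { 0x3ff, 0x3ff } },
	};
	struct kfd_cu_info_sketch kfd_cu;

	get_cu_info_sketch(&acu, 2, 2, 10, &kfd_cu);
	printf("active CUs: %u, shader engines: %u\n",
	       (unsigned)kfd_cu.cu_active_number,
	       (unsigned)kfd_cu.num_shader_engines);
	return 0;
}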
amdgpu_discovery.c  406 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size); in amdgpu_discovery_get_gfx_info()
407 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd); in amdgpu_discovery_get_gfx_info()
408 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu); in amdgpu_discovery_get_gfx_info()
409 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size); in amdgpu_discovery_get_gfx_info()
amdgpu_atomfirmware.c  455 adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size); in amdgpu_atomfirmware_get_gfx_info()
456 adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd); in amdgpu_atomfirmware_get_gfx_info()
457 adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu; in amdgpu_atomfirmware_get_gfx_info()
458 adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size); in amdgpu_atomfirmware_get_gfx_info()
gfx_v9_0.h  32 int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
amdgpu_kms.c  723 dev_info.cu_active_number = adev->gfx.cu_info.number; in amdgpu_info_ioctl()
724 dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; in amdgpu_info_ioctl()
726 memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0], in amdgpu_info_ioctl()
727 sizeof(adev->gfx.cu_info.ao_cu_bitmap)); in amdgpu_info_ioctl()
728 memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], in amdgpu_info_ioctl()
729 sizeof(adev->gfx.cu_info.bitmap)); in amdgpu_info_ioctl()
746 dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size; in amdgpu_info_ioctl()
gfx_v7_0.c  3841 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); in gfx_v7_0_init_ao_cu_mask()
3845 tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); in gfx_v7_0_init_ao_cu_mask()
5110 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; in gfx_v7_0_get_cu_info() local
5119 memset(cu_info, 0, sizeof(*cu_info)); in gfx_v7_0_get_cu_info()
5134 cu_info->bitmap[i][j] = bitmap; in gfx_v7_0_get_cu_info()
5147 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; in gfx_v7_0_get_cu_info()
5153 cu_info->number = active_cu_number; in gfx_v7_0_get_cu_info()
5154 cu_info->ao_cu_mask = ao_cu_mask; in gfx_v7_0_get_cu_info()
5155 cu_info->simd_per_cu = NUM_SIMD_PER_CU; in gfx_v7_0_get_cu_info()
5156 cu_info->max_waves_per_simd = 10; in gfx_v7_0_get_cu_info()
[all …]
gfx_v6_0.c  2788 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); in gfx_v6_0_init_ao_cu_mask()
2792 tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); in gfx_v6_0_init_ao_cu_mask()
3577 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; in gfx_v6_0_get_cu_info() local
3586 memset(cu_info, 0, sizeof(*cu_info)); in gfx_v6_0_get_cu_info()
3601 cu_info->bitmap[i][j] = bitmap; in gfx_v6_0_get_cu_info()
3614 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; in gfx_v6_0_get_cu_info()
3621 cu_info->number = active_cu_number; in gfx_v6_0_get_cu_info()
3622 cu_info->ao_cu_mask = ao_cu_mask; in gfx_v6_0_get_cu_info()
amdgpu_amdkfd.h  170 void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
amdgpu_gfx.h  308 struct amdgpu_cu_info cu_info; member
gfx_v9_0.c  730 struct amdgpu_cu_info *cu_info);
1488 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; in gfx_v9_0_init_always_on_cu_mask() local
1510 if (cu_info->bitmap[i][j] & mask) { in gfx_v9_0_init_always_on_cu_mask()
1523 cu_info->ao_cu_bitmap[i][j] = cu_bitmap; in gfx_v9_0_init_always_on_cu_mask()
2507 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); in gfx_v9_0_constants_init()
6452 struct amdgpu_cu_info *cu_info) in gfx_v9_0_get_cu_info() argument
6458 if (!adev || !cu_info) in gfx_v9_0_get_cu_info()
6495 cu_info->bitmap[i % 4][j + i / 4] = bitmap; in gfx_v9_0_get_cu_info()
6508 cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; in gfx_v9_0_get_cu_info()
6514 cu_info->number = active_cu_number; in gfx_v9_0_get_cu_info()
[all …]
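In the gfx9 path the per-SE/SA mask is stored at cu_info->bitmap[i % 4][j + i / 4], which folds chips with more than four shader engines into the fixed 4x4 array; the kfd_mqd_manager.c hit above reads it back with the same [se % 4][sh + se / 4] mapping. A small illustrative sketch of that index folding (the geometry values are made-up examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example geometry: 8 shader engines, 1 shader array each. */
	const uint32_t num_se = 8, num_sa = 1;

	/* Shader engine i, shader array j lands at bitmap[i % 4][j + i / 4],
	 * so SE 0..3 use the j slots directly and SE 4..7 land one slot
	 * further along the second index. */
	for (uint32_t i = 0; i < num_se; i++)
		for (uint32_t j = 0; j < num_sa; j++)
			printf("SE%u/SA%u -> bitmap[%u][%u]\n",
			       (unsigned)i, (unsigned)j,
			       (unsigned)(i % 4), (unsigned)(j + i / 4));
	return 0;
}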
gfx_v8_0.c  4090 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); in gfx_v8_0_init_pg()
7132 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; in gfx_v8_0_get_cu_info() local
7136 memset(cu_info, 0, sizeof(*cu_info)); in gfx_v8_0_get_cu_info()
7156 cu_info->bitmap[i][j] = bitmap; in gfx_v8_0_get_cu_info()
7169 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; in gfx_v8_0_get_cu_info()
7175 cu_info->number = active_cu_number; in gfx_v8_0_get_cu_info()
7176 cu_info->ao_cu_mask = ao_cu_mask; in gfx_v8_0_get_cu_info()
7177 cu_info->simd_per_cu = NUM_SIMD_PER_CU; in gfx_v8_0_get_cu_info()
7178 cu_info->max_waves_per_simd = 10; in gfx_v8_0_get_cu_info()
7179 cu_info->max_scratch_slots_per_cu = 32; in gfx_v8_0_get_cu_info()
[all …]
gfx_v10_0.c  243 struct amdgpu_cu_info *cu_info);
1742 gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info); in gfx_v10_0_constants_init()
5436 struct amdgpu_cu_info *cu_info) in gfx_v10_0_get_cu_info() argument
5442 if (!adev || !cu_info) in gfx_v10_0_get_cu_info()
5458 cu_info->bitmap[i][j] = bitmap; in gfx_v10_0_get_cu_info()
5471 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; in gfx_v10_0_get_cu_info()
5477 cu_info->number = active_cu_number; in gfx_v10_0_get_cu_info()
5478 cu_info->ao_cu_mask = ao_cu_mask; in gfx_v10_0_get_cu_info()
5479 cu_info->simd_per_cu = NUM_SIMD_PER_CU; in gfx_v10_0_get_cu_info()
amdgpu_device.c  1484 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); in amdgpu_device_parse_gpu_info_fw()
1485 adev->gfx.cu_info.max_waves_per_simd = in amdgpu_device_parse_gpu_info_fw()
1487 adev->gfx.cu_info.max_scratch_slots_per_cu = in amdgpu_device_parse_gpu_info_fw()
1489 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); in amdgpu_device_parse_gpu_info_fw()
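The amdgpu_discovery.c, amdgpu_atomfirmware.c, and amdgpu_device.c hits all fill the same cu_info fields from little-endian firmware tables (16- or 32-bit fields depending on the source) before the rest of the driver consumes them. A user-space sketch of that conversion pattern, with le32toh/htole32 from <endian.h> standing in for the kernel's le32_to_cpu and the structs reduced to the four fields visible above:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced stand-in for the little-endian firmware blob layout. */
struct gpu_info_fw_sketch {
	uint32_t gc_wave_size;
	uint32_t gc_max_waves_per_simd;
	uint32_t gc_max_scratch_slots_per_cu;
	uint32_t gc_lds_size;
};

/* Host-endian destination, matching the cu_info fields being filled. */
struct cu_info_sketch {
	uint32_t wave_front_size;
	uint32_t max_waves_per_simd;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;
};

int main(void)
{
	/* Made-up example values, stored as they would appear in the blob. */
	struct gpu_info_fw_sketch fw = {
		.gc_wave_size = htole32(64),
		.gc_max_waves_per_simd = htole32(10),
		.gc_max_scratch_slots_per_cu = htole32(32),
		.gc_lds_size = htole32(64),
	};
	struct cu_info_sketch cu;

	/* Same shape as the parsing shown above: convert each little-endian
	 * field to host order on the way in. */
	cu.wave_front_size = le32toh(fw.gc_wave_size);
	cu.max_waves_per_simd = le32toh(fw.gc_max_waves_per_simd);
	cu.max_scratch_slots_per_cu = le32toh(fw.gc_max_scratch_slots_per_cu);
	cu.lds_size = le32toh(fw.gc_lds_size);

	printf("wave_front_size=%u lds_size=%u\n",
	       (unsigned)cu.wave_front_size, (unsigned)cu.lds_size);
	return 0;
}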
/drivers/gpu/drm/amd/powerplay/hwmgr/
smu7_clockpowergating.c  426 adev->gfx.cu_info.number); in smu7_powergate_gfx()
vega12_hwmgr.c  425 data->total_active_cus = adev->gfx.cu_info.number; in vega12_hwmgr_backend_init()
vega20_hwmgr.c  467 data->total_active_cus = adev->gfx.cu_info.number; in vega20_hwmgr_backend_init()
vega10_hwmgr.c  916 data->total_active_cus = adev->gfx.cu_info.number; in vega10_hwmgr_backend_init()
/drivers/net/ethernet/mellanox/mlxsw/
spectrum.c  6363 struct netdev_notifier_changeupper_info *cu_info; in mlxsw_sp_netdevice_vxlan_event() local
6372 cu_info = container_of(info, in mlxsw_sp_netdevice_vxlan_event()
6375 upper_dev = cu_info->upper_dev; in mlxsw_sp_netdevice_vxlan_event()
6382 if (cu_info->linking) { in mlxsw_sp_netdevice_vxlan_event()
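The spectrum.c hits are an unrelated cu_info: a struct netdev_notifier_changeupper_info recovered from the generic notifier info pointer via container_of() in mlxsw_sp_netdevice_vxlan_event(). A small user-space sketch of that recovery pattern, with reduced stand-in structs (the real ones live in include/linux/netdevice.h):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* User-space stand-in for the kernel's container_of(): recover a pointer
 * to the outer struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Reduced stand-ins for the netdev notifier structs. */
struct notifier_info_sketch {
	int event;
};

struct changeupper_info_sketch {
	struct notifier_info_sketch info;	/* embedded base info */
	void *upper_dev;
	bool linking;
};

int main(void)
{
	struct changeupper_info_sketch cu = { .linking = true };
	struct notifier_info_sketch *info = &cu.info;

	/* Same recovery step as the spectrum.c hit above: the notifier hands
	 * out a pointer to the embedded info, and the handler walks back to
	 * the containing changeupper structure. */
	struct changeupper_info_sketch *cu_info =
		container_of(info, struct changeupper_info_sketch, info);

	printf("linking=%d\n", cu_info->linking ? 1 : 0);
	return 0;
}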
/drivers/gpu/drm/amd/powerplay/smumgr/
vegam_smumgr.c  1918 adev->gfx.cu_info.number); in vegam_enable_reconfig_cus()