/drivers/gpu/drm/radeon/
r600_cs.c (all hits in r600_cs_check_reg(); reloc is a local):
     972  struct radeon_bo_list *reloc;
    1017  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    1023  ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
    1035  r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
    1044  if (reloc->tiling_flags & RADEON_TILING_MACRO) {
    1077  r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
    1085  ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
    1086  track->vgt_strmout_bo[tmp] = reloc->robj;
    1087  track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
    1100  r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
    [all …]

evergreen_cs.c (all hits in evergreen_cs_handle_reg(); reloc is a local):
    1097  struct radeon_bo_list *reloc;
    1143  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    1149  ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
    1172  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    1180  ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
    1181  track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
    1182  if (reloc->tiling_flags & RADEON_TILING_MACRO) {
    1185  evergreen_tiling_fields(reloc->tiling_flags,
    1214  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    1221  ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
    [all …]

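The r600 and evergreen checkers above share one shape: fetch the relocation entry that accompanies a register write, then fold the buffer's GPU address into the indirect-buffer (IB) word. A minimal sketch of that step, reconstructed from the hits above (error reporting is elided, and the surrounding switch on the register offset is assumed):

    struct radeon_bo_list *reloc;
    int r;

    /* Fetch the relocation entry that accompanies this register write. */
    r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    if (r)
            return -EINVAL; /* bad packet; error reporting elided */

    /* Base-address registers store bits [39:8] of a 256-byte-aligned
     * address, hence the shift before patching the IB word. */
    ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);

Tiling-sensitive registers additionally derive array-mode bits from reloc->tiling_flags, which is what the Z_ARRAY_MODE and RADEON_TILING_MACRO hits are doing.
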
r200.c (all hits in r200_packet0_check(); reloc is a local):
     149  struct radeon_bo_list *reloc;
     181  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
     188  track->zb.robj = reloc->robj;
     191  ib[idx] = idx_value + ((u32)reloc->gpu_offset);
     194  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
     201  track->cb[0].robj = reloc->robj;
     204  ib[idx] = idx_value + ((u32)reloc->gpu_offset);
     213  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
     221  if (reloc->tiling_flags & RADEON_TILING_MACRO)
     223  if (reloc->tiling_flags & RADEON_TILING_MICRO)
    [all …]

r300.c (all hits in r300_packet0_check(); reloc is a local):
     635  struct radeon_bo_list *reloc;
     669  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
     676  track->cb[i].robj = reloc->robj;
     679  ib[idx] = idx_value + ((u32)reloc->gpu_offset);
     682  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
     689  track->zb.robj = reloc->robj;
     692  ib[idx] = idx_value + ((u32)reloc->gpu_offset);
     711  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
     721  ((idx_value & ~31) + (u32)reloc->gpu_offset);
     723  if (reloc->tiling_flags & RADEON_TILING_MACRO)
    [all …]

r100.c:
  in r100_reloc_pitch_offset() (reloc is a local):
    1267  struct radeon_bo_list *reloc;
    1270  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    1280  tmp += (((u32)reloc->gpu_offset) >> 10);
    1283  if (reloc->tiling_flags & RADEON_TILING_MACRO)
    1285  if (reloc->tiling_flags & RADEON_TILING_MICRO) {
  in r100_packet3_load_vbpntr() (reloc is a local):
    1306  struct radeon_bo_list *reloc;
    1323  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    1331  ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
    1334  track->arrays[i + 0].robj = reloc->robj;
    1336  r = radeon_cs_packet_next_reloc(p, &reloc, 0);
  [all …]

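The pre-r600 checkers (r100/r200/r300 above) patch full byte addresses instead: the relocated GPU offset is added directly to the value read from the IB, and the backing object is recorded in the CS tracker so later checks can validate buffer sizes. A sketch of the depth-buffer case, assembled from the hits above (the track->zb.offset assignment is an assumption based on the surrounding functions):

    struct radeon_bo_list *reloc;
    int r;

    r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    if (r)
            return r; /* error reporting elided */

    /* Remember which BO backs the depth buffer for later CS checks,
     * then patch the IB with the buffer's full GPU address. */
    track->zb.robj = reloc->robj;
    track->zb.offset = idx_value;
    ib[idx] = idx_value + ((u32)reloc->gpu_offset);
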
radeon_uvd.c (all hits in radeon_uvd_cs_reloc(); reloc is a local):
     577  struct radeon_bo_list *reloc;
     591  reloc = &p->relocs[(idx / 4)];
     592  start = reloc->gpu_offset;
     593  end = start + radeon_bo_size(reloc->robj);
     637  r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);

radeon_vce.c (all hits in radeon_vce_cs_reloc(); reloc is a local):
     474  struct radeon_bo_list *reloc;
     488  reloc = &p->relocs[(idx / 4)];
     489  start = reloc->gpu_offset;
     490  end = start + radeon_bo_size(reloc->robj);

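UVD and VCE do not walk a relocation packet stream; they index the parser's relocation array directly, one entry per four command dwords (hence idx / 4), and derive the valid address range of the backing buffer. A sketch of that lookup, nearly verbatim from the hits above; the bounds check itself is summarized in the comment:

    struct radeon_bo_list *reloc;
    uint64_t start, end;

    /* One relocation entry covers four command-stream dwords. */
    reloc = &p->relocs[(idx / 4)];
    start = reloc->gpu_offset;
    end = start + radeon_bo_size(reloc->robj);

    /* The callers then reject any address written into the command
     * stream that does not fall within [start, end). */
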
radeon_cs.c (all hits in radeon_cs_sync_rings(); reloc is a local):
     254  struct radeon_bo_list *reloc;
     257  list_for_each_entry(reloc, &p->validated, tv.head) {
     260  resv = reloc->robj->tbo.base.resv;
     262  reloc->tv.num_shared);

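radeon_cs_sync_rings() walks the parser's list of validated buffers and makes this submission wait on the fences stored in each BO's reservation object, so work queued on other rings finishes before the IB touches the buffer. A sketch close to the function these hits come from; the radeon_sync_resv() call is inferred from the surrounding code and should be treated as an assumption:

    static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
    {
            struct radeon_bo_list *reloc;
            int r;

            list_for_each_entry(reloc, &p->validated, tv.head) {
                    struct dma_resv *resv;

                    /* Each validated entry carries its TTM reservation
                     * object, which holds the fences to wait on. */
                    resv = reloc->robj->tbo.base.resv;
                    r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
                                         reloc->tv.num_shared);
                    if (r)
                            return r;
            }

            return 0;
    }
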
/drivers/gpu/host1x/
job.c:
  in pin_job() (reloc is a local):
     108  struct host1x_reloc *reloc = &job->relocs[i];
     112  reloc->target.bo = host1x_bo_get(reloc->target.bo);
     113  if (!reloc->target.bo) {
     118  phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
     121  job->unpins[job->num_unpins].bo = reloc->target.bo;
  in do_relocs() (reloc is a local):
     196  struct host1x_reloc *reloc = &job->relocs[i];
     198  reloc->target.offset) >> reloc->shift;
     202  if (cmdbuf != reloc->cmdbuf.bo)
     207  reloc->cmdbuf.offset / sizeof(u32) +
     212  if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
  [all …]

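host1x splits relocation handling in two: pin_job() grabs a reference to each target BO and pins it, while do_relocs() later writes the patched value into the command buffer. A sketch of the pinning half, following the pin_job() hits above (the addr_phys/num_unpins bookkeeping follows the same function but is an assumption here, and unwinding is abbreviated):

    unsigned int i;

    for (i = 0; i < job->num_relocs; i++) {
            struct host1x_reloc *reloc = &job->relocs[i];
            struct sg_table *sgt;
            dma_addr_t phys_addr;

            /* Hold a reference on the target BO for the job's lifetime. */
            reloc->target.bo = host1x_bo_get(reloc->target.bo);
            if (!reloc->target.bo)
                    return -EINVAL; /* unpin of earlier entries elided */

            /* Pin it and record it so the job can be unpinned later. */
            phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
            job->addr_phys[job->num_unpins] = phys_addr;
            job->unpins[job->num_unpins].bo = reloc->target.bo;
            job->num_unpins++;
    }

do_relocs() then computes (phys_addr + reloc->target.offset) >> reloc->shift, per the line-198 hit, and stores it at reloc->cmdbuf.offset within the command buffer.
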
/drivers/gpu/drm/qxl/
qxl_ioctl.c (all hits in qxl_process_single_command(); reloc is a local):
     209  struct drm_qxl_reloc reloc;
     212  if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
     219  if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
     220  DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
     225  reloc_info[i].type = reloc.reloc_type;
     227  if (reloc.dst_handle) {
     228  ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
     232  reloc_info[i].dst_offset = reloc.dst_offset;
     235  reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
     240  if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
    [all …]

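qxl is the one driver here whose reloc is a plain value copied from user space rather than a kernel-side record: each struct drm_qxl_reloc is copy_from_user()'d, type-checked, and only then resolved into BO pointers in reloc_info[]. A sketch of the start of that per-entry loop body, following the hits above (u, i and reloc_info belong to the surrounding function, and the error paths are abbreviated):

    struct drm_qxl_reloc reloc;

    /* Relocations arrive from user space, so each entry is copied in
     * and validated before its handles are resolved to BOs. */
    if (copy_from_user(&reloc, u + i, sizeof(reloc)))
            return -EFAULT; /* real code unwinds via a goto */

    if (reloc.reloc_type != QXL_RELOC_TYPE_BO &&
        reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
            DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
            return -EINVAL;
    }
    reloc_info[i].type = reloc.reloc_type;
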
/drivers/gpu/drm/i915/gem/
i915_gem_execbuffer.c:
  in relocation_target() (reloc is an argument):
     890  relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
     893  return gen8_canonical_addr((int)reloc->delta + target->node.start);
  in relocate_entry() (reloc is an argument):
    1257  const struct drm_i915_gem_relocation_entry *reloc,
    1261  u64 offset = reloc->offset;
    1262  u64 target_offset = relocation_target(reloc, target);
  in eb_relocate_entry() (reloc is an argument):
    1348  const struct drm_i915_gem_relocation_entry *reloc)
    1354  target = eb_get_vma(eb, reloc->target_handle);
    1359  if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
    1363  reloc->target_handle,
    1364  (int) reloc->offset,
  [all …]

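Two of these hits deserve a note. The write_domain test at line 1359 is the classic power-of-two check: reloc->write_domain & (reloc->write_domain - 1) is nonzero exactly when more than one domain bit is set, which execbuffer rejects. And relocation_target() is small enough to reproduce almost verbatim from lines 890 and 893 (the static qualifier and u64 return type are assumptions):

    static u64
    relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
                      const struct i915_vma *target)
    {
            /* Where the target VMA sits in the GTT, plus the user-supplied
             * delta, folded into gen8+ canonical (sign-extended) form. The
             * (int) cast lets a negative delta subtract from the base. */
            return gen8_canonical_addr((int)reloc->delta + target->node.start);
    }
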
/drivers/gpu/drm/i915/gt/
intel_renderstate.h:
      32  const u32 *reloc;                                  (member)
      39  .reloc = gen ## _g ## _null_state_relocs, \

intel_renderstate.c (all hits in render_state_setup()):
      95  if (i * 4 == rodata->reloc[reloc_index]) {
     113  if (rodata->reloc[reloc_index] != -1) {

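Here the relocations are static data: the rodata table carries a reloc array of byte offsets into the canned golden-state batch, terminated by -1, and render_state_setup() patches each listed word while copying the batch into place. A sketch of that loop, assuming the batch/batch_items fields and the so->vma placement from the same function (the 64-bit reloc split and the actual batch writes are elided):

    for (i = 0; i < rodata->batch_items; i++) {
            u32 s = rodata->batch[i];

            /* reloc[] holds byte offsets; the loop counts dwords. */
            if (i * 4 == rodata->reloc[reloc_index]) {
                    u64 r = s + so->vma->node.start;

                    s = lower_32_bits(r);
                    reloc_index++;
            }
            /* ... 's' is then written into the mapped batch ... */
    }

    /* Every entry must have been consumed; -1 terminates the table. */
    if (rodata->reloc[reloc_index] != -1)
            return -EINVAL;
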
/drivers/gpu/drm/nouveau/
nouveau_gem.c (all hits in nouveau_gem_pushbuf_reloc_apply(); reloc is a local):
     610  struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
     614  reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
     615  if (IS_ERR(reloc))
     616  return PTR_ERR(reloc);
     619  struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
     682  u_free(reloc);

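nouveau pulls the whole user relocation array into kernel memory in one shot with u_memcpya(), which allocates nr * size bytes and copies them from user space, then walks the array entry by entry and frees it with u_free(). A sketch of that shape, following the hits above (the per-entry validation and pushbuf patching are summarized by the comment):

    struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
    int i, ret = 0;

    /* Copy the entire user-space relocation array in one allocation. */
    reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
    if (IS_ERR(reloc))
            return PTR_ERR(reloc);

    for (i = 0; i < req->nr_relocs; i++) {
            struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];

            /* ... validate r and patch the pushbuf accordingly ... */
    }

    u_free(reloc);
    return ret;
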
/drivers/gpu/drm/vmwgfx/
vmwgfx_execbuf.c:
  in vmw_translate_mob_ptr() (reloc is a local):
    1161  struct vmw_relocation *reloc;
    1176  reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
    1177  if (!reloc)
    1180  reloc->mob_loc = id;
    1181  reloc->vbo = vmw_bo;
    1184  list_add_tail(&reloc->head, &sw_context->bo_relocations);
  in vmw_translate_guest_ptr() (reloc is a local):
    1216  struct vmw_relocation *reloc;
    1231  reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
    1232  if (!reloc)
    1235  reloc->location = ptr;
  [all …]

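vmwgfx defers the patching entirely: translating a guest or MOB pointer allocates a struct vmw_relocation from validation-context memory and queues it on sw_context->bo_relocations, to be fixed up once buffer placement is final. The MOB case, nearly verbatim from the hits above:

    struct vmw_relocation *reloc;

    /* Relocation records live in validation-context memory and are
     * queued on the software context for later fixup. */
    reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
    if (!reloc)
            return -ENOMEM;

    reloc->mob_loc = id;
    reloc->vbo = vmw_bo;
    list_add_tail(&reloc->head, &sw_context->bo_relocations);
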
/drivers/gpu/drm/tegra/
drm.c (all hits in tegra_drm_submit(); reloc is a local):
     427  struct host1x_reloc *reloc;
     436  reloc = &job->relocs[num_relocs];
     437  obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
     445  if (reloc->cmdbuf.offset & 3 ||
     446  reloc->cmdbuf.offset >= obj->gem.size) {
     451  obj = host1x_to_tegra_bo(reloc->target.bo);
     454  if (reloc->target.offset >= obj->gem.size) {

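tegra_drm_submit() validates each relocation before handing the job to host1x: the word to be patched must be 4-byte aligned and lie inside the command buffer, and the target offset must lie inside the target BO. A near-verbatim sketch of those checks (error unwinding elided):

    struct host1x_reloc *reloc = &job->relocs[num_relocs];
    struct tegra_bo *obj;

    /* The patched word must be 4-byte aligned and inside the cmdbuf. */
    obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
    if (reloc->cmdbuf.offset & 3 ||
        reloc->cmdbuf.offset >= obj->gem.size)
            return -EINVAL;

    /* The target offset must fall inside the target BO. */
    obj = host1x_to_tegra_bo(reloc->target.bo);
    if (reloc->target.offset >= obj->gem.size)
            return -EINVAL;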