/external/mesa3d/src/gallium/drivers/radeonsi/

si_pipe.c
  in si_destroy_context():
    174  struct si_context *sctx = (struct si_context *)context;
    184  si_release_all_descriptors(sctx);
    186  if (sctx->chip_class >= GFX10 && sctx->has_graphics)
    187     gfx10_destroy_query(sctx);
    189  pipe_resource_reference(&sctx->esgs_ring, NULL);
    190  pipe_resource_reference(&sctx->gsvs_ring, NULL);
    191  pipe_resource_reference(&sctx->tess_rings, NULL);
    192  pipe_resource_reference(&sctx->tess_rings_tmz, NULL);
    193  pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
    194  pipe_resource_reference(&sctx->sample_pos_buffer, NULL);
  [all …]

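The destroy path above releases every ring and buffer by passing NULL to pipe_resource_reference(). Below is a minimal, self-contained sketch of that reference-counting idiom; the fake_resource type and function are illustrative stand-ins, not the real Gallium definitions.

   #include <stdio.h>
   #include <stdlib.h>

   struct fake_resource {
      int refcount;
   };

   /* Pointer-slot assignment with reference counting: take the new
    * reference first, then drop the old one, freeing at zero. */
   static void fake_resource_reference(struct fake_resource **dst,
                                       struct fake_resource *src)
   {
      if (src)
         src->refcount++;
      if (*dst && --(*dst)->refcount == 0) {
         printf("freeing resource\n");
         free(*dst);
      }
      *dst = src;   /* src == NULL releases the slot, as in si_destroy_context() */
   }

   int main(void)
   {
      struct fake_resource *ring = calloc(1, sizeof(*ring));
      ring->refcount = 1;                      /* creator's reference */

      struct fake_resource *slot = NULL;
      fake_resource_reference(&slot, ring);    /* context takes a reference */
      fake_resource_reference(&ring, NULL);    /* creator drops its reference */
      fake_resource_reference(&slot, NULL);    /* destroy time: frees the object */
      return 0;
   }
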
si_cp_dma.c
  in cp_dma_max_byte_count():
     42  static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
     45     sctx->chip_class >= GFX9 ? S_414_BYTE_COUNT_GFX9(~0u) : S_414_BYTE_COUNT_GFX6(~0u);
  in si_emit_cp_dma():
     55  static void si_emit_cp_dma(struct si_context *sctx, struct radeon_cmdbuf *cs, uint64_t dst_va,
     61  assert(size <= cp_dma_max_byte_count(sctx));
     62  assert(sctx->chip_class != GFX6 || cache_policy == L2_BYPASS);
     64  if (sctx->chip_class >= GFX9)
     73  if (sctx->chip_class >= GFX9)
     83  if (sctx->chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) && src_va == dst_va) {
     89  } else if (sctx->chip_class >= GFX7 && cache_policy != L2_BYPASS) {
    100  } else if (sctx->chip_class >= GFX7 && cache_policy != L2_BYPASS) {
  [all …]

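cp_dma_max_byte_count() above computes the largest transfer the CP DMA packet can encode by applying the byte-count field macro to ~0u, so the limit automatically tracks the field width of each chip generation. A self-contained sketch of the trick follows; the field widths and the final alignment are assumptions for illustration, the real values come from the generated register headers.

   #include <stdio.h>

   /* Assumed field widths, for illustration only. */
   #define S_414_BYTE_COUNT_GFX6(x) ((x) & 0x1fffffu)    /* e.g. a 21-bit field */
   #define S_414_BYTE_COUNT_GFX9(x) ((x) & 0x3ffffffu)   /* e.g. a 26-bit field */

   static unsigned cp_dma_max_byte_count(int gfx9_plus)
   {
      unsigned max = gfx9_plus ? S_414_BYTE_COUNT_GFX9(~0u)
                               : S_414_BYTE_COUNT_GFX6(~0u);
      /* The driver also rounds the limit down to its basic alignment;
       * the value used here is illustrative. */
      return max & ~3u;
   }

   int main(void)
   {
      printf("GFX6-era max: %u bytes\n", cp_dma_max_byte_count(0));
      printf("GFX9-era max: %u bytes\n", cp_dma_max_byte_count(1));
      return 0;
   }
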
si_state_streamout.c
     29  static void si_set_streamout_enable(struct si_context *sctx, bool enable);
  in si_create_so_target():
     42  struct si_context *sctx = (struct si_context *)ctx;
     51  unsigned buf_filled_size_size = sctx->screen->use_ngg_streamout ? 8 : 4;
     52  u_suballocator_alloc(sctx->allocator_zeroed_memory, buf_filled_size_size, 4,
  in si_streamout_buffers_dirty():
     77  void si_streamout_buffers_dirty(struct si_context *sctx)
     79  if (!sctx->streamout.enabled_mask)
     82  si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_begin);
     83  si_set_streamout_enable(sctx, true);
  in si_set_streamout_targets():
     90  struct si_context *sctx = (struct si_context *)ctx;
     91  unsigned old_num_targets = sctx->streamout.num_targets;
  [all …]

si_state_draw.c
  in si_emit_derived_tess_state():
     68  static void si_emit_derived_tess_state(struct si_context *sctx, const struct pipe_draw_info *info,
     71  struct radeon_cmdbuf *cs = sctx->gfx_cs;
     77  sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
     78  unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
     79  bool has_primid_instancing_bug = sctx->chip_class == GFX6 && sctx->screen->info.max_se == 1;
     80  unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
     91  if (sctx->chip_class >= GFX9) {
     92     if (sctx->tcs_shader.cso)
     93        ls_current = sctx->tcs_shader.current;
     95     ls_current = sctx->fixed_func_tcs_shader.current;
  [all …]

si_descriptors.c
  in si_upload_descriptors():
    128  static bool si_upload_descriptors(struct si_context *sctx, struct si_descriptors *desc)
    150  si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
    156  u_upload_alloc(sctx->b.const_uploader, first_slot_offset, upload_size,
    157                 si_optimal_tcc_alignment(sctx, upload_size), &buffer_offset,
    167  radeon_add_to_buffer_list(sctx, sctx->gfx_cs, desc->buffer, RADEON_USAGE_READ,
    175  assert((desc->buffer->gpu_address >> 32) == sctx->screen->info.address32_hi);
    176  assert((desc->gpu_address >> 32) == sctx->screen->info.address32_hi);
    178  si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
  in si_add_descriptors_to_bo_list():
    183  si_add_descriptors_to_bo_list(struct si_context *sctx, struct si_descriptors *desc)
    188  radeon_add_to_buffer_list(sctx, sctx->gfx_cs, desc->buffer, RADEON_USAGE_READ,
  [all …]

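si_upload_descriptors() above places descriptor sets into a shared upload buffer via u_upload_alloc() rather than allocating one buffer object per set. A simplified bump-allocator sketch of that suballocation pattern follows; the uploader type and sizes are stand-ins, not Mesa's u_upload_mgr.

   #include <stdint.h>
   #include <stdio.h>
   #include <string.h>

   #define UPLOAD_BUF_SIZE 4096

   struct uploader {
      uint8_t buf[UPLOAD_BUF_SIZE];   /* stands in for the GPU upload buffer */
      unsigned offset;                /* current bump-allocator position */
   };

   /* Returns a CPU pointer and the buffer offset, or NULL when full.
    * align must be a power of two. */
   static void *upload_alloc(struct uploader *u, unsigned size, unsigned align,
                             unsigned *out_offset)
   {
      unsigned offset = (u->offset + align - 1) & ~(align - 1);
      if (offset + size > UPLOAD_BUF_SIZE)
         return NULL;
      u->offset = offset + size;
      *out_offset = offset;
      return u->buf + offset;
   }

   int main(void)
   {
      struct uploader u = { .offset = 0 };
      uint32_t descs[8] = { 0xdeadbeef };   /* pretend descriptor words */
      unsigned offset;

      void *ptr = upload_alloc(&u, sizeof(descs), 256, &offset);
      if (ptr) {
         memcpy(ptr, descs, sizeof(descs));
         printf("descriptors uploaded at offset %u\n", offset);
      }
      return 0;
   }
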
si_blit.c
  in si_blitter_begin():
     44  void si_blitter_begin(struct si_context *sctx, enum si_blitter_op op)
     46  util_blitter_save_vertex_shader(sctx->blitter, sctx->vs_shader.cso);
     47  util_blitter_save_tessctrl_shader(sctx->blitter, sctx->tcs_shader.cso);
     48  util_blitter_save_tesseval_shader(sctx->blitter, sctx->tes_shader.cso);
     49  util_blitter_save_geometry_shader(sctx->blitter, sctx->gs_shader.cso);
     50  util_blitter_save_so_targets(sctx->blitter, sctx->streamout.num_targets,
     51                               (struct pipe_stream_output_target **)sctx->streamout.targets);
     52  util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer);
     55  util_blitter_save_blend(sctx->blitter, sctx->queued.named.blend);
     56  util_blitter_save_depth_stencil_alpha(sctx->blitter, sctx->queued.named.dsa);
  [all …]

si_state_shaders.c
  in si_emit_shader_es():
    560  static void si_emit_shader_es(struct si_context *sctx)
    562  struct si_shader *shader = sctx->queued.named.es->shader;
    563  unsigned initial_cdw = sctx->gfx_cs->current.cdw;
    568  radeon_opt_set_context_reg(sctx, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
    573  radeon_opt_set_context_reg(sctx, R_028B6C_VGT_TF_PARAM, SI_TRACKED_VGT_TF_PARAM,
    577  radeon_opt_set_context_reg(sctx, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
    581  if (initial_cdw != sctx->gfx_cs->current.cdw)
    582     sctx->context_roll = true;
  in si_emit_shader_gs():
    723  static void si_emit_shader_gs(struct si_context *sctx)
    725  struct si_shader *shader = sctx->queued.named.gs->shader;
  [all …]

si_state_binning.c
  in si_get_color_bin_size():
     63  static struct uvec2 si_get_color_bin_size(struct si_context *sctx, unsigned cb_target_enabled_4bit)
     65  unsigned num_fragments = sctx->framebuffer.nr_color_samples;
     69  for (unsigned i = 0; i < sctx->framebuffer.state.nr_cbufs; i++) {
     73  struct si_texture *tex = (struct si_texture *)sctx->framebuffer.state.cbufs[i]->texture;
     79  if (si_get_ps_iter_samples(sctx) >= 2)
    174  return si_find_bin_size(sctx->screen, table, sum);
  in si_get_depth_bin_size():
    177  static struct uvec2 si_get_depth_bin_size(struct si_context *sctx)
    179  struct si_state_dsa *dsa = sctx->queued.named.dsa;
    181  if (!sctx->framebuffer.state.zsbuf || (!dsa->depth_enabled && !dsa->stencil_enabled)) {
    187  struct si_texture *tex = (struct si_texture *)sctx->framebuffer.state.zsbuf->texture;
  [all …]

si_compute.c
  in si_create_compute_state():
    224  struct si_context *sctx = (struct si_context *)ctx;
    251  sel->compiler_ctx_state.debug = sctx->debug;
    252  sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
    255  si_schedule_initial_compile(sctx, MESA_SHADER_COMPUTE, &sel->ready, &sel->compiler_ctx_state,
    272  si_shader_dump(sctx->screen, &program->shader, &sctx->debug, stderr, true);
    273  if (!si_shader_binary_upload(sctx->screen, &program->shader, 0)) {
  in si_bind_compute_state():
    286  struct si_context *sctx = (struct si_context *)ctx;
    290  sctx->cs_shader_state.program = program;
    298  si_set_active_descriptors(sctx,
    301  si_set_active_descriptors(sctx, SI_DESCS_FIRST_COMPUTE + SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
  [all …]

si_cp_reg_shadowing.c
  in si_create_shadowing_ib_preamble():
     69  si_create_shadowing_ib_preamble(struct si_context *sctx)
     73  if (sctx->chip_class == GFX10) {
     79  if (sctx->screen->dpbb_allowed) {
     92  if (sctx->chip_class >= GFX10) {
    106  } else if (sctx->chip_class == GFX9) {
    142  si_build_load_reg(sctx->screen, pm4, i, sctx->shadowed_regs);
  in si_init_cp_reg_shadowing():
    147  void si_init_cp_reg_shadowing(struct si_context *sctx)
    149  if (sctx->screen->info.mid_command_buffer_preemption_enabled ||
    150      sctx->screen->debug_flags & DBG(SHADOW_REGS)) {
    151     sctx->shadowed_regs =
  [all …]

si_state.c
  in si_emit_cb_render_state():
     69  static void si_emit_cb_render_state(struct si_context *sctx)
     71  struct radeon_cmdbuf *cs = sctx->gfx_cs;
     72  struct si_state_blend *blend = sctx->queued.named.blend;
     75  uint32_t cb_target_mask = sctx->framebuffer.colorbuf_enabled_4bit & blend->cb_target_mask;
     84  if (blend->dual_src_blend && sctx->ps_shader.cso &&
     85      (sctx->ps_shader.cso->info.colors_written & 0x3) != 0x3)
     91  if (sctx->screen->dpbb_allowed && sctx->last_cb_target_mask != cb_target_mask) {
     92     sctx->last_cb_target_mask = cb_target_mask;
     99  radeon_opt_set_context_reg(sctx, R_028238_CB_TARGET_MASK, SI_TRACKED_CB_TARGET_MASK,
    102  if (sctx->chip_class >= GFX8) {
  [all …]

si_query.c
  in si_query_sw_destroy():
     60  static void si_query_sw_destroy(struct si_context *sctx, struct si_query *squery)
     64  sctx->b.screen->fence_reference(sctx->b.screen, &query->fence, NULL);
  in si_finish_dma_get_cpu_time():
    116  static int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
    120  si_flush_dma_cs(sctx, 0, &fence);
    122  sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
    123  sctx->ws->fence_reference(&fence, NULL);
  in si_query_sw_begin():
    129  static bool si_query_sw_begin(struct si_context *sctx, struct si_query *squery)
    139  query->begin_result = si_finish_dma_get_cpu_time(sctx);
    142  query->begin_result = sctx->num_draw_calls;
    145  query->begin_result = sctx->num_decompress_calls;
  [all …]

gfx10_query.c
  in emit_shader_query():
     80  static void emit_shader_query(struct si_context *sctx)
     82  assert(!list_is_empty(&sctx->shader_query_buffers));
     85  list_last_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
  in gfx10_release_query_buffers():
     89  static void gfx10_release_query_buffers(struct si_context *sctx,
    104  if (qbuf->list.next == &sctx->shader_query_buffers)
    106  if (qbuf->list.prev == &sctx->shader_query_buffers)
  in gfx10_alloc_query_buffer():
    115  static bool gfx10_alloc_query_buffer(struct si_context *sctx)
    117  if (si_is_atom_dirty(sctx, &sctx->atoms.s.shader_query))
    122  if (!list_is_empty(&sctx->shader_query_buffers)) {
    123     qbuf = list_last_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
  [all …]

si_compute_blit.c
  in get_cache_policy():
     33  static enum si_cache_policy get_cache_policy(struct si_context *sctx, enum si_coherency coher,
     36  if ((sctx->chip_class >= GFX9 && (coher == SI_COHERENCY_CB_META ||
     39      (sctx->chip_class >= GFX7 && coher == SI_COHERENCY_SHADER))
  in si_get_flush_flags():
     45  unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
  in si_launch_grid_internal():
     67  static void si_launch_grid_internal(struct si_context *sctx, struct pipe_grid_info *info,
     71  sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH | SI_CONTEXT_PS_PARTIAL_FLUSH;
     74  sctx->flags |= SI_CONTEXT_INV_VCACHE;
     77  sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS;
     78  sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;
     81  sctx->render_cond_force_off = true;
  [all …]

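get_cache_policy() above decides whether a compute clear or copy may stay in the GPU L2 cache, based on who consumes the data next. A sketch of that decision, restricted to the two conditions visible in the snippet (enum names and values are simplified stand-ins):

   #include <stdio.h>

   enum chip_class { GFX6 = 6, GFX7, GFX8, GFX9, GFX10 };
   enum coherency { COHERENCY_SHADER, COHERENCY_CB_META };
   enum cache_policy { L2_BYPASS, L2_LRU };

   static enum cache_policy get_cache_policy(enum chip_class chip,
                                             enum coherency coher)
   {
      /* Metadata on GFX9+ and shader reads on GFX7+ go through L2 anyway,
       * so the compute path may keep the data resident there. */
      if ((chip >= GFX9 && coher == COHERENCY_CB_META) ||
          (chip >= GFX7 && coher == COHERENCY_SHADER))
         return L2_LRU;
      return L2_BYPASS;
   }

   int main(void)
   {
      printf("%s\n", get_cache_policy(GFX6, COHERENCY_SHADER) == L2_LRU ? "lru" : "bypass");
      printf("%s\n", get_cache_policy(GFX9, COHERENCY_CB_META) == L2_LRU ? "lru" : "bypass");
      return 0;
   }
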
si_state.h
    466  #define si_pm4_state_changed(sctx, member) \
    467     ((sctx)->queued.named.member != (sctx)->emitted.named.member)
    469  #define si_pm4_state_enabled_and_changed(sctx, member) \
    470     ((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))
    472  #define si_pm4_bind_state(sctx, member, value) \
    474     (sctx)->queued.named.member = (value); \
    475     (sctx)->dirty_states |= SI_STATE_BIT(member); \
    478  #define si_pm4_delete_state(sctx, member, value) \
    480     if ((sctx)->queued.named.member == (value)) { \
    481        (sctx)->queued.named.member = NULL; \
  [all …]

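These macros implement the queued/emitted state-tracking idiom: binding only records the new state pointer and a dirty bit, and a state is re-emitted only when the queued pointer differs from the last emitted one, as si_emit_cb_render_state() does with sctx->queued.named.blend in the si_state.c hits above. A compilable miniature of the pattern, with struct layout and names as simplified stand-ins:

   #include <stdio.h>

   struct pm4_state { int dummy; };

   struct ctx {
      struct { struct pm4_state *blend; } queued, emitted;
      unsigned dirty_states;
   };

   #define STATE_BIT_blend 0x1

   /* Re-emit only if the queued pointer differs from the emitted one. */
   #define pm4_state_changed(c, member) \
      ((c)->queued.member != (c)->emitted.member)

   /* Binding just records the pointer and sets the dirty bit. */
   #define pm4_bind_state(c, member, value) \
      do { \
         (c)->queued.member = (value); \
         (c)->dirty_states |= STATE_BIT_##member; \
      } while (0)

   int main(void)
   {
      struct ctx c = {0};
      struct pm4_state blend_a = {0};

      pm4_bind_state(&c, blend, &blend_a);
      printf("changed: %d\n", pm4_state_changed(&c, blend));  /* 1 */

      c.emitted.blend = c.queued.blend;                       /* "emit" it */
      printf("changed: %d\n", pm4_state_changed(&c, blend));  /* 0 */
      return 0;
   }
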
si_pipe.h
   1288  void cik_init_sdma_functions(struct si_context *sctx);
   1299  void si_blitter_begin(struct si_context *sctx, enum si_blitter_op op);
   1300  void si_blitter_end(struct si_context *sctx);
   1301  void si_init_blit_functions(struct si_context *sctx);
   1302  void si_decompress_textures(struct si_context *sctx, unsigned shader_mask);
   1309  void si_decompress_dcc(struct si_context *sctx, struct si_texture *tex);
   1312  bool si_rings_is_buffer_referenced(struct si_context *sctx, struct pb_buffer *buf,
   1314  void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource *resource,
   1326  void si_init_buffer_functions(struct si_context *sctx);
   1331  bool vi_dcc_clear_level(struct si_context *sctx, struct si_texture *tex, unsigned level,
  [all …]

si_build_pm4.h
  in radeon_opt_set_context_reg_rmw():
    149  static inline void radeon_opt_set_context_reg_rmw(struct si_context *sctx, unsigned offset,
    153  struct radeon_cmdbuf *cs = sctx->gfx_cs;
    158  if (((sctx->tracked_regs.reg_saved >> reg) & 0x1) != 0x1 ||
    159      sctx->tracked_regs.reg_value[reg] != value) {
    162     sctx->tracked_regs.reg_saved |= 0x1ull << reg;
    163     sctx->tracked_regs.reg_value[reg] = value;
  in radeon_opt_set_context_reg():
    168  static inline void radeon_opt_set_context_reg(struct si_context *sctx, unsigned offset,
    171  struct radeon_cmdbuf *cs = sctx->gfx_cs;
    173  if (((sctx->tracked_regs.reg_saved >> reg) & 0x1) != 0x1 ||
    174      sctx->tracked_regs.reg_value[reg] != value) {
  [all …]

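radeon_opt_set_context_reg() above skips redundant SET_CONTEXT_REG packets by keeping a saved-bit mask plus the last written value per tracked register. A self-contained sketch of that filter, with the packet write replaced by a return value and the field names simplified:

   #include <stdbool.h>
   #include <stdint.h>
   #include <stdio.h>

   struct tracked_regs {
      uint64_t reg_saved;        /* bit i set => reg_value[i] is valid */
      uint32_t reg_value[64];
   };

   static bool opt_set_context_reg(struct tracked_regs *t, unsigned reg,
                                   uint32_t value)
   {
      if (((t->reg_saved >> reg) & 0x1) != 0x1 || t->reg_value[reg] != value) {
         /* In the driver this is where the packet is written to the CS. */
         t->reg_saved |= 1ull << reg;
         t->reg_value[reg] = value;
         return true;   /* emitted */
      }
      return false;     /* redundant write skipped */
   }

   int main(void)
   {
      struct tracked_regs t = {0};
      printf("%d\n", opt_set_context_reg(&t, 5, 0xABCD)); /* 1: first write */
      printf("%d\n", opt_set_context_reg(&t, 5, 0xABCD)); /* 0: skipped */
      printf("%d\n", opt_set_context_reg(&t, 5, 0x1234)); /* 1: value changed */
      return 0;
   }

This filtering is also why si_emit_shader_es() in the si_state_shaders.c hits compares initial_cdw against the current cdw: only when the optimized writes actually emitted something does it set sctx->context_roll.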
si_compute_prim_discard.c
  in si_shader_select_prim_discard_cs():
    837  static bool si_shader_select_prim_discard_cs(struct si_context *sctx,
    841  struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
    849  si_shader_selector_key_vs(sctx, sctx->vs_shader.cso, &key, &key.part.vs.prolog);
    864  struct si_shader_selector *ps = sctx->ps_shader.cso;
    877  key.opt.cs_cull_front = sctx->viewports.y_inverted ? rs->cull_back : rs->cull_front;
    878  key.opt.cs_cull_back = sctx->viewports.y_inverted ? rs->cull_front : rs->cull_back;
    886  sctx->cs_prim_discard_state.cso = sctx->vs_shader.cso;
    887  sctx->cs_prim_discard_state.current = NULL;
    889  if (!sctx->compiler.passes)
    890     si_init_compiler(sctx->screen, &sctx->compiler);
  [all …]

si_clear.c
  in vi_dcc_clear_level():
    216  bool vi_dcc_clear_level(struct si_context *sctx, struct si_texture *tex, unsigned level,
    232  if (sctx->chip_class >= GFX9) {
    261  si_clear_buffer(sctx, dcc_buffer, dcc_offset, clear_size, &clear_value, 4, SI_COHERENCY_CB_META,
  in si_do_fast_color_clear():
    373  static void si_do_fast_color_clear(struct si_context *sctx, unsigned *buffers,
    376  struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
    384  if (sctx->render_cond)
    409  if (sctx->chip_class >= GFX9 && tex->buffer.b.b.last_level > 0)
    431  if (sctx->chip_class <= GFX8 && tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
    432      !sctx->screen->info.htile_cmask_support_1d_tiling)
    449  if (sctx->family == CHIP_STONEY && !too_small) {
  [all …]

si_debug.c
     38  static void si_dump_bo_list(struct si_context *sctx, const struct radeon_saved_cs *saved, FILE *f);
  in si_dump_mmapped_reg():
    294  static void si_dump_mmapped_reg(struct si_context *sctx, FILE *f, unsigned offset)
    296  struct radeon_winsys *ws = sctx->ws;
    300  ac_dump_reg(f, sctx->chip_class, offset, value, ~0);
  in si_dump_debug_registers():
    303  static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
    305  if (!sctx->screen->info.has_read_registers_query)
    309  si_dump_mmapped_reg(sctx, f, R_008010_GRBM_STATUS);
    312  if (!sctx->screen->info.is_amdgpu || sctx->screen->info.drm_minor < 1) {
    317  si_dump_mmapped_reg(sctx, f, R_008008_GRBM_STATUS2);
    318  si_dump_mmapped_reg(sctx, f, R_008014_GRBM_STATUS_SE0);
  [all …]

si_buffer.c
  in si_rings_is_buffer_referenced():
     33  bool si_rings_is_buffer_referenced(struct si_context *sctx, struct pb_buffer *buf,
     36  if (sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, buf, usage)) {
     39  if (radeon_emitted(sctx->sdma_cs, 0) &&
     40      sctx->ws->cs_is_buffer_referenced(sctx->sdma_cs, buf, usage)) {
  in si_buffer_map_sync_with_rings():
     46  void *si_buffer_map_sync_with_rings(struct si_context *sctx, struct si_resource *resource,
     55  return sctx->ws->buffer_map(resource->buf, NULL, usage);
     63  if (radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size) &&
     64      sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs, resource->buf, rusage)) {
     66     si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
     69     si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
  [all …]

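si_buffer_map_sync_with_rings() above must not map a buffer that an unflushed command stream still references: it either flushes first or fails a non-blocking map. A reduced sketch of that decision, with all winsys types replaced by stand-ins:

   #include <stdbool.h>
   #include <stdio.h>

   enum map_flags { MAP_DONTBLOCK = 1 };

   struct ctx {
      bool gfx_cs_references_buf;   /* stands in for cs_is_buffer_referenced() */
   };

   static void flush_gfx_cs(struct ctx *c)
   {
      c->gfx_cs_references_buf = false;   /* work submitted; the CS no longer holds the buffer */
   }

   /* Returns true if mapping may proceed, false for a failed non-blocking map. */
   static bool map_sync_with_rings(struct ctx *c, unsigned usage)
   {
      if (c->gfx_cs_references_buf) {
         if (usage & MAP_DONTBLOCK)
            return false;          /* caller asked not to wait */
         flush_gfx_cs(c);          /* submit pending work so the map can wait on it */
      }
      return true;
   }

   int main(void)
   {
      struct ctx c = { .gfx_cs_references_buf = true };
      printf("%d\n", map_sync_with_rings(&c, MAP_DONTBLOCK)); /* 0: would block */
      printf("%d\n", map_sync_with_rings(&c, 0));             /* 1: after flush */
      return 0;
   }
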
si_pm4.c
  in si_pm4_free_state():
     97  void si_pm4_free_state(struct si_context *sctx, struct si_pm4_state *state, unsigned idx)
    102  if (idx != ~0 && sctx->emitted.array[idx] == state) {
    103     sctx->emitted.array[idx] = NULL;
  in si_pm4_emit():
    110  void si_pm4_emit(struct si_context *sctx, struct si_pm4_state *state)
    112  struct radeon_cmdbuf *cs = sctx->gfx_cs;
    115  radeon_add_to_buffer_list(sctx, sctx->gfx_cs, state->shader->bo,
    122  state->atom.emit(sctx);
  in si_pm4_reset_emitted():
    125  void si_pm4_reset_emitted(struct si_context *sctx, bool first_cs)
    127  if (!first_cs && sctx->shadowed_regs) {
    132  struct si_pm4_state *state = sctx->emitted.array[i];
  [all …]

si_fence.c
  in si_add_fence_dependency():
    167  static void si_add_fence_dependency(struct si_context *sctx, struct pipe_fence_handle *fence)
    169  struct radeon_winsys *ws = sctx->ws;
    171  if (sctx->sdma_cs)
    172     ws->cs_add_fence_dependency(sctx->sdma_cs, fence, 0);
    173  ws->cs_add_fence_dependency(sctx->gfx_cs, fence, 0);
  in si_add_syncobj_signal():
    176  static void si_add_syncobj_signal(struct si_context *sctx, struct pipe_fence_handle *fence)
    178  sctx->ws->cs_add_syncobj_signal(sctx->gfx_cs, fence);
  in si_fence_finish():
    269  struct si_context *sctx;
    273  sctx = (struct si_context *)(ctx ? ctx : NULL);
    325  if (sctx && sfence->gfx_unflushed.ctx == sctx &&
  [all …]

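The si_fence_finish() lines above show the deferred-flush check: if the fence belongs to work still buffered in the calling context's unflushed gfx CS, the driver has to flush before waiting, or the wait could never complete. A heavily simplified stand-in sketch of that logic:

   #include <stdbool.h>
   #include <stdio.h>

   struct ctx { int unflushed_work; };

   struct fence {
      struct ctx *gfx_unflushed_ctx;   /* non-NULL => CS not yet submitted */
      bool signalled;
   };

   static void flush(struct ctx *c, struct fence *f)
   {
      c->unflushed_work = 0;
      f->gfx_unflushed_ctx = NULL;     /* fence is now backed by a real submission */
      f->signalled = true;             /* pretend the GPU finished immediately */
   }

   static bool fence_finish(struct ctx *current, struct fence *f)
   {
      if (f->gfx_unflushed_ctx == current)
         flush(current, f);            /* submit the IB that will signal the fence */
      return f->signalled;
   }

   int main(void)
   {
      struct ctx c = { .unflushed_work = 1 };
      struct fence f = { .gfx_unflushed_ctx = &c };
      printf("finished: %d\n", fence_finish(&c, &f));   /* 1 after implicit flush */
      return 0;
   }
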
si_dma_cs.c
  in si_dma_emit_wait_idle():
     28  static void si_dma_emit_wait_idle(struct si_context *sctx)
     30  struct radeon_cmdbuf *cs = sctx->sdma_cs;
     33  if (sctx->chip_class >= GFX7)
  in si_dma_emit_timestamp():
     39  void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst, uint64_t offset)
     41  struct radeon_cmdbuf *cs = sctx->sdma_cs;
     44  if (sctx->chip_class == GFX6) {
     56  si_need_dma_space(sctx, 4, dst, NULL);
     57  si_dma_emit_wait_idle(sctx);
  in si_sdma_clear_buffer():
     65  void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst, uint64_t offset,
     68  struct radeon_cmdbuf *cs = sctx->sdma_cs;
  [all …]

cik_sdma.c
  in encode_tile_info():
     35  static unsigned encode_tile_info(struct si_context *sctx, struct si_texture *tex, unsigned level,
     38  struct radeon_info *info = &sctx->screen->info;
  in si_sdma_v4_copy_texture():
     55  static bool si_sdma_v4_copy_texture(struct si_context *sctx, struct pipe_resource *dst,
     85  if (!si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty, dstz, ssrc, src_level, src_box))
     97  struct radeon_cmdbuf *cs = sctx->sdma_cs;
    105  si_need_dma_space(sctx, 13, &sdst->buffer, &ssrc->buffer);
    116  sctx->ws->cs_is_secure(cs) ? (1u << 2) : 0) |
    156  struct radeon_cmdbuf *cs = sctx->sdma_cs;
    173  si_need_dma_space(sctx, 14, &sdst->buffer, &ssrc->buffer);
    177  sctx->ws->cs_is_secure(cs) ? (1u << 2) : 0) |
  [all …]