/third_party/mesa3d/src/freedreno/vulkan/ |
D | tu_cs.c |
  14   tu_cs_init(struct tu_cs *cs,   in tu_cs_init() argument
  21   memset(cs, 0, sizeof(*cs));   in tu_cs_init()
  23   cs->device = device;   in tu_cs_init()
  24   cs->mode = mode;   in tu_cs_init()
  25   cs->next_bo_size = initial_size;   in tu_cs_init()
  32   tu_cs_init_external(struct tu_cs *cs, struct tu_device *device,   in tu_cs_init_external() argument
  35   memset(cs, 0, sizeof(*cs));   in tu_cs_init_external()
  37   cs->device = device;   in tu_cs_init_external()
  38   cs->mode = TU_CS_MODE_EXTERNAL;   in tu_cs_init_external()
  39   cs->start = cs->reserved_end = cs->cur = start;   in tu_cs_init_external()
  [all …]
|
D | tu_cs.h |
  111  tu_cs_init(struct tu_cs *cs,
  117  tu_cs_init_external(struct tu_cs *cs, struct tu_device *device,
  121  tu_cs_init_suballoc(struct tu_cs *cs, struct tu_device *device,
  125  tu_cs_finish(struct tu_cs *cs);
  128  tu_cs_begin(struct tu_cs *cs);
  131  tu_cs_end(struct tu_cs *cs);
  134  tu_cs_begin_sub_stream(struct tu_cs *cs, uint32_t size, struct tu_cs *sub_cs);
  137  tu_cs_alloc(struct tu_cs *cs,
  143  tu_cs_end_sub_stream(struct tu_cs *cs, struct tu_cs *sub_cs);
  146  tu_cs_end_draw_state(struct tu_cs *cs, struct tu_cs *sub_cs)   in tu_cs_end_draw_state() argument
  [all …]
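The tu_cs.h hits above outline turnip's command-stream API. A minimal hedged sketch of how these helpers are typically combined follows; it uses only the calls visible in these search results plus TU_CS_MODE_GROW, tu_cs_reserve_space(), and the exact tu_cs_init() parameter list, all of which are assumptions about the surrounding driver headers rather than verified against this tree.

  /* Hedged sketch, not from the indexed sources: build and tear down a
   * growable command stream, emitting one CP_EVENT_WRITE packet in the
   * style of the tu_cmd_buffer.c hit below. Assumes the turnip headers
   * (tu_cs.h and the a6xx register/packet definitions) are in scope. */
  static void example_emit_event(struct tu_device *device)
  {
     struct tu_cs cs;
     uint64_t seqno_iova = 0; /* hypothetical GPU address for the fence value */

     /* Assumed signature: device, mode, initial size in dwords. */
     tu_cs_init(&cs, device, TU_CS_MODE_GROW, 4096);

     tu_cs_begin(&cs);
     if (tu_cs_reserve_space(&cs, 5) == VK_SUCCESS) {   /* assumed helper */
        /* CP_EVENT_WRITE with a sequence-number write (4 payload dwords). */
        tu_cs_emit_pkt7(&cs, CP_EVENT_WRITE, 4);
        tu_cs_emit(&cs, CP_EVENT_WRITE_0_EVENT(PC_CCU_FLUSH_COLOR_TS));
        tu_cs_emit_qw(&cs, seqno_iova);
        tu_cs_emit(&cs, 0);
     }
     tu_cs_end(&cs);

     tu_cs_finish(&cs);
  }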
|
D | tu_query.c |
  586  struct tu_cs *cs,   in copy_query_value_gpu() argument
  595  tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);   in copy_query_value_gpu()
  598  tu_cs_emit(cs, mem_to_mem_flags);   in copy_query_value_gpu()
  599  tu_cs_emit_qw(cs, write_iova);   in copy_query_value_gpu()
  600  tu_cs_emit_qw(cs, src_iova);   in copy_query_value_gpu()
  605  struct tu_cs *cs,   in emit_copy_query_pool_results() argument
  623  tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);   in emit_copy_query_pool_results()
  635  tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);   in emit_copy_query_pool_results()
  636  tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |   in emit_copy_query_pool_results()
  638  tu_cs_emit_qw(cs, available_iova);   in emit_copy_query_pool_results()
  [all …]
|
D | tu_clear_blit.c |
  68   r2d_coords(struct tu_cs *cs,   in r2d_coords() argument
  73   tu_cs_emit_regs(cs,   in r2d_coords()
  80   tu_cs_emit_regs(cs,   in r2d_coords()
  88   r2d_clear_value(struct tu_cs *cs, enum pipe_format format, const VkClearValue *val)   in r2d_clear_value() argument
  143  tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);   in r2d_clear_value()
  144  tu_cs_emit_array(cs, clear_value, 4);   in r2d_clear_value()
  182  struct tu_cs *cs,   in r2d_src() argument
  200  tu_cs_emit_pkt4(cs, REG_A6XX_SP_PS_2D_SRC_INFO, 5);   in r2d_src()
  201  tu_cs_emit(cs, src_info);   in r2d_src()
  202  tu_cs_emit(cs, iview->SP_PS_2D_SRC_SIZE);   in r2d_src()
  [all …]
|
D | tu_cmd_buffer.c |
  22   struct tu_cs *cs,   in tu6_emit_event_write() argument
  39   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);   in tu6_emit_event_write()
  40   tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));   in tu6_emit_event_write()
  42   tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));   in tu6_emit_event_write()
  43   tu_cs_emit(cs, 0);   in tu6_emit_event_write()
  58   tu_cs_emit_regs(&cmd->cs, A6XX_PC_TESSFACTOR_ADDR(.qword = cmd->device->tess_bo->iova));   in tu6_lazy_emit_tessfactor_addr()
  66   struct tu_cs *cs,   in tu6_emit_flushes() argument
  84   tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);   in tu6_emit_flushes()
  87   tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);   in tu6_emit_flushes()
  89   tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);   in tu6_emit_flushes()
  [all …]
|
/third_party/mesa3d/src/amd/vulkan/ |
D | radv_cs.h |
  35   radeon_check_space(struct radeon_winsys *ws, struct radeon_cmdbuf *cs, unsigned needed)   in radeon_check_space() argument
  37   if (cs->max_dw - cs->cdw < needed)   in radeon_check_space()
  38   ws->cs_grow(cs, needed);   in radeon_check_space()
  39   return cs->cdw + needed;   in radeon_check_space()
  43   radeon_set_config_reg_seq(struct radeon_cmdbuf *cs, unsigned reg, unsigned num)   in radeon_set_config_reg_seq() argument
  46   assert(cs->cdw + 2 + num <= cs->max_dw);   in radeon_set_config_reg_seq()
  48   radeon_emit(cs, PKT3(PKT3_SET_CONFIG_REG, num, 0));   in radeon_set_config_reg_seq()
  49   radeon_emit(cs, (reg - SI_CONFIG_REG_OFFSET) >> 2);   in radeon_set_config_reg_seq()
  53   radeon_set_config_reg(struct radeon_cmdbuf *cs, unsigned reg, unsigned value)   in radeon_set_config_reg()
  55   radeon_set_config_reg_seq(cs, reg, 1);   in radeon_set_config_reg()
  [all …]
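The radv_cs.h hits show how RADV's register-write helpers expand into PKT3 packets. A hedged sketch reconstructed only from the lines above: the concrete register is illustrative, and PKT3()/PKT3_SET_CONFIG_REG/SI_CONFIG_REG_OFFSET are assumed to come from the AMD packet/register headers.

  /* Hedged sketch: what radeon_set_config_reg(cs, reg, value) boils down to,
   * with an explicit space check first. Mirrors the radv_cs.h hits above. */
  static void example_set_config_reg(struct radeon_winsys *ws,
                                     struct radeon_cmdbuf *cs,
                                     unsigned reg, unsigned value)
  {
     /* Grow the IB if fewer than 3 dwords remain. */
     radeon_check_space(ws, cs, 3);

     radeon_emit(cs, PKT3(PKT3_SET_CONFIG_REG, 1, 0));   /* header: 1 register */
     radeon_emit(cs, (reg - SI_CONFIG_REG_OFFSET) >> 2); /* dword offset       */
     radeon_emit(cs, value);                             /* register payload   */
  }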
|
D | si_cmd_buffer.c |
  37   struct radeon_cmdbuf *cs, unsigned raster_config,   in si_write_harvested_raster_configs() argument
  50   radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,   in si_write_harvested_raster_configs()
  54   radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,   in si_write_harvested_raster_configs()
  57   radeon_set_context_reg(cs, R_028350_PA_SC_RASTER_CONFIG, raster_config_se[se]);   in si_write_harvested_raster_configs()
  62   radeon_set_config_reg(cs, R_00802C_GRBM_GFX_INDEX,   in si_write_harvested_raster_configs()
  66   radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX,   in si_write_harvested_raster_configs()
  71   radeon_set_context_reg(cs, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);   in si_write_harvested_raster_configs()
  75   si_emit_compute(struct radv_device *device, struct radeon_cmdbuf *cs)   in si_emit_compute() argument
  79   radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);   in si_emit_compute()
  80   radeon_emit(cs, 0);   in si_emit_compute()
  [all …]
|
D | radv_sqtt.c |
  64   radv_emit_wait_for_idle(struct radv_device *device, struct radeon_cmdbuf *cs, int family)   in radv_emit_wait_for_idle() argument
  68   cs, device->physical_device->rad_info.gfx_level, NULL, 0,   in radv_emit_wait_for_idle()
  79   radv_emit_thread_trace_start(struct radv_device *device, struct radeon_cmdbuf *cs,   in radv_emit_thread_trace_start() argument
  97   cs, R_030800_GRBM_GFX_INDEX,   in radv_emit_thread_trace_start()
  103  cs, R_008D04_SQ_THREAD_TRACE_BUF0_SIZE,   in radv_emit_thread_trace_start()
  106  radeon_set_privileged_config_reg(cs, R_008D00_SQ_THREAD_TRACE_BUF0_BASE, shifted_va);   in radv_emit_thread_trace_start()
  109  cs, R_008D14_SQ_THREAD_TRACE_MASK,   in radv_emit_thread_trace_start()
  130  radeon_set_privileged_config_reg(cs, R_008D18_SQ_THREAD_TRACE_TOKEN_MASK,   in radv_emit_thread_trace_start()
  134  radeon_set_privileged_config_reg(cs, R_008D1C_SQ_THREAD_TRACE_CTRL,   in radv_emit_thread_trace_start()
  138  radeon_set_uconfig_reg(cs, R_030CDC_SQ_THREAD_TRACE_BASE2,   in radv_emit_thread_trace_start()
  [all …]
|
D | radv_perfcounter.c |
  33   radv_perfcounter_emit_shaders(struct radeon_cmdbuf *cs, unsigned shaders)   in radv_perfcounter_emit_shaders() argument
  35   radeon_set_uconfig_reg_seq(cs, R_036780_SQ_PERFCOUNTER_CTRL, 2);   in radv_perfcounter_emit_shaders()
  36   radeon_emit(cs, shaders & 0x7f);   in radv_perfcounter_emit_shaders()
  37   radeon_emit(cs, 0xffffffff);   in radv_perfcounter_emit_shaders()
  41   radv_emit_windowed_counters(struct radv_device *device, struct radeon_cmdbuf *cs, int family,   in radv_emit_windowed_counters() argument
  45   radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));   in radv_emit_windowed_counters()
  46   radeon_emit(cs, EVENT_TYPE(enable ? V_028A90_PERFCOUNTER_START : V_028A90_PERFCOUNTER_STOP) |   in radv_emit_windowed_counters()
  50   radeon_set_sh_reg(cs, R_00B82C_COMPUTE_PERFCOUNT_ENABLE, S_00B82C_PERFCOUNT_ENABLE(enable));   in radv_emit_windowed_counters()
  54   radv_perfcounter_emit_spm_reset(struct radeon_cmdbuf *cs)   in radv_perfcounter_emit_spm_reset() argument
  56   radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,   in radv_perfcounter_emit_spm_reset()
  [all …]
|
/third_party/flatbuffers/tests/FlatBuffers.Test/ |
D | FlatBuffers.Core.Test.csproj |
  9    <Compile Remove="Properties\AssemblyInfo.cs" />
  23   <Compile Include="..\..\net\FlatBuffers\ByteBuffer.cs">
  24   <Link>FlatBuffers\ByteBuffer.cs</Link>
  26   <Compile Include="..\..\net\FlatBuffers\ByteBufferUtil.cs">
  27   <Link>FlatBuffers\ByteBufferUtil.cs</Link>
  29   <Compile Include="..\..\net\FlatBuffers\IFlatbufferObject.cs">
  30   <Link>FlatBuffers\IFlatbufferObject.cs</Link>
  32   <Compile Include="..\..\net\FlatBuffers\Offset.cs">
  33   <Link>FlatBuffers\Offset.cs</Link>
  35   <Compile Include="..\..\net\FlatBuffers\FlatBufferBuilder.cs">
  [all …]
|
/third_party/mesa3d/src/gallium/winsys/amdgpu/drm/ |
D | amdgpu_cs.c |
  254  struct amdgpu_cs *cs = amdgpu_cs(rcs);   in amdgpu_cs_get_next_fence() local
  257  if (cs->noop)   in amdgpu_cs_get_next_fence()
  260  if (cs->next_fence) {   in amdgpu_cs_get_next_fence()
  261  amdgpu_fence_reference(&fence, cs->next_fence);   in amdgpu_cs_get_next_fence()
  265  fence = amdgpu_fence_create(cs->ctx,   in amdgpu_cs_get_next_fence()
  266  cs->csc->ib[IB_MAIN].ip_type);   in amdgpu_cs_get_next_fence()
  270  amdgpu_fence_reference(&cs->next_fence, fence);   in amdgpu_cs_get_next_fence()
  421  static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)   in amdgpu_cs_has_user_fence() argument
  423  return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&   in amdgpu_cs_has_user_fence()
  424  cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&   in amdgpu_cs_has_user_fence()
  [all …]
|
/third_party/mesa3d/src/gallium/drivers/r600/ |
D | r600_cs.h |
  45   struct radeon_cmdbuf *cs,   in radeon_cs_memory_below_limit() argument
  48   vram += (uint64_t)cs->used_vram_kb * 1024;   in radeon_cs_memory_below_limit()
  49   gtt += (uint64_t)cs->used_gart_kb * 1024;   in radeon_cs_memory_below_limit()
  76   &ring->cs, rbo->buf,   in radeon_add_to_buffer_list()
  106  !radeon_cs_memory_below_limit(rctx->screen, &ring->cs,   in radeon_add_to_buffer_list_check_mem()
  118  struct radeon_cmdbuf *cs = &ring->cs;   in r600_emit_reloc() local
  123  radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));   in r600_emit_reloc()
  124  radeon_emit(cs, reloc);   in r600_emit_reloc()
  128  static inline void radeon_set_config_reg_seq(struct radeon_cmdbuf *cs, unsigned reg, unsigned num)   in radeon_set_config_reg_seq() argument
  131  assert(cs->current.cdw + 2 + num <= cs->current.max_dw);   in radeon_set_config_reg_seq()
  [all …]
|
D | cayman_msaa.c |
  144  static void cayman_emit_msaa_sample_locs(struct radeon_cmdbuf *cs, int nr_samples)   in cayman_emit_msaa_sample_locs() argument
  149  radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 0);   in cayman_emit_msaa_sample_locs()
  150  radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, 0);   in cayman_emit_msaa_sample_locs()
  151  radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, 0);   in cayman_emit_msaa_sample_locs()
  152  radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, 0);   in cayman_emit_msaa_sample_locs()
  155  radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_2x[0]);   in cayman_emit_msaa_sample_locs()
  156  radeon_set_context_reg(cs, CM_R_028C08_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0, eg_sample_locs_2x[1]);   in cayman_emit_msaa_sample_locs()
  157  radeon_set_context_reg(cs, CM_R_028C18_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0, eg_sample_locs_2x[2]);   in cayman_emit_msaa_sample_locs()
  158  radeon_set_context_reg(cs, CM_R_028C28_PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0, eg_sample_locs_2x[3]);   in cayman_emit_msaa_sample_locs()
  161  radeon_set_context_reg(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, eg_sample_locs_4x[0]);   in cayman_emit_msaa_sample_locs()
  [all …]
|
D | r600_hw_context.c |
  37   if (radeon_emitted(&ctx->b.dma.cs, 0))   in r600_need_cs_space()
  40   if (!radeon_cs_memory_below_limit(ctx->b.screen, &ctx->b.gfx.cs,   in r600_need_cs_space()
  87   if (!ctx->b.ws->cs_check_space(&ctx->b.gfx.cs, num_dw)) {   in r600_need_cs_space()
  94   struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;   in r600_flush_emit() local
  125  radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));   in r600_flush_emit()
  126  radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));   in r600_flush_emit()
  130  radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));   in r600_flush_emit()
  131  radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));   in r600_flush_emit()
  138  radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);   in r600_flush_emit()
  144  radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));   in r600_flush_emit()
  [all …]
|
D | r600_streamout.c |
  157  struct radeon_cmdbuf *cs = &rctx->gfx.cs;   in r600_flush_vgt_streamout() local
  167  radeon_set_config_reg(cs, reg_strmout_cntl, 0);   in r600_flush_vgt_streamout()
  169  radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));   in r600_flush_vgt_streamout()
  170  radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));   in r600_flush_vgt_streamout()
  172  radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));   in r600_flush_vgt_streamout()
  173  radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */   in r600_flush_vgt_streamout()
  174  radeon_emit(cs, reg_strmout_cntl >> 2); /* register */   in r600_flush_vgt_streamout()
  175  radeon_emit(cs, 0);   in r600_flush_vgt_streamout()
  176  radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */   in r600_flush_vgt_streamout()
  177  radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */   in r600_flush_vgt_streamout()
  [all …]
|
/third_party/ffmpeg/libavutil/ |
D | camellia.c |
  184  static void generate_round_keys(AVCAMELLIA *cs, uint64_t Kl[2], uint64_t Kr[2], uint64_t Ka[2], uin…   in generate_round_keys() argument
  192  cs->Kw[0] = Kl[0];   in generate_round_keys()
  193  cs->Kw[1] = Kl[1];   in generate_round_keys()
  194  if (cs->key_bits == 128) {   in generate_round_keys()
  197  cs->K[2*i] = d[0];   in generate_round_keys()
  198  cs->K[2*i+1] = d[1];   in generate_round_keys()
  201  cs->K[9] = d[1];   in generate_round_keys()
  203  cs->Ke[0] = d[0];   in generate_round_keys()
  204  cs->Ke[1] = d[1];   in generate_round_keys()
  206  cs->Ke[2] = d[0];   in generate_round_keys()
  [all …]
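These camellia.c hits are the internal key-schedule helper behind libavutil's Camellia cipher. A hedged usage sketch of the public wrapper around it follows; the av_camellia_alloc/init/crypt names and argument order are recalled from libavutil/camellia.h and should be treated as assumptions rather than verified against this tree.

  #include <stdint.h>
  #include "libavutil/camellia.h"
  #include "libavutil/mem.h"

  /* Hedged sketch: encrypt one 16-byte block with a 128-bit key (ECB, no IV). */
  static int example_camellia_block(uint8_t out[16], const uint8_t in[16],
                                    const uint8_t key[16])
  {
      struct AVCAMELLIA *ctx = av_camellia_alloc();
      if (!ctx)
          return -1;
      av_camellia_init(ctx, key, 128);              /* key_bits: 128, 192 or 256 */
      av_camellia_crypt(ctx, out, in, 1, NULL, 0);  /* 1 block, NULL IV, encrypt */
      av_freep(&ctx);
      return 0;
  }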
|
D | twofish.c |
  187  static uint32_t MDS_mul(AVTWOFISH *cs, uint32_t X)   in MDS_mul() argument
  189  …return cs->MDS1[(X) & 0xff] ^ cs->MDS2[((X) >> 8) & 0xff] ^ cs->MDS3[((X) >> 16) & 0xff] ^ cs->MDS…   in MDS_mul()
  192  static void precomputeMDS(AVTWOFISH *cs)   in precomputeMDS() argument
  198  tf_h0(y, cs->S, cs->ksize);   in precomputeMDS()
  199  …cs->MDS1[i] = ((uint32_t)y[0]) ^ ((uint32_t)MD1[y[0]] << 8) ^ ((uint32_t)MD2[y[0]] << 16) ^ ((uint…   in precomputeMDS()
  200  …cs->MDS2[i] = ((uint32_t)MD2[y[1]]) ^ ((uint32_t)MD2[y[1]] << 8) ^ ((uint32_t)MD1[y[1]] << 16) ^ (…   in precomputeMDS()
  201  …cs->MDS3[i] = ((uint32_t)MD1[y[2]]) ^ ((uint32_t)MD2[y[2]] << 8) ^ ((uint32_t)y[2] << 16) ^ ((uint…   in precomputeMDS()
  202  …cs->MDS4[i] = ((uint32_t)MD1[y[3]]) ^ ((uint32_t)y[3] << 8) ^ ((uint32_t)MD2[y[3]] << 16) ^ ((uint…   in precomputeMDS()
  206  static void twofish_encrypt(AVTWOFISH *cs, uint8_t *dst, const uint8_t *src)   in twofish_encrypt() argument
  210  P[0] = AV_RL32(src) ^ cs->K[0];   in twofish_encrypt()
  [all …]
|
/third_party/mesa3d/src/gallium/winsys/radeon/drm/ |
D | radeon_drm_cs.c |
  132  csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;   in radeon_init_cs_context()
  183  struct radeon_drm_cs *cs;   in radeon_drm_cs_create() local
  185  cs = CALLOC_STRUCT(radeon_drm_cs);   in radeon_drm_cs_create()
  186  if (!cs) {   in radeon_drm_cs_create()
  189  util_queue_fence_init(&cs->flush_completed);   in radeon_drm_cs_create()
  191  cs->ws = ws;   in radeon_drm_cs_create()
  192  cs->flush_cs = flush;   in radeon_drm_cs_create()
  193  cs->flush_data = flush_ctx;   in radeon_drm_cs_create()
  195  if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {   in radeon_drm_cs_create()
  196  FREE(cs);   in radeon_drm_cs_create()
  [all …]
|
/third_party/mesa3d/src/amd/vulkan/winsys/amdgpu/ |
D | radv_amdgpu_cs.c |
  179  struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);   in radv_amdgpu_cs_destroy() local
  181  if (cs->ib_buffer)   in radv_amdgpu_cs_destroy()
  182  cs->ws->base.buffer_destroy(&cs->ws->base, cs->ib_buffer);   in radv_amdgpu_cs_destroy()
  184  free(cs->base.buf);   in radv_amdgpu_cs_destroy()
  186  for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)   in radv_amdgpu_cs_destroy()
  187  cs->ws->base.buffer_destroy(&cs->ws->base, cs->old_ib_buffers[i].bo);   in radv_amdgpu_cs_destroy()
  189  for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {   in radv_amdgpu_cs_destroy()
  190  free(cs->old_cs_buffers[i].buf);   in radv_amdgpu_cs_destroy()
  193  free(cs->old_cs_buffers);   in radv_amdgpu_cs_destroy()
  194  free(cs->old_ib_buffers);   in radv_amdgpu_cs_destroy()
  [all …]
|
/third_party/libdrm/radeon/ |
D | radeon_cs.h |
  68   extern int radeon_cs_begin(struct radeon_cs *cs,
  72   extern int radeon_cs_end(struct radeon_cs *cs,
  76   extern int radeon_cs_emit(struct radeon_cs *cs);
  77   extern int radeon_cs_destroy(struct radeon_cs *cs);
  78   extern int radeon_cs_erase(struct radeon_cs *cs);
  79   extern int radeon_cs_need_flush(struct radeon_cs *cs);
  80   extern void radeon_cs_print(struct radeon_cs *cs, FILE *file);
  81   extern void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit);
  82   extern void radeon_cs_space_set_flush(struct radeon_cs *cs, void (*fn)(void *), void *data);
  83   extern int radeon_cs_write_reloc(struct radeon_cs *cs,
  [all …]
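These libdrm declarations imply the usual userspace flow: open a section with radeon_cs_begin(), stream dwords into it, close it with radeon_cs_end(), then submit with radeon_cs_emit() and reuse via radeon_cs_erase(). A hedged sketch follows; the begin/end signatures are truncated in the hits above, so the debug file/func/line arguments and radeon_cs_write_dword() are recalled from libdrm and are assumptions, and the 0xc0001000 header is simply the dword seen in the radeon_cs_gem.c hit below, used here as an illustrative NOP-style packet.

  /* Hedged sketch: fill and submit a tiny command stream (assumed signatures). */
  static int example_emit_nop(struct radeon_cs *cs)
  {
      if (radeon_cs_begin(cs, 2, __FILE__, __func__, __LINE__))
          return -1;

      radeon_cs_write_dword(cs, 0xc0001000);  /* packet header, as written in radeon_cs_gem.c */
      radeon_cs_write_dword(cs, 0);           /* payload dword */

      radeon_cs_end(cs, __FILE__, __func__, __LINE__);
      return radeon_cs_emit(cs);              /* submit; radeon_cs_erase() would reset for reuse */
  }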
|
D | radeon_cs_gem.c |
  74   struct drm_radeon_cs cs;   member
  172  static int cs_gem_write_reloc(struct radeon_cs_int *cs,   in cs_gem_write_reloc() argument
  179  struct cs_gem *csg = (struct cs_gem*)cs;   in cs_gem_write_reloc()
  201  if ((atomic_read((atomic_t *)radeon_gem_get_reloc_in_cs(bo)) & cs->id)) {   in cs_gem_write_reloc()
  205  for(i = cs->crelocs; i != 0;) {   in cs_gem_write_reloc()
  234  radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);   in cs_gem_write_reloc()
  235  radeon_cs_write_dword((struct radeon_cs *)cs, idx);   in cs_gem_write_reloc()
  255  cs->relocs = csg->relocs = tmp;   in cs_gem_write_reloc()
  269  atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);   in cs_gem_write_reloc()
  270  cs->relocs_total_size += boi->size;   in cs_gem_write_reloc()
  [all …]
|
/third_party/mesa3d/src/gallium/drivers/etnaviv/ |
D | etnaviv_state.c |
  54   struct compiled_stencil_ref *cs = &ctx->stencil_ref;   in etna_set_stencil_ref() local
  59   cs->PE_STENCIL_CONFIG[i] =   in etna_set_stencil_ref()
  61   cs->PE_STENCIL_CONFIG_EXT[i] =   in etna_set_stencil_ref()
  135  struct compiled_framebuffer_state *cs = &ctx->framebuffer;   in etna_set_framebuffer_state() local
  160  cs->PE_COLOR_FORMAT = VIVS_PE_COLOR_FORMAT_FORMAT_EXT(fmt) |   in etna_set_framebuffer_state()
  163  cs->PE_COLOR_FORMAT = VIVS_PE_COLOR_FORMAT_FORMAT(fmt);   in etna_set_framebuffer_state()
  168  cs->PE_COLOR_FORMAT |=   in etna_set_framebuffer_state()
  173  cs->PE_COLOR_FORMAT |= COND(color_supertiled, VIVS_PE_COLOR_FORMAT_SUPER_TILED_NEW);   in etna_set_framebuffer_state()
  195  cs->PE_PIPE_COLOR_ADDR[i] = cbuf->reloc[i];   in etna_set_framebuffer_state()
  196  cs->PE_PIPE_COLOR_ADDR[i].flags = ETNA_RELOC_READ | ETNA_RELOC_WRITE;   in etna_set_framebuffer_state()
  [all …]
|
/third_party/mesa3d/src/amd/common/ |
D | ac_shadowed_regs.c |
  1315  static void gfx9_emulate_clear_state(struct radeon_cmdbuf *cs,   in gfx9_emulate_clear_state() argument
  1941  set_context_reg_seq_array(cs, R_028000_DB_RENDER_CONTROL, SET(DbRenderControlGfx9));   in gfx9_emulate_clear_state()
  1942  set_context_reg_seq_array(cs, R_0281E8_COHER_DEST_BASE_HI_0, SET(CoherDestBaseHi0Gfx9));   in gfx9_emulate_clear_state()
  1943  set_context_reg_seq_array(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,   in gfx9_emulate_clear_state()
  1945  set_context_reg_seq_array(cs, R_028414_CB_BLEND_RED, SET(CbBlendRedGfx9));   in gfx9_emulate_clear_state()
  1946  set_context_reg_seq_array(cs, R_028644_SPI_PS_INPUT_CNTL_0, SET(SpiPsInputCntl0Gfx9));   in gfx9_emulate_clear_state()
  1947  set_context_reg_seq_array(cs, R_028754_SX_PS_DOWNCONVERT, SET(SxPsDownconvertGfx9));   in gfx9_emulate_clear_state()
  1948  set_context_reg_seq_array(cs, R_028800_DB_DEPTH_CONTROL, SET(DbDepthControlGfx9));   in gfx9_emulate_clear_state()
  1949  set_context_reg_seq_array(cs, R_028A00_PA_SU_POINT_SIZE, SET(PaSuPointSizeGfx9));   in gfx9_emulate_clear_state()
  1950  set_context_reg_seq_array(cs, R_028A18_VGT_HOS_MAX_TESS_LEVEL, SET(VgtHosMaxTessLevelGfx9));   in gfx9_emulate_clear_state()
  [all …]
|
/third_party/lwip/src/netif/ppp/ |
D | vj.c |
  164  struct cstate *cs = comp->last_cs->cs_next;   in vj_compress_tcp() local
  221  if (!ip4_addr_cmp(&ip->src, &cs->cs_ip.src)   in vj_compress_tcp()
  222  || !ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest)   in vj_compress_tcp()
  223  || (*(struct vj_u32_t*)th).v != (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) {   in vj_compress_tcp()
  240  lcs = cs; cs = cs->cs_next;   in vj_compress_tcp()
  242  if (ip4_addr_cmp(&ip->src, &cs->cs_ip.src)   in vj_compress_tcp()
  243  && ip4_addr_cmp(&ip->dest, &cs->cs_ip.dest)   in vj_compress_tcp()
  244  && (*(struct vj_u32_t*)th).v == (((struct vj_u32_t*)&cs->cs_ip)[IPH_HL(&cs->cs_ip)]).v) {   in vj_compress_tcp()
  247  } while (cs != lastcs);   in vj_compress_tcp()
  265  if (cs == lastcs) {   in vj_compress_tcp()
  [all …]
|
/third_party/mesa3d/src/gallium/drivers/d3d12/ci/ |
D | d3d12-quick_shader.txt |
  1    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-float-float-float: skip
  2    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-int-int-int: skip
  3    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-ivec2-ivec2-ivec2: skip
  4    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-ivec3-ivec3-ivec3: skip
  5    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-ivec4-ivec4-ivec4: skip
  6    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-uint-uint-uint: skip
  7    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-uvec2-uvec2-uvec2: skip
  8    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-uvec3-uvec3-uvec3: skip
  9    spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-uvec4-uvec4-uvec4: skip
  10   spec/amd_shader_trinary_minmax/execution/built-in-functions/cs-max3-vec2-vec2-vec2: skip
  [all …]
|