/third_party/mesa3d/src/mesa/drivers/dri/i965/
brw_pipe_control.c
    37  brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)  in brw_emit_pipe_control_flush()
    39  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_emit_pipe_control_flush()
    55  brw_emit_end_of_pipe_sync(brw, (flags & PIPE_CONTROL_CACHE_FLUSH_BITS));  in brw_emit_pipe_control_flush()
    59  brw->vtbl.emit_raw_pipe_control(brw, flags, NULL, 0, 0);  in brw_emit_pipe_control_flush()
    71  brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,  in brw_emit_pipe_control_write()
    75  brw->vtbl.emit_raw_pipe_control(brw, flags, bo, offset, imm);  in brw_emit_pipe_control_write()
    91  brw_emit_depth_stall_flushes(struct brw_context *brw)  in brw_emit_depth_stall_flushes()
    93  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_emit_depth_stall_flushes()
   105  brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);  in brw_emit_depth_stall_flushes()
   106  brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH);  in brw_emit_depth_stall_flushes()
   [all …]
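The brw_pipe_control.c hits show an end-of-pipe sync being emitted for the cache-flush bits (line 55) before the per-generation raw PIPE_CONTROL hook handles the rest (line 59). A minimal standalone sketch of that dispatch; the flag bits and the guard condition here are assumptions (the actual guard is elided in the excerpt), and the types are hypothetical stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for a few PIPE_CONTROL flag bits. */
#define PC_DEPTH_CACHE_FLUSH        (1u << 0)
#define PC_RENDER_TARGET_FLUSH      (1u << 1)
#define PC_TEXTURE_CACHE_INVALIDATE (1u << 2)
#define PC_CACHE_FLUSH_BITS      (PC_DEPTH_CACHE_FLUSH | PC_RENDER_TARGET_FLUSH)
#define PC_CACHE_INVALIDATE_BITS (PC_TEXTURE_CACHE_INVALIDATE)

struct ctx {
   /* Per-generation hook, mirroring brw->vtbl.emit_raw_pipe_control. */
   void (*emit_raw_pipe_control)(struct ctx *ctx, uint32_t flags);
};

static void emit_end_of_pipe_sync(struct ctx *ctx, uint32_t flags)
{
   (void)ctx;
   printf("end-of-pipe sync, flags 0x%x\n", flags);
}

/* Assumed shape of the helper: a request that mixes flush and invalidate
 * bits is split -- an end-of-pipe sync covers the flushes, then the raw
 * PIPE_CONTROL emitter gets whatever is left. */
static void emit_pipe_control_flush(struct ctx *ctx, uint32_t flags)
{
   if ((flags & PC_CACHE_FLUSH_BITS) && (flags & PC_CACHE_INVALIDATE_BITS)) {
      emit_end_of_pipe_sync(ctx, flags & PC_CACHE_FLUSH_BITS);
      flags &= ~PC_CACHE_FLUSH_BITS;
   }
   ctx->emit_raw_pipe_control(ctx, flags);
}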
brw_state_upload.c
    50  brw_enable_obj_preemption(struct brw_context *brw, bool enable)  in brw_enable_obj_preemption()
    52  ASSERTED const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_enable_obj_preemption()
    55  if (enable == brw->object_preemption)  in brw_enable_obj_preemption()
    59  brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH);  in brw_enable_obj_preemption()
    65  brw_load_register_imm32(brw, CS_CHICKEN1,  in brw_enable_obj_preemption()
    68  brw->object_preemption = enable;  in brw_enable_obj_preemption()
    72  brw_upload_gfx11_slice_hashing_state(struct brw_context *brw)  in brw_upload_gfx11_slice_hashing_state()
    74  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_upload_gfx11_slice_hashing_state()
    83  uint32_t *map = brw_state_batch(brw, size, 64, &hash_address);  in brw_upload_gfx11_slice_hashing_state()
   130  OUT_RELOC(brw->batch.state.bo, 0, hash_address | 1);  in brw_upload_gfx11_slice_hashing_state()
   [all …]
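brw_enable_obj_preemption() guards a chicken-register write with a cached-value check (line 55), so redundant toggles skip the end-of-pipe sync and the register write entirely. A standalone sketch of that guard; the emitters are stubs and the register offset and value are placeholders, not the real CS_CHICKEN1 programming:

#include <stdbool.h>
#include <stdint.h>

struct ctx { bool object_preemption; };   /* cached value of the last write */

static void end_of_pipe_sync(struct ctx *c) { (void)c; }
static void load_register_imm32(struct ctx *c, uint32_t reg, uint32_t val)
{ (void)c; (void)reg; (void)val; }

#define CS_CHICKEN1_PLACEHOLDER 0x0   /* not the real register offset */

/* Skip the costly sync + register write when the value is unchanged. */
static void enable_obj_preemption(struct ctx *c, bool enable)
{
   if (enable == c->object_preemption)
      return;

   end_of_pipe_sync(c);
   /* The real write is a masked chicken-bit update; 1/0 is illustrative. */
   load_register_imm32(c, CS_CHICKEN1_PLACEHOLDER, enable ? 1u : 0u);
   c->object_preemption = enable;
}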
brw_urb.c
   100  static bool check_urb_layout(struct brw_context *brw)  in check_urb_layout()
   102  brw->urb.vs_start = 0;  in check_urb_layout()
   103  brw->urb.gs_start = brw->urb.nr_vs_entries * brw->urb.vsize;  in check_urb_layout()
   104  brw->urb.clip_start = brw->urb.gs_start + brw->urb.nr_gs_entries * brw->urb.vsize;  in check_urb_layout()
   105  brw->urb.sf_start = brw->urb.clip_start + brw->urb.nr_clip_entries * brw->urb.vsize;  in check_urb_layout()
   106  brw->urb.cs_start = brw->urb.sf_start + brw->urb.nr_sf_entries * brw->urb.sfsize;  in check_urb_layout()
   108  return brw->urb.cs_start + brw->urb.nr_cs_entries *  in check_urb_layout()
   109  brw->urb.csize <= brw->urb.size;  in check_urb_layout()
   116  brw_calculate_urb_fence(struct brw_context *brw, unsigned csize,  in brw_calculate_urb_fence()
   119  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_calculate_urb_fence()
   [all …]
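check_urb_layout() lays the fixed-function URB sections out back to back and verifies that the last one still fits. The same arithmetic in a self-contained form, with a hypothetical struct standing in for the brw->urb fields:

#include <stdbool.h>

/* Hypothetical mirror of the URB bookkeeping kept in struct brw_context. */
struct urb_layout {
   unsigned vsize, sfsize, csize, size;
   unsigned nr_vs_entries, nr_gs_entries, nr_clip_entries,
            nr_sf_entries, nr_cs_entries;
   unsigned vs_start, gs_start, clip_start, sf_start, cs_start;
};

/* Place VS, GS, CLIP, SF and CS (constant) sections consecutively and
 * check that the final section still fits inside the URB. */
static bool check_urb_layout(struct urb_layout *urb)
{
   urb->vs_start   = 0;
   urb->gs_start   = urb->nr_vs_entries * urb->vsize;
   urb->clip_start = urb->gs_start   + urb->nr_gs_entries   * urb->vsize;
   urb->sf_start   = urb->clip_start + urb->nr_clip_entries * urb->vsize;
   urb->cs_start   = urb->sf_start   + urb->nr_sf_entries   * urb->sfsize;

   return urb->cs_start + urb->nr_cs_entries * urb->csize <= urb->size;
}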
brw_draw.c
    82  brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)  in brw_set_prim()
    84  struct gl_context *ctx = &brw->ctx;  in brw_set_prim()
   104  if (hw_prim != brw->primitive) {  in brw_set_prim()
   105  brw->primitive = hw_prim;  in brw_set_prim()
   106  brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;  in brw_set_prim()
   108  if (reduced_prim[prim->mode] != brw->reduced_primitive) {  in brw_set_prim()
   109  brw->reduced_primitive = reduced_prim[prim->mode];  in brw_set_prim()
   110  brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;  in brw_set_prim()
   116  gfx6_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)  in gfx6_set_prim()
   118  const struct gl_context *ctx = &brw->ctx;  in gfx6_set_prim()
   [all …]
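brw_set_prim() only raises BRW_NEW_PRIMITIVE / BRW_NEW_REDUCED_PRIMITIVE when the value actually changed, so draws with the same primitive re-emit nothing. A minimal sketch of that change-detection, with hypothetical dirty bits and a plain struct in place of the context:

#include <stdint.h>

/* Hypothetical dirty bits, mirroring the BRW_NEW_* flags above. */
#define NEW_PRIMITIVE          (1u << 0)
#define NEW_REDUCED_PRIMITIVE  (1u << 1)

struct draw_state {
   int primitive;          /* last hardware primitive programmed */
   int reduced_primitive;  /* points / lines / triangles class */
   uint32_t dirty;
};

/* Flag state as dirty only when the primitive actually changes, so that
 * redundant state re-emission is avoided between consecutive draws. */
static void set_prim(struct draw_state *s, int hw_prim, int reduced)
{
   if (hw_prim != s->primitive) {
      s->primitive = hw_prim;
      s->dirty |= NEW_PRIMITIVE;
   }
   if (reduced != s->reduced_primitive) {
      s->reduced_primitive = reduced;
      s->dirty |= NEW_REDUCED_PRIMITIVE;
   }
}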
brw_binding_tables.c
    54  brw_upload_binding_table(struct brw_context *brw,  in brw_upload_binding_table()
    59  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_upload_binding_table()
    71  brw, &stage_state->surf_offset[  in brw_upload_binding_table()
    73  brw->shader_time.bo, 0, ISL_FORMAT_RAW,  in brw_upload_binding_table()
    74  brw->shader_time.bo->size, 1, RELOC_WRITE);  in brw_upload_binding_table()
    77  brw_state_batch(brw, prog_data->binding_table.size_bytes,  in brw_upload_binding_table()
    85  brw->ctx.NewDriverState |= BRW_NEW_BINDING_TABLE_POINTERS;  in brw_upload_binding_table()
   105  brw_vs_upload_binding_table(struct brw_context *brw)  in brw_vs_upload_binding_table()
   108  const struct brw_stage_prog_data *prog_data = brw->vs.base.prog_data;  in brw_vs_upload_binding_table()
   109  brw_upload_binding_table(brw,  in brw_vs_upload_binding_table()
   [all …]
brw_curbe.c
    77  static void calculate_curbe_offsets( struct brw_context *brw )  in calculate_curbe_offsets()
    79  struct gl_context *ctx = &brw->ctx;  in calculate_curbe_offsets()
    81  const GLuint nr_fp_regs = (brw->wm.base.prog_data->nr_params + 15) / 16;  in calculate_curbe_offsets()
    84  const GLuint nr_vp_regs = (brw->vs.base.prog_data->nr_params + 15) / 16;  in calculate_curbe_offsets()
   111  if (nr_fp_regs > brw->curbe.wm_size ||  in calculate_curbe_offsets()
   112  nr_vp_regs > brw->curbe.vs_size ||  in calculate_curbe_offsets()
   113  nr_clip_regs != brw->curbe.clip_size ||  in calculate_curbe_offsets()
   114  (total_regs < brw->curbe.total_size / 4 &&  in calculate_curbe_offsets()
   115  brw->curbe.total_size > 16)) {  in calculate_curbe_offsets()
   122  brw->curbe.wm_start = reg;  in calculate_curbe_offsets()
   [all …]
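calculate_curbe_offsets() rounds each stage's constant count up to whole CURBE registers (16 floats each, hence the "+ 15) / 16") and only re-lays out the CURBE when a section grows or the total shrinks well below what is currently allocated. A standalone sketch of those two pieces, using a hypothetical layout struct in place of brw->curbe:

#include <stdbool.h>

/* Hypothetical per-stage CURBE sizes, in registers. */
struct curbe_layout {
   unsigned wm_size, vs_size, clip_size, total_size;
};

/* Each CURBE register holds 16 floats, so constant counts round up. */
static unsigned curbe_regs(unsigned nr_params)
{
   return (nr_params + 15) / 16;
}

/* Re-layout only when a section grew, or when the total shrank enough to
 * be worth reclaiming -- the same hysteresis as lines 111-115 above. */
static bool curbe_needs_relayout(const struct curbe_layout *c,
                                 unsigned nr_fp_regs, unsigned nr_vp_regs,
                                 unsigned nr_clip_regs, unsigned total_regs)
{
   return nr_fp_regs > c->wm_size ||
          nr_vp_regs > c->vs_size ||
          nr_clip_regs != c->clip_size ||
          (total_regs < c->total_size / 4 && c->total_size > 16);
}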
genX_blorp_exec.c
    48  struct brw_context *brw = batch->driver_batch;  in blorp_emit_dwords()
    50  brw_batch_begin(brw, n);  in blorp_emit_dwords()
    51  uint32_t *map = brw->batch.map_next;  in blorp_emit_dwords()
    52  brw->batch.map_next += n;  in blorp_emit_dwords()
    53  brw_batch_advance(brw);  in blorp_emit_dwords()
    62  struct brw_context *brw = batch->driver_batch;  in blorp_emit_reloc()
    65  if (GFX_VER < 6 && brw_ptr_in_state_buffer(&brw->batch, location)) {  in blorp_emit_reloc()
    66  offset = (char *)location - (char *)brw->batch.state.map;  in blorp_emit_reloc()
    67  return brw_state_reloc(&brw->batch, offset,  in blorp_emit_reloc()
    72  assert(!brw_ptr_in_state_buffer(&brw->batch, location));  in blorp_emit_reloc()
   [all …]
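blorp_emit_dwords() reserves n dwords by handing back the current map pointer and bumping map_next (lines 51-52). A toy version of that bump allocator; the fixed-size buffer and the missing space/wrap checks are simplifications, and the caller is assumed to point map_next into a live mapping first:

#include <stdint.h>

/* Toy batch: a fixed buffer plus a bump pointer, standing in for
 * brw->batch.map_next.  Initialize with b.map_next = b.buf. */
struct batch {
   uint32_t  buf[4096];
   uint32_t *map_next;
};

/* Reserve n dwords: return where they start, then advance the pointer.
 * The real helper also brackets this with begin/advance bookkeeping. */
static uint32_t *emit_dwords(struct batch *b, unsigned n)
{
   uint32_t *map = b->map_next;
   b->map_next += n;
   return map;
}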
brw_compute.c
    40  struct brw_context *brw = brw_context(ctx);  in brw_dispatch_compute_common()
    49  brw_validate_textures(brw);  in brw_dispatch_compute_common()
    51  brw_predraw_resolve_inputs(brw, false, NULL);  in brw_dispatch_compute_common()
    56  brw_batch_require_space(brw, 600);  in brw_dispatch_compute_common()
    57  brw_require_statebuffer_space(brw, 2500);  in brw_dispatch_compute_common()
    58  brw_batch_save_state(brw);  in brw_dispatch_compute_common()
    59  fail_next = brw_batch_saved_state_is_empty(brw);  in brw_dispatch_compute_common()
    62  brw->batch.no_wrap = true;  in brw_dispatch_compute_common()
    63  brw_upload_compute_state(brw);  in brw_dispatch_compute_common()
    65  brw->vtbl.emit_compute_walker(brw);  in brw_dispatch_compute_common()
   [all …]
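brw_dispatch_compute_common() saves a known-good batch position before uploading compute state (lines 58-59), so if the batch fills up it can roll back, flush, and retry once in a fresh batch; if the saved state was already empty, a failure is final. A self-contained toy of that save/retry shape -- the batch, its sizes, and the retry loop are invented for illustration, only the overall pattern follows the excerpt:

#include <stdbool.h>
#include <stdio.h>

struct batch { int used, size, saved; };

static void save_state(struct batch *b)      { b->saved = b->used; }
static bool saved_is_empty(struct batch *b)  { return b->saved == 0; }
static void reset_to_saved(struct batch *b)  { b->used = b->saved; }
static void flush(struct batch *b)           { b->used = 0; b->saved = 0; }

static bool upload_compute_state(struct batch *b)
{
   if (b->size - b->used < 600)    /* pretend compute state needs 600 dwords */
      return false;
   b->used += 600;
   return true;
}

/* Save a restart point, try to emit; on overflow roll back, flush, and
 * retry exactly once in an empty batch. */
static bool dispatch_compute(struct batch *b)
{
   save_state(b);
   bool fail_next = saved_is_empty(b);

   while (!upload_compute_state(b)) {
      if (fail_next)
         return false;             /* did not fit even in an empty batch */
      reset_to_saved(b);
      flush(b);
      fail_next = true;
   }
   return true;
}

int main(void)
{
   struct batch b = { .used = 7800, .size = 8192, .saved = 0 };
   printf("dispatch %s\n", dispatch_compute(&b) ? "succeeded" : "failed");
   return 0;
}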
brw_misc_state.c
    55  upload_pipelined_state_pointers(struct brw_context *brw)  in upload_pipelined_state_pointers()
    57  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in upload_pipelined_state_pointers()
    68  OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);  in upload_pipelined_state_pointers()
    69  if (brw->ff_gs.prog_active)  in upload_pipelined_state_pointers()
    70  OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);  in upload_pipelined_state_pointers()
    73  OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);  in upload_pipelined_state_pointers()
    74  OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);  in upload_pipelined_state_pointers()
    75  OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);  in upload_pipelined_state_pointers()
    76  OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);  in upload_pipelined_state_pointers()
    79  brw->ctx.NewDriverState |= BRW_NEW_PSP;  in upload_pipelined_state_pointers()
   [all …]
brw_state.h
    98  void gfx4_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   101  void gfx45_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   104  void gfx5_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   107  void gfx6_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   110  void gfx7_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   113  void gfx75_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   116  void gfx8_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   119  void gfx9_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   122  void gfx11_emit_raw_pipe_control(struct brw_context *brw, uint32_t flags,
   127  brw_state_dirty(const struct brw_context *brw,  in brw_state_dirty()
   [all …]
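brw_state.h declares one raw PIPE_CONTROL emitter per hardware generation; the driver installs the right one in brw->vtbl so that the hot path is a single indirect call. A sketch of that selection step with empty stubs; the "verx10"-style version number and the threshold values are assumptions about how the choice is keyed, not taken from the excerpt:

#include <stdint.h>

struct ctx;   /* stand-in for struct brw_context */

typedef void (*emit_raw_pipe_control_fn)(struct ctx *ctx, uint32_t flags);

/* Empty stubs standing in for three of the per-generation emitters. */
static void gfx6_emit(struct ctx *ctx, uint32_t flags) { (void)ctx; (void)flags; }
static void gfx7_emit(struct ctx *ctx, uint32_t flags) { (void)ctx; (void)flags; }
static void gfx8_emit(struct ctx *ctx, uint32_t flags) { (void)ctx; (void)flags; }

/* Pick the emitter once, at context creation, so every later flush is a
 * single indirect call through the vtable. */
static emit_raw_pipe_control_fn select_pipe_control_emitter(int verx10)
{
   if (verx10 >= 80) return gfx8_emit;
   if (verx10 >= 70) return gfx7_emit;
   return gfx6_emit;
}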
brw_wm_surface_state.c
    81  get_isl_surf(struct brw_context *brw, struct brw_mipmap_tree *mt,  in get_isl_surf()
    88  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in get_isl_surf()
   137  brw_emit_surface_state(struct brw_context *brw,  in brw_emit_surface_state()
   144  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_emit_surface_state()
   151  get_isl_surf(brw, mt, target, &view, &tile_x, &tile_y, &offset, &surf);  in brw_emit_surface_state()
   172  void *state = brw_state_batch(brw,  in brw_emit_surface_state()
   173  brw->isl_dev.ss.size,  in brw_emit_surface_state()
   174  brw->isl_dev.ss.align,  in brw_emit_surface_state()
   177  isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,  in brw_emit_surface_state()
   178  .address = brw_state_reloc(&brw->batch,  in brw_emit_surface_state()
   [all …]
brw_conditional_render.c
    40  set_predicate_enable(struct brw_context *brw,  in set_predicate_enable()
    44  brw->predicate.state = BRW_PREDICATE_STATE_RENDER;  in set_predicate_enable()
    46  brw->predicate.state = BRW_PREDICATE_STATE_DONT_RENDER;  in set_predicate_enable()
    50  set_predicate_for_overflow_query(struct brw_context *brw,  in set_predicate_for_overflow_query()
    54  if (!can_do_mi_math_and_lrr(brw->screen)) {  in set_predicate_for_overflow_query()
    55  brw->predicate.state = BRW_PREDICATE_STATE_STALL_FOR_QUERY;  in set_predicate_for_overflow_query()
    59  brw->predicate.state = BRW_PREDICATE_STATE_USE_BIT;  in set_predicate_for_overflow_query()
    65  brw_emit_pipe_control_flush(brw, PIPE_CONTROL_FLUSH_ENABLE);  in set_predicate_for_overflow_query()
    67  hsw_overflow_result_to_gpr0(brw, query, count);  in set_predicate_for_overflow_query()
    68  brw_load_register_reg64(brw, MI_PREDICATE_SRC0, HSW_CS_GPR(0));  in set_predicate_for_overflow_query()
   [all …]
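set_predicate_for_overflow_query() falls back to a CPU stall whenever MI_MATH / MI_LOAD_REGISTER_REG are unavailable (line 54), and otherwise lets the GPU evaluate the predicate bit itself. That decision reduced to a standalone helper, with the two outcomes mirroring the BRW_PREDICATE_STATE_* values above:

#include <stdbool.h>

enum predicate_state {
   PREDICATE_STALL_FOR_QUERY,   /* CPU waits for the query result */
   PREDICATE_USE_BIT            /* GPU evaluates MI_PREDICATE itself */
};

/* Without MI_MATH and MI_LOAD_REGISTER_REG the overflow comparison cannot
 * be done on the GPU, so conditional rendering must stall instead. */
static enum predicate_state
pick_overflow_predicate(bool can_do_mi_math_and_lrr)
{
   return can_do_mi_math_and_lrr ? PREDICATE_USE_BIT
                                 : PREDICATE_STALL_FOR_QUERY;
}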
brw_context.c
   109  const struct brw_context *const brw = brw_context(ctx);  in brw_get_string()
   117  (GLubyte *) brw_get_renderer_string(brw->screen);  in brw_get_string()
   128  struct brw_context *brw = brw_context(ctx);  in brw_set_background_context()
   129  __DRIcontext *driContext = brw->driContext;  in brw_set_background_context()
   167  struct brw_context *brw = brw_context(ctx);  in brw_import_memoryobj_fd()
   170  memory_object->bo = brw_bo_gem_create_from_prime(brw->bufmgr, fd);  in brw_import_memoryobj_fd()
   179  struct brw_context *brw = brw_context(ctx);  in brw_viewport()
   180  __DRIcontext *driContext = brw->driContext;  in brw_viewport()
   193  struct brw_context *brw = brw_context(ctx);  in brw_update_framebuffer()
   198  brw_quantize_num_samples(brw->screen, fb->DefaultGeometry.NumSamples);  in brw_update_framebuffer()
   [all …]
brw_batch.c
    57  brw_batch_reset(struct brw_context *brw);
    59  brw_new_batch(struct brw_context *brw);
    94  struct brw_context *brw = v_brw;  in decode_get_bo()
    95  struct brw_batch *batch = &brw->batch;  in decode_get_bo()
   106  .map = brw_bo_map(brw, bo, MAP_READ),  in decode_get_bo()
   117  struct brw_context *brw = v_brw;  in decode_get_state_size()
   118  struct brw_batch *batch = &brw->batch;  in decode_get_state_size()
   135  brw_batch_init(struct brw_context *brw)  in brw_batch_init()
   137  struct brw_screen *screen = brw->screen;  in brw_batch_init()
   138  struct brw_batch *batch = &brw->batch;  in brw_batch_init()
   [all …]
gfx6_sol.c
    39  gfx6_update_sol_surfaces(struct brw_context *brw)  in gfx6_update_sol_surfaces()
    41  struct gl_context *ctx = &brw->ctx;  in gfx6_update_sol_surfaces()
    59  if (brw->programs[MESA_SHADER_GEOMETRY]) {  in gfx6_update_sol_surfaces()
    61  brw, xfb_obj->Buffers[buffer],  in gfx6_update_sol_surfaces()
    62  &brw->gs.base.surf_offset[surf_index],  in gfx6_update_sol_surfaces()
    67  brw, xfb_obj->Buffers[buffer],  in gfx6_update_sol_surfaces()
    68  &brw->ff_gs.surf_offset[surf_index],  in gfx6_update_sol_surfaces()
    73  if (!brw->programs[MESA_SHADER_GEOMETRY])  in gfx6_update_sol_surfaces()
    74  brw->ff_gs.surf_offset[surf_index] = 0;  in gfx6_update_sol_surfaces()
    76  brw->gs.base.surf_offset[surf_index] = 0;  in gfx6_update_sol_surfaces()
   [all …]
brw_queryobj.c
    57  brw_raw_timestamp_delta(struct brw_context *brw, uint64_t time0, uint64_t time1)  in brw_raw_timestamp_delta()
    59  if (brw->screen->hw_has_timestamp == 2) {  in brw_raw_timestamp_delta()
    77  brw_write_timestamp(struct brw_context *brw, struct brw_bo *query_bo, int idx)  in brw_write_timestamp()
    79  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_write_timestamp()
    83  brw_emit_pipe_control_flush(brw,  in brw_write_timestamp()
    93  brw_emit_pipe_control_write(brw, flags,  in brw_write_timestamp()
   101  brw_write_depth_count(struct brw_context *brw, struct brw_bo *query_bo, int idx)  in brw_write_depth_count()
   103  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_write_depth_count()
   114  brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);  in brw_write_depth_count()
   117  brw_emit_pipe_control_write(brw, flags,  in brw_write_depth_count()
   [all …]
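brw_raw_timestamp_delta() compensates for the limited width of the raw GPU timestamp counter, which varies with how the kernel exposes it (the hw_has_timestamp check on line 59). A generic version of the wrap-safe subtraction; the width is left as a parameter because the per-generation widths are not visible in the excerpt:

#include <stdint.h>

/* Delta between two raw timestamps from a counter that is only `width`
 * bits wide: unsigned subtraction followed by a mask handles a single
 * wrap-around correctly, assuming both inputs are below 2^width. */
static uint64_t raw_timestamp_delta(uint64_t t0, uint64_t t1, unsigned width)
{
   uint64_t mask = (width >= 64) ? ~0ull : ((1ull << width) - 1);
   return (t1 - t0) & mask;
}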
gfx7_urb.c
    63  gfx7_allocate_push_constants(struct brw_context *brw)  in gfx7_allocate_push_constants()
    65  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in gfx7_allocate_push_constants()
    68  bool gs_present = brw->programs[MESA_SHADER_GEOMETRY];  in gfx7_allocate_push_constants()
    71  bool tess_present = brw->programs[MESA_SHADER_TESS_EVAL];  in gfx7_allocate_push_constants()
    90  gfx7_emit_push_constant_state(brw, multiplier * vs_size,  in gfx7_allocate_push_constants()
   105  brw->vs.base.push_constants_dirty = true;  in gfx7_allocate_push_constants()
   106  brw->tcs.base.push_constants_dirty = true;  in gfx7_allocate_push_constants()
   107  brw->tes.base.push_constants_dirty = true;  in gfx7_allocate_push_constants()
   108  brw->gs.base.push_constants_dirty = true;  in gfx7_allocate_push_constants()
   109  brw->wm.base.push_constants_dirty = true;  in gfx7_allocate_push_constants()
   [all …]
brw_batch.h
    24  void brw_batch_init(struct brw_context *brw);
    26  void brw_batch_save_state(struct brw_context *brw);
    27  bool brw_batch_saved_state_is_empty(struct brw_context *brw);
    28  void brw_batch_reset_to_saved(struct brw_context *brw);
    29  void brw_batch_require_space(struct brw_context *brw, GLuint sz);
    30  int _brw_batch_flush_fence(struct brw_context *brw,
    33  void brw_batch_maybe_noop(struct brw_context *brw);
    35  #define brw_batch_flush(brw) \
    36     _brw_batch_flush_fence((brw), -1, NULL, __FILE__, __LINE__)
    38  #define brw_batch_flush_fence(brw, in_fence_fd, out_fence_fd) \
   [all …]
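The brw_batch_flush() macro forwards to _brw_batch_flush_fence() with __FILE__ and __LINE__, so debug output can name the call site of every flush. The same trick in isolation, with a hypothetical worker in place of the real flush:

#include <stdio.h>

/* The worker takes the call site explicitly... */
static int flush_fence(int in_fence_fd, int *out_fence_fd,
                       const char *file, int line)
{
   (void)in_fence_fd; (void)out_fence_fd;
   printf("batch flush requested at %s:%d\n", file, line);
   return 0;
}

/* ...and the macros capture __FILE__/__LINE__ at every caller, the way
 * brw_batch_flush() / brw_batch_flush_fence() do in the header above. */
#define batch_flush()                    flush_fence(-1, NULL, __FILE__, __LINE__)
#define batch_flush_fence(in_fd, out_fd) flush_fence((in_fd), (out_fd), __FILE__, __LINE__)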
brw_disk_cache.c
    87  read_and_upload(struct brw_context *brw, struct disk_cache *cache,  in read_and_upload()
    96  brw_vs_populate_key(brw, &prog_key.vs);  in read_and_upload()
    99  brw_tcs_populate_key(brw, &prog_key.tcs);  in read_and_upload()
   102  brw_tes_populate_key(brw, &prog_key.tes);  in read_and_upload()
   105  brw_gs_populate_key(brw, &prog_key.gs);  in read_and_upload()
   108  brw_wm_populate_key(brw, &prog_key.wm);  in read_and_upload()
   111  brw_cs_populate_key(brw, &prog_key.cs);  in read_and_upload()
   128  if (brw->ctx._Shader->Flags & GLSL_CACHE_INFO) {  in read_and_upload()
   137  if (brw->ctx._Shader->Flags & GLSL_CACHE_INFO) {  in read_and_upload()
   154  if (brw->ctx._Shader->Flags & GLSL_CACHE_INFO) {  in read_and_upload()
   [all …]
genX_state_upload.c
    66  KSP(struct brw_context *brw, uint32_t offset)  in KSP()
    68  return ro_bo(brw->cache.bo, offset);  in KSP()
    72  KSP(UNUSED struct brw_context *brw, uint32_t offset)  in KSP()
    80  emit_lrm(struct brw_context *brw, uint32_t reg, struct brw_address addr)  in emit_lrm()
    82  brw_batch_emit(brw, GENX(MI_LOAD_REGISTER_MEM), lrm) {  in emit_lrm()
    91  emit_lri(struct brw_context *brw, uint32_t reg, uint32_t imm)  in emit_lri()
    93  brw_batch_emit(brw, GENX(MI_LOAD_REGISTER_IMM), lri) {  in emit_lri()
   104  genX(upload_polygon_stipple)(struct brw_context *brw)  in genX()
   106  struct gl_context *ctx = &brw->ctx;  in genX()
   112  brw_batch_emit(brw, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {  in genX()
   [all …]
brw_cs.c
    51  brw_codegen_cs_prog(struct brw_context *brw,  in brw_codegen_cs_prog()
    55  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_codegen_cs_prog()
    81  if (unlikely(brw->perf_debug)) {  in brw_codegen_cs_prog()
    82  start_busy = (brw->batch.last_bo &&  in brw_codegen_cs_prog()
    83  brw_bo_busy(brw->batch.last_bo));  in brw_codegen_cs_prog()
    94  .log_data = brw,  in brw_codegen_cs_prog()
   100  brw_get_shader_time_index(brw, &cp->program, ST_CS, true);  in brw_codegen_cs_prog()
   103  program = brw_compile_cs(brw->screen->compiler, mem_ctx, &params);  in brw_codegen_cs_prog()
   113  if (unlikely(brw->perf_debug)) {  in brw_codegen_cs_prog()
   115  brw_debug_recompile(brw, MESA_SHADER_COMPUTE, cp->program.Id,  in brw_codegen_cs_prog()
   [all …]
gfx7_l3_state.c
    39  get_pipeline_state_l3_weights(const struct brw_context *brw)  in get_pipeline_state_l3_weights()
    42  [MESA_SHADER_VERTEX] = &brw->vs.base,  in get_pipeline_state_l3_weights()
    43  [MESA_SHADER_TESS_CTRL] = &brw->tcs.base,  in get_pipeline_state_l3_weights()
    44  [MESA_SHADER_TESS_EVAL] = &brw->tes.base,  in get_pipeline_state_l3_weights()
    45  [MESA_SHADER_GEOMETRY] = &brw->gs.base,  in get_pipeline_state_l3_weights()
    46  [MESA_SHADER_FRAGMENT] = &brw->wm.base,  in get_pipeline_state_l3_weights()
    47  [MESA_SHADER_COMPUTE] = &brw->cs.base  in get_pipeline_state_l3_weights()
    53  brw->ctx._Shader->CurrentProgram[stage_states[i]->stage];  in get_pipeline_state_l3_weights()
    63  return intel_get_default_l3_weights(&brw->screen->devinfo,  in get_pipeline_state_l3_weights()
    71  setup_l3_config(struct brw_context *brw, const struct intel_l3_config *cfg)  in setup_l3_config()
   [all …]
gfx6_queryobj.c
    42  set_query_availability(struct brw_context *brw, struct brw_query_object *query,  in set_query_availability()
    60  if (brw->ctx.Extensions.ARB_query_buffer_object &&  in set_query_availability()
    72  brw_emit_pipe_control_write(brw, flags,  in set_query_availability()
    79  write_primitives_generated(struct brw_context *brw,  in write_primitives_generated()
    82  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in write_primitives_generated()
    84  brw_emit_mi_flush(brw);  in write_primitives_generated()
    87  brw_store_register_mem64(brw, query_bo,  in write_primitives_generated()
    91  brw_store_register_mem64(brw, query_bo, CL_INVOCATION_COUNT,  in write_primitives_generated()
    97  write_xfb_primitives_written(struct brw_context *brw,  in write_xfb_primitives_written()
   100  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in write_xfb_primitives_written()
   [all …]
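write_primitives_generated() snapshots a hardware statistics register (CL_INVOCATION_COUNT) into the query buffer at a given index; the usual pattern for such queries is one snapshot at BeginQuery and one at EndQuery, with the result being their difference. A CPU-side sketch of that bookkeeping only -- the begin/end pairing is an assumption about the surrounding code, and the snapshot itself is a GPU command (brw_store_register_mem64) in the real driver:

#include <stdint.h>

/* Two slots per query: index 0 written at BeginQuery, 1 at EndQuery. */
struct query { uint64_t snapshot[2]; };

static void write_counter_snapshot(struct query *q, int idx, uint64_t counter)
{
   q->snapshot[idx] = counter;   /* stands in for the register write to the BO */
}

static uint64_t query_result(const struct query *q)
{
   return q->snapshot[1] - q->snapshot[0];
}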
brw_ff_gs.c
    46  compile_ff_gs_prog(struct brw_context *brw,  in compile_ff_gs_prog()
    56  program = brw_compile_ff_gs_prog(brw->screen->compiler, mem_ctx, key,  in compile_ff_gs_prog()
    58  &brw_vue_prog_data(brw->vs.base.prog_data)->vue_map,  in compile_ff_gs_prog()
    61  brw_upload_cache(&brw->cache, BRW_CACHE_FF_GS_PROG,  in compile_ff_gs_prog()
    65  &brw->ff_gs.prog_offset, &brw->ff_gs.prog_data);  in compile_ff_gs_prog()
    70  brw_ff_gs_state_dirty(const struct brw_context *brw)  in brw_ff_gs_state_dirty()
    72  return brw_state_dirty(brw,  in brw_ff_gs_state_dirty()
    80  brw_ff_gs_populate_key(struct brw_context *brw,  in brw_ff_gs_populate_key()
    83  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_ff_gs_populate_key()
    91  struct gl_context *ctx = &brw->ctx;  in brw_ff_gs_populate_key()
   [all …]
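brw_ff_gs.c follows the common program-cache pattern: when the relevant dirty bits are set (brw_ff_gs_state_dirty), rebuild the key from current state (brw_ff_gs_populate_key), look it up in the program cache, and only compile and upload (compile_ff_gs_prog / brw_upload_cache) on a miss. A toy, self-contained version of the lookup-or-compile step; the key layout, the fixed-size cache, and the fake program sizes are inventions for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Toy key and cache; the real key is brw_ff_gs_prog_key and the cache is
 * the driver-wide brw->cache. */
struct ff_gs_key { unsigned primitive; unsigned attrs; };

struct cache_entry { bool valid; struct ff_gs_key key; uint32_t prog_offset; };
static struct cache_entry cache[16];
static uint32_t next_offset;

static bool cache_search(const struct ff_gs_key *key, uint32_t *offset)
{
   for (unsigned i = 0; i < 16; i++) {
      if (cache[i].valid && !memcmp(&cache[i].key, key, sizeof(*key))) {
         *offset = cache[i].prog_offset;
         return true;
      }
   }
   return false;
}

static uint32_t compile_and_upload(const struct ff_gs_key *key)
{
   uint32_t offset = next_offset;
   next_offset += 64;                 /* pretend every program is 64 bytes */
   for (unsigned i = 0; i < 16; i++) {
      if (!cache[i].valid) {
         cache[i] = (struct cache_entry){ true, *key, offset };
         break;
      }
   }
   return offset;
}

/* Reuse a cached program when the key matches; compile only on a miss. */
static uint32_t upload_ff_gs_prog(const struct ff_gs_key *key)
{
   uint32_t offset;
   if (cache_search(key, &offset))
      return offset;
   return compile_and_upload(key);
}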
brw_gs.c
    85  brw_codegen_gs_prog(struct brw_context *brw,  in brw_codegen_gs_prog()
    89  struct brw_compiler *compiler = brw->screen->compiler;  in brw_codegen_gs_prog()
    90  const struct intel_device_info *devinfo = &brw->screen->devinfo;  in brw_codegen_gs_prog()
    91  struct brw_stage_state *stage_state = &brw->gs.base;  in brw_codegen_gs_prog()
   107  if (brw->can_push_ubos) {  in brw_codegen_gs_prog()
   124  st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);  in brw_codegen_gs_prog()
   126  if (unlikely(brw->perf_debug)) {  in brw_codegen_gs_prog()
   127  start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);  in brw_codegen_gs_prog()
   133  brw_compile_gs(brw->screen->compiler, brw, mem_ctx, key,  in brw_codegen_gs_prog()
   144  if (unlikely(brw->perf_debug)) {  in brw_codegen_gs_prog()
   [all …]