Searched refs:brw (Results 1 – 25 of 93) sorted by relevance

/external/mesa3d/src/mesa/drivers/dri/i965/
brw_state_dump.c
35 batch_out(struct brw_context *brw, const char *name, uint32_t offset,
39 batch_out(struct brw_context *brw, const char *name, uint32_t offset, in batch_out() argument
42 struct intel_context *intel = &brw->intel; in batch_out()
80 static void dump_vs_state(struct brw_context *brw, uint32_t offset) in dump_vs_state() argument
82 struct intel_context *intel = &brw->intel; in dump_vs_state()
86 batch_out(brw, name, offset, 0, "thread0\n"); in dump_vs_state()
87 batch_out(brw, name, offset, 1, "thread1\n"); in dump_vs_state()
88 batch_out(brw, name, offset, 2, "thread2\n"); in dump_vs_state()
89 batch_out(brw, name, offset, 3, "thread3\n"); in dump_vs_state()
90 batch_out(brw, name, offset, 4, "thread4: %d threads\n", in dump_vs_state()
[all …]
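The hits above show batch_out() used as a printf-style dumper: one labeled dword of a state object per call. Below is a minimal standalone sketch of that idea, with a plain uint32_t array standing in for the mapped batchbuffer that the real helper reads through brw_context; the variadic signature is inferred from the "%d threads" call site, and all values are invented for illustration.

    /* Sketch only: stand-in for the driver's batch_out() dumper. */
    #include <inttypes.h>
    #include <stdarg.h>
    #include <stdint.h>
    #include <stdio.h>

    static void batch_out(const uint32_t *data, const char *name,
                          uint32_t offset, int index, const char *fmt, ...)
    {
       va_list va;

       /* Address of the dword, its raw value, the state object's name... */
       printf("0x%08" PRIx32 ": 0x%08" PRIx32 ": %8s: ",
              offset + 4 * index, data[offset / 4 + index], name);

       /* ...then the caller's printf-style description of the field. */
       va_start(va, fmt);
       vprintf(fmt, va);
       va_end(va);
    }

    int main(void)
    {
       /* Invented contents of a VS unit state object. */
       uint32_t vs_state[8] = { 0x11111111, 0x22222222, 0x33333333, 0x00000004 };

       batch_out(vs_state, "VS_STATE", 0, 0, "thread0\n");
       batch_out(vs_state, "VS_STATE", 0, 3, "thread3: %d threads\n",
                 (int)(vs_state[3] & 0xff));
       return 0;
    }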
brw_urb.c
100 static bool check_urb_layout(struct brw_context *brw) in check_urb_layout() argument
102 brw->urb.vs_start = 0; in check_urb_layout()
103 brw->urb.gs_start = brw->urb.nr_vs_entries * brw->urb.vsize; in check_urb_layout()
104 brw->urb.clip_start = brw->urb.gs_start + brw->urb.nr_gs_entries * brw->urb.vsize; in check_urb_layout()
105 brw->urb.sf_start = brw->urb.clip_start + brw->urb.nr_clip_entries * brw->urb.vsize; in check_urb_layout()
106 brw->urb.cs_start = brw->urb.sf_start + brw->urb.nr_sf_entries * brw->urb.sfsize; in check_urb_layout()
108 return brw->urb.cs_start + brw->urb.nr_cs_entries * in check_urb_layout()
109 brw->urb.csize <= brw->urb.size; in check_urb_layout()
115 static void recalculate_urb_fence( struct brw_context *brw ) in recalculate_urb_fence() argument
117 struct intel_context *intel = &brw->intel; in recalculate_urb_fence()
[all …]
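check_urb_layout() packs the fixed-function stages back to back and verifies the result fits in the URB. A self-contained sketch of that packing, using a stripped-down struct in place of brw_context: field names mirror the driver, but the sizes and entry counts in main() are made-up example values in unspecified URB row units.

    #include <stdbool.h>
    #include <stdio.h>

    struct urb_layout {
       unsigned vsize, sfsize, csize;        /* per-entry sizes, in URB rows */
       unsigned nr_vs_entries, nr_gs_entries;
       unsigned nr_clip_entries, nr_sf_entries, nr_cs_entries;
       unsigned vs_start, gs_start, clip_start, sf_start, cs_start;
       unsigned size;                        /* total URB space available */
    };

    /* Each stage starts where the previous stage's entries end; the layout
     * is valid if the constant-buffer region still ends inside the URB. */
    static bool check_urb_layout(struct urb_layout *urb)
    {
       urb->vs_start   = 0;
       urb->gs_start   = urb->nr_vs_entries * urb->vsize;
       urb->clip_start = urb->gs_start + urb->nr_gs_entries * urb->vsize;
       urb->sf_start   = urb->clip_start + urb->nr_clip_entries * urb->vsize;
       urb->cs_start   = urb->sf_start + urb->nr_sf_entries * urb->sfsize;
       return urb->cs_start + urb->nr_cs_entries * urb->csize <= urb->size;
    }

    int main(void)
    {
       struct urb_layout urb = {           /* invented example values */
          .vsize = 4, .sfsize = 2, .csize = 1,
          .nr_vs_entries = 32, .nr_gs_entries = 4,
          .nr_clip_entries = 6, .nr_sf_entries = 8, .nr_cs_entries = 8,
          .size = 256,
       };
       printf("layout fits: %s\n", check_urb_layout(&urb) ? "yes" : "no");
       return 0;
    }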
brw_curbe.c
56 static void calculate_curbe_offsets( struct brw_context *brw ) in calculate_curbe_offsets() argument
58 struct gl_context *ctx = &brw->intel.ctx; in calculate_curbe_offsets()
60 const GLuint nr_fp_regs = (brw->wm.prog_data->nr_params + 15) / 16; in calculate_curbe_offsets()
63 const GLuint nr_vp_regs = (brw->vs.prog_data->nr_params + 15) / 16; in calculate_curbe_offsets()
97 if (nr_fp_regs > brw->curbe.wm_size || in calculate_curbe_offsets()
98 nr_vp_regs > brw->curbe.vs_size || in calculate_curbe_offsets()
99 nr_clip_regs != brw->curbe.clip_size || in calculate_curbe_offsets()
100 (total_regs < brw->curbe.total_size / 4 && in calculate_curbe_offsets()
101 brw->curbe.total_size > 16)) { in calculate_curbe_offsets()
108 brw->curbe.wm_start = reg; in calculate_curbe_offsets()
[all …]
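The (nr_params + 15) / 16 expressions above are integer ceiling division: a program's float parameters are rounded up to whole 16-float constant blocks before CURBE space is assigned. A tiny illustration of the idiom:

    #include <stdio.h>

    /* ceil(nr_params / 16) without floating point. */
    static unsigned curbe_regs(unsigned nr_params)
    {
       return (nr_params + 15) / 16;
    }

    int main(void)
    {
       printf("%u\n", curbe_regs(1));    /* -> 1 */
       printf("%u\n", curbe_regs(16));   /* -> 1 */
       printf("%u\n", curbe_regs(17));   /* -> 2 */
       return 0;
    }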
brw_vtbl.c
70 struct brw_context *brw = brw_context(&intel->ctx); in brw_destroy_context() local
72 brw_destroy_state(brw); in brw_destroy_context()
73 brw_draw_destroy( brw ); in brw_destroy_context()
75 ralloc_free(brw->wm.compile_data); in brw_destroy_context()
77 dri_bo_release(&brw->curbe.curbe_bo); in brw_destroy_context()
78 dri_bo_release(&brw->vs.const_bo); in brw_destroy_context()
79 dri_bo_release(&brw->wm.const_bo); in brw_destroy_context()
81 free(brw->curbe.last_buf); in brw_destroy_context()
82 free(brw->curbe.next_buf); in brw_destroy_context()
157 struct brw_context *brw = brw_context(&intel->ctx); in brw_finish_batch() local
[all …]
brw_vs_surface_state.c
45 brw_upload_vs_pull_constants(struct brw_context *brw) in brw_upload_vs_pull_constants() argument
47 struct gl_context *ctx = &brw->intel.ctx; in brw_upload_vs_pull_constants()
48 struct intel_context *intel = &brw->intel; in brw_upload_vs_pull_constants()
51 (struct brw_vertex_program *) brw->vertex_program; in brw_upload_vs_pull_constants()
61 _mesa_load_state_parameters(&brw->intel.ctx, vp->program.Base.Parameters); in brw_upload_vs_pull_constants()
64 if (!brw->vs.prog_data->nr_pull_params) { in brw_upload_vs_pull_constants()
65 if (brw->vs.const_bo) { in brw_upload_vs_pull_constants()
66 drm_intel_bo_unreference(brw->vs.const_bo); in brw_upload_vs_pull_constants()
67 brw->vs.const_bo = NULL; in brw_upload_vs_pull_constants()
68 brw->vs.surf_offset[SURF_INDEX_VERT_CONST_BUFFER] = 0; in brw_upload_vs_pull_constants()
[all …]
brw_context.c
128 struct brw_context *brw = rzalloc(NULL, struct brw_context); in brwCreateContext() local
129 if (!brw) { in brwCreateContext()
138 brw->intel.gen = screen->gen; in brwCreateContext()
140 brwInitVtbl( brw ); in brwCreateContext()
144 struct intel_context *intel = &brw->intel; in brwCreateContext()
154 brw_init_surface_formats(brw); in brwCreateContext()
280 brw->CMD_VF_STATISTICS = GM45_3DSTATE_VF_STATISTICS; in brwCreateContext()
281 brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45; in brwCreateContext()
282 brw->has_surface_tile_offset = true; in brwCreateContext()
284 brw->has_compr4 = true; in brwCreateContext()
[all …]
brw_draw.c
88 static void brw_set_prim(struct brw_context *brw, in brw_set_prim() argument
91 struct gl_context *ctx = &brw->intel.ctx; in brw_set_prim()
111 if (hw_prim != brw->primitive) { in brw_set_prim()
112 brw->primitive = hw_prim; in brw_set_prim()
113 brw->state.dirty.brw |= BRW_NEW_PRIMITIVE; in brw_set_prim()
115 if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) { in brw_set_prim()
116 brw->intel.reduced_primitive = reduced_prim[prim->mode]; in brw_set_prim()
117 brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE; in brw_set_prim()
122 static void gen6_set_prim(struct brw_context *brw, in gen6_set_prim() argument
131 if (hw_prim != brw->primitive) { in gen6_set_prim()
[all …]
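brw_set_prim() and gen6_set_prim() follow the driver's compare-and-flag pattern: state is only marked dirty (and later re-emitted by the matching state atoms) when a tracked value actually changes. A minimal sketch of that pattern, with an invented context struct and an invented bit value for BRW_NEW_PRIMITIVE:

    #include <stdint.h>
    #include <stdio.h>

    #define BRW_NEW_PRIMITIVE (1u << 0)   /* bit value invented for the sketch */

    struct ctx {
       int primitive;
       uint32_t dirty_brw;
    };

    static void set_prim(struct ctx *c, int hw_prim)
    {
       if (hw_prim != c->primitive) {
          c->primitive = hw_prim;
          c->dirty_brw |= BRW_NEW_PRIMITIVE;   /* downstream atoms re-upload */
       }
    }

    int main(void)
    {
       struct ctx c = { .primitive = -1 };

       set_prim(&c, 4);
       printf("dirty after change: 0x%x\n", c.dirty_brw);

       c.dirty_brw = 0;
       set_prim(&c, 4);                        /* no change -> no dirty bit */
       printf("dirty after repeat: 0x%x\n", c.dirty_brw);
       return 0;
    }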
gen6_urb.c
50 gen6_upload_urb( struct brw_context *brw ) in gen6_upload_urb() argument
52 struct intel_context *intel = &brw->intel; in gen6_upload_urb()
54 int total_urb_size = brw->urb.size * 1024; /* in bytes */ in gen6_upload_urb()
57 brw->urb.vs_size = MAX2(brw->vs.prog_data->urb_entry_size, 1); in gen6_upload_urb()
65 brw->urb.gs_size = brw->urb.vs_size; in gen6_upload_urb()
68 if (brw->gs.prog_active) { in gen6_upload_urb()
69 nr_vs_entries = (total_urb_size/2) / (brw->urb.vs_size * 128); in gen6_upload_urb()
70 nr_gs_entries = (total_urb_size/2) / (brw->urb.gs_size * 128); in gen6_upload_urb()
72 nr_vs_entries = total_urb_size / (brw->urb.vs_size * 128); in gen6_upload_urb()
77 if (nr_vs_entries > brw->urb.max_vs_entries) in gen6_upload_urb()
[all …]
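gen6_upload_urb() splits the URB evenly between VS and GS when a geometry shader is active and gives all of it to the VS otherwise, with entry sizes counted in 128-byte units and the result clamped to the hardware maximum. A standalone sketch of that arithmetic; MAX2 and the clamp mirror the driver, and the input values in main() are invented.

    #include <stdio.h>

    #define MAX2(a, b) ((a) > (b) ? (a) : (b))

    static void gen6_urb_split(unsigned urb_size_kb, unsigned vs_entry_size,
                               int gs_active, unsigned max_vs_entries,
                               unsigned *nr_vs, unsigned *nr_gs)
    {
       unsigned total = urb_size_kb * 1024;        /* URB size in bytes */
       unsigned vs_size = MAX2(vs_entry_size, 1);  /* in 128-byte rows */
       unsigned gs_size = vs_size;

       if (gs_active) {
          /* Half the URB each when the GS is on. */
          *nr_vs = (total / 2) / (vs_size * 128);
          *nr_gs = (total / 2) / (gs_size * 128);
       } else {
          *nr_vs = total / (vs_size * 128);
          *nr_gs = 0;
       }
       if (*nr_vs > max_vs_entries)
          *nr_vs = max_vs_entries;
    }

    int main(void)
    {
       unsigned nr_vs, nr_gs;

       /* Invented: 64 KB URB, 2-row VS entries, GS active, cap of 256. */
       gen6_urb_split(64, 2, 1, 256, &nr_vs, &nr_gs);
       printf("vs entries: %u, gs entries: %u\n", nr_vs, nr_gs);
       return 0;
    }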
gen7_blorp.cpp
50 gen7_blorp_emit_urb_config(struct brw_context *brw, in gen7_blorp_emit_urb_config() argument
60 gen7_emit_urb_state(brw, num_vs_entries, vs_size, vs_start); in gen7_blorp_emit_urb_config()
66 gen7_blorp_emit_blend_state_pointer(struct brw_context *brw, in gen7_blorp_emit_blend_state_pointer() argument
70 struct intel_context *intel = &brw->intel; in gen7_blorp_emit_blend_state_pointer()
81 gen7_blorp_emit_cc_state_pointer(struct brw_context *brw, in gen7_blorp_emit_cc_state_pointer() argument
85 struct intel_context *intel = &brw->intel; in gen7_blorp_emit_cc_state_pointer()
94 gen7_blorp_emit_cc_viewport(struct brw_context *brw, in gen7_blorp_emit_cc_viewport() argument
97 struct intel_context *intel = &brw->intel; in gen7_blorp_emit_cc_viewport()
101 ccv = (struct brw_cc_viewport *)brw_state_batch(brw, AUB_TRACE_CC_VP_STATE, in gen7_blorp_emit_cc_viewport()
119 gen7_blorp_emit_depth_stencil_state_pointers(struct brw_context *brw, in gen7_blorp_emit_depth_stencil_state_pointers() argument
[all …]
gen6_sol.c
36 gen6_update_sol_surfaces(struct brw_context *brw) in gen6_update_sol_surfaces() argument
38 struct gl_context *ctx = &brw->intel.ctx; in gen6_update_sol_surfaces()
58 brw, xfb_obj->Buffers[buffer], &brw->gs.surf_offset[surf_index], in gen6_update_sol_surfaces()
62 brw->gs.surf_offset[surf_index] = 0; in gen6_update_sol_surfaces()
66 brw->state.dirty.brw |= BRW_NEW_SURFACES; in gen6_update_sol_surfaces()
72 .brw = (BRW_NEW_BATCH |
84 brw_gs_upload_binding_table(struct brw_context *brw) in brw_gs_upload_binding_table() argument
86 struct gl_context *ctx = &brw->intel.ctx; in brw_gs_upload_binding_table()
102 if (brw->gs.bind_bo_offset != 0) { in brw_gs_upload_binding_table()
103 brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE; in brw_gs_upload_binding_table()
[all …]
gen6_blorp.cpp
74 gen6_blorp_emit_batch_head(struct brw_context *brw, in gen6_blorp_emit_batch_head() argument
77 struct gl_context *ctx = &brw->intel.ctx; in gen6_blorp_emit_batch_head()
78 struct intel_context *intel = &brw->intel; in gen6_blorp_emit_batch_head()
99 OUT_BATCH(brw->CMD_PIPELINE_SELECT << 16); in gen6_blorp_emit_batch_head()
118 gen6_blorp_emit_state_base_address(struct brw_context *brw, in gen6_blorp_emit_state_base_address() argument
121 struct intel_context *intel = &brw->intel; in gen6_blorp_emit_state_base_address()
133 OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, in gen6_blorp_emit_state_base_address()
152 gen6_blorp_emit_vertices(struct brw_context *brw, in gen6_blorp_emit_vertices() argument
155 struct intel_context *intel = &brw->intel; in gen6_blorp_emit_vertices()
196 vertex_data = (float *) brw_state_batch(brw, AUB_TRACE_VERTEX_BUFFER, in gen6_blorp_emit_vertices()
[all …]
gen6_wm_state.c
38 gen6_upload_wm_push_constants(struct brw_context *brw) in gen6_upload_wm_push_constants() argument
40 struct intel_context *intel = &brw->intel; in gen6_upload_wm_push_constants()
44 brw_fragment_program_const(brw->fragment_program); in gen6_upload_wm_push_constants()
53 if (brw->wm.prog_data->nr_params != 0) { in gen6_upload_wm_push_constants()
57 constants = brw_state_batch(brw, AUB_TRACE_WM_CONSTANTS, in gen6_upload_wm_push_constants()
58 brw->wm.prog_data->nr_params * in gen6_upload_wm_push_constants()
60 32, &brw->wm.push_const_offset); in gen6_upload_wm_push_constants()
62 for (i = 0; i < brw->wm.prog_data->nr_params; i++) { in gen6_upload_wm_push_constants()
63 constants[i] = *brw->wm.prog_data->param[i]; in gen6_upload_wm_push_constants()
68 for (i = 0; i < brw->wm.prog_data->nr_params; i++) { in gen6_upload_wm_push_constants()
[all …]
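The upload loop above (constants[i] = *brw->wm.prog_data->param[i]) is a gather: param[] holds pointers into GL parameter storage, and each push-constant slot is filled by one dereference. A self-contained sketch, with invented stand-in parameters and a local array in place of the brw_state_batch() allocation:

    #include <stdio.h>

    int main(void)
    {
       /* Stand-ins: two GL parameter vec4s and the param[] pointer table. */
       float mvp_row0[4] = { 1, 0, 0, 0 };
       float color[4]    = { 0.5f, 0.25f, 1, 1 };
       const float *param[8] = {
          &mvp_row0[0], &mvp_row0[1], &mvp_row0[2], &mvp_row0[3],
          &color[0], &color[1], &color[2], &color[3],
       };
       unsigned nr_params = 8;

       float constants[8];            /* stands in for batch-allocated space */
       for (unsigned i = 0; i < nr_params; i++)
          constants[i] = *param[i];   /* gather through the pointer table */

       for (unsigned i = 0; i < nr_params; i++)
          printf("constants[%u] = %f\n", i, constants[i]);
       return 0;
    }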
brw_draw_upload.c
308 copy_array_to_vbo_array(struct brw_context *brw, in copy_array_to_vbo_array() argument
321 intel_upload_data(&brw->intel, element->glarray->Ptr, in copy_array_to_vbo_array()
336 intel_upload_data(&brw->intel, src, size, dst_stride, in copy_array_to_vbo_array()
339 char * const map = intel_upload_map(&brw->intel, size, dst_stride); in copy_array_to_vbo_array()
347 intel_upload_unmap(&brw->intel, map, size, dst_stride, in copy_array_to_vbo_array()
353 static void brw_prepare_vertices(struct brw_context *brw) in brw_prepare_vertices() argument
355 struct gl_context *ctx = &brw->intel.ctx; in brw_prepare_vertices()
358 GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read; in brw_prepare_vertices()
361 unsigned int min_index = brw->vb.min_index; in brw_prepare_vertices()
362 unsigned int max_index = brw->vb.max_index; in brw_prepare_vertices()
[all …]
brw_vs_state.c
40 brw_upload_vs_unit(struct brw_context *brw) in brw_upload_vs_unit() argument
42 struct intel_context *intel = &brw->intel; in brw_upload_vs_unit()
46 vs = brw_state_batch(brw, AUB_TRACE_VS_STATE, in brw_upload_vs_unit()
47 sizeof(*vs), 32, &brw->vs.state_offset); in brw_upload_vs_unit()
51 vs->thread0.grf_reg_count = ALIGN(brw->vs.prog_data->total_grf, 16) / 16 - 1; in brw_upload_vs_unit()
53 brw_program_reloc(brw, in brw_upload_vs_unit()
54 brw->vs.state_offset + in brw_upload_vs_unit()
56 brw->vs.prog_offset + in brw_upload_vs_unit()
76 if (brw->vs.prog_data->total_scratch != 0) { in brw_upload_vs_unit()
78 brw->vs.scratch_bo->offset >> 10; /* reloc */ in brw_upload_vs_unit()
[all …]
gen7_urb.c
54 gen7_allocate_push_constants(struct brw_context *brw) in gen7_allocate_push_constants() argument
56 struct intel_context *intel = &brw->intel; in gen7_allocate_push_constants()
71 .brw = BRW_NEW_CONTEXT,
78 gen7_upload_urb(struct brw_context *brw) in gen7_upload_urb() argument
80 struct intel_context *intel = &brw->intel; in gen7_upload_urb()
82 int handle_region_size = (brw->urb.size - 16) * 1024; /* bytes */ in gen7_upload_urb()
85 brw->urb.vs_size = MAX2(brw->vs.prog_data->urb_entry_size, 1); in gen7_upload_urb()
87 int nr_vs_entries = handle_region_size / (brw->urb.vs_size * 64); in gen7_upload_urb()
88 if (nr_vs_entries > brw->urb.max_vs_entries) in gen7_upload_urb()
89 nr_vs_entries = brw->urb.max_vs_entries; in gen7_upload_urb()
[all …]
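gen7_upload_urb() differs from the gen6 version in two visible ways: the first 16 KB of the URB are carved out (apparently for the push constants set up by gen7_allocate_push_constants above), and VS entries are counted in 64-byte units. A sketch of the sizing with invented inputs:

    #include <stdio.h>

    #define MAX2(a, b) ((a) > (b) ? (a) : (b))

    static unsigned gen7_nr_vs_entries(unsigned urb_size_kb,
                                       unsigned vs_entry_size,
                                       unsigned max_vs_entries)
    {
       /* 16 KB reserved off the top, per the (size - 16) above. */
       unsigned handle_region = (urb_size_kb - 16) * 1024;   /* bytes */
       unsigned vs_size = MAX2(vs_entry_size, 1);            /* 64-byte rows */
       unsigned nr = handle_region / (vs_size * 64);

       return nr > max_vs_entries ? max_vs_entries : nr;
    }

    int main(void)
    {
       /* Invented: 128 KB URB, 2-row entries, hardware cap of 512. */
       printf("%u\n", gen7_nr_vs_entries(128, 2, 512));
       return 0;
    }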
brw_wm_state.c
45 brw_color_buffer_write_enabled(struct brw_context *brw) in brw_color_buffer_write_enabled() argument
47 struct gl_context *ctx = &brw->intel.ctx; in brw_color_buffer_write_enabled()
48 const struct gl_fragment_program *fp = brw->fragment_program; in brw_color_buffer_write_enabled()
74 brw_upload_wm_unit(struct brw_context *brw) in brw_upload_wm_unit() argument
76 struct intel_context *intel = &brw->intel; in brw_upload_wm_unit()
78 const struct gl_fragment_program *fp = brw->fragment_program; in brw_upload_wm_unit()
81 wm = brw_state_batch(brw, AUB_TRACE_WM_STATE, in brw_upload_wm_unit()
82 sizeof(*wm), 32, &brw->wm.state_offset); in brw_upload_wm_unit()
85 if (brw->wm.prog_data->prog_offset_16) { in brw_upload_wm_unit()
90 assert(brw->wm.prog_data->first_curbe_grf == in brw_upload_wm_unit()
[all …]
gen6_vs_state.c
37 gen6_upload_vs_push_constants(struct brw_context *brw) in gen6_upload_vs_push_constants() argument
39 struct intel_context *intel = &brw->intel; in gen6_upload_vs_push_constants()
43 brw_vertex_program_const(brw->vertex_program); in gen6_upload_vs_push_constants()
44 unsigned int nr_params = brw->vs.prog_data->nr_params / 4; in gen6_upload_vs_push_constants()
47 if (brw->vertex_program->IsNVProgram) in gen6_upload_vs_push_constants()
57 if (brw->vs.prog_data->nr_params == 0 && !ctx->Transform.ClipPlanesEnabled) { in gen6_upload_vs_push_constants()
58 brw->vs.push_const_size = 0; in gen6_upload_vs_push_constants()
64 param = brw_state_batch(brw, AUB_TRACE_VS_CONSTANTS, in gen6_upload_vs_push_constants()
67 32, &brw->vs.push_const_offset); in gen6_upload_vs_push_constants()
69 if (brw->vs.prog_data->uses_new_param_layout) { in gen6_upload_vs_push_constants()
[all …]
brw_queryobj.c
249 struct brw_context *brw = brw_context(ctx); in brw_begin_query() local
268 brw->query.obj = query; in brw_begin_query()
276 brw->sol.primitives_generated = 0; in brw_begin_query()
277 brw->sol.counting_primitives_generated = true; in brw_begin_query()
284 brw->sol.primitives_written = 0; in brw_begin_query()
285 brw->sol.counting_primitives_written = true; in brw_begin_query()
300 struct brw_context *brw = brw_context(ctx); in brw_end_query() local
323 brw_emit_query_end(brw); in brw_end_query()
326 drm_intel_bo_unreference(brw->query.bo); in brw_end_query()
327 brw->query.bo = NULL; in brw_end_query()
[all …]
brw_misc_state.c
46 static void upload_drawing_rect(struct brw_context *brw) in upload_drawing_rect() argument
48 struct intel_context *intel = &brw->intel; in upload_drawing_rect()
63 .brw = BRW_NEW_CONTEXT,
76 static void upload_binding_table_pointers(struct brw_context *brw) in upload_binding_table_pointers() argument
78 struct intel_context *intel = &brw->intel; in upload_binding_table_pointers()
82 OUT_BATCH(brw->vs.bind_bo_offset); in upload_binding_table_pointers()
86 OUT_BATCH(brw->wm.bind_bo_offset); in upload_binding_table_pointers()
93 .brw = (BRW_NEW_BATCH |
110 static void upload_gen6_binding_table_pointers(struct brw_context *brw) in upload_gen6_binding_table_pointers() argument
112 struct intel_context *intel = &brw->intel; in upload_gen6_binding_table_pointers()
[all …]
gen7_wm_state.c
35 upload_wm_state(struct brw_context *brw) in upload_wm_state() argument
37 struct intel_context *intel = &brw->intel; in upload_wm_state()
40 brw_fragment_program_const(brw->fragment_program); in upload_wm_state()
68 dw1 |= brw->wm.prog_data->barycentric_interp_modes << in upload_wm_state()
77 if (brw_color_buffer_write_enabled(brw) || writes_depth || in upload_wm_state()
105 .brw = (BRW_NEW_FRAGMENT_PROGRAM |
113 upload_ps_state(struct brw_context *brw) in upload_ps_state() argument
115 struct intel_context *intel = &brw->intel; in upload_ps_state()
118 const int max_threads_shift = brw->intel.is_haswell ? in upload_ps_state()
124 OUT_BATCH(brw->wm.bind_bo_offset); in upload_ps_state()
[all …]
brw_state_batch.c
38 brw_track_state_batch(struct brw_context *brw, in brw_track_state_batch() argument
43 struct intel_batchbuffer *batch = &brw->intel.batch; in brw_track_state_batch()
45 if (!brw->state_batch_list) { in brw_track_state_batch()
49 brw->state_batch_list = ralloc_size(brw, sizeof(*brw->state_batch_list) * in brw_track_state_batch()
53 brw->state_batch_list[brw->state_batch_count].offset = offset; in brw_track_state_batch()
54 brw->state_batch_list[brw->state_batch_count].size = size; in brw_track_state_batch()
55 brw->state_batch_list[brw->state_batch_count].type = type; in brw_track_state_batch()
56 brw->state_batch_count++; in brw_track_state_batch()
84 struct brw_context *brw = brw_context(&intel->ctx); in brw_annotate_aub() local
86 unsigned annotation_count = 2 * brw->state_batch_count + 1; in brw_annotate_aub()
[all …]
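brw_track_state_batch() lazily allocates a tracking array and appends one (offset, size, type) record per state allocation, so the batch can later be annotated for AUB dumps. A sketch of the same append pattern: the driver uses ralloc against the batch's maximum size, and plain realloc with doubling is substituted here; the TRACE_* type names are invented stand-ins for the driver's AUB_TRACE_* values.

    #include <stdint.h>
    #include <stdlib.h>

    struct state_batch {
       uint32_t offset;
       uint32_t size;
       enum { TRACE_VS_STATE, TRACE_WM_STATE, TRACE_CC_VP_STATE } type;
    };

    struct tracker {
       struct state_batch *list;
       int count, capacity;
    };

    static void track_state_batch(struct tracker *t, uint32_t offset,
                                  uint32_t size, int type)
    {
       /* Lazy allocation / growth, standing in for the one-shot ralloc. */
       if (t->count == t->capacity) {
          t->capacity = t->capacity ? t->capacity * 2 : 16;
          t->list = realloc(t->list, t->capacity * sizeof(*t->list));
       }
       t->list[t->count].offset = offset;
       t->list[t->count].size = size;
       t->list[t->count].type = type;
       t->count++;
    }

    int main(void)
    {
       struct tracker t = {0};

       track_state_batch(&t, 0x100, 32, TRACE_VS_STATE);
       track_state_batch(&t, 0x140, 64, TRACE_WM_STATE);
       free(t.list);
       return 0;
    }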
brw_gs.c
47 static void compile_gs_prog( struct brw_context *brw, in compile_gs_prog() argument
50 struct intel_context *intel = &brw->intel; in compile_gs_prog()
59 c.vue_map = brw->vs.prog_data->vue_map; in compile_gs_prog()
66 brw_init_compile(brw, &c.func, mem_ctx); in compile_gs_prog()
145 brw_upload_cache(&brw->cache, BRW_GS_PROG, in compile_gs_prog()
149 &brw->gs.prog_offset, &brw->gs.prog_data); in compile_gs_prog()
153 static void populate_key( struct brw_context *brw, in populate_key() argument
163 struct gl_context *ctx = &brw->intel.ctx; in populate_key()
164 struct intel_context *intel = &brw->intel; in populate_key()
169 key->attrs = brw->vs.prog_data->outputs_written; in populate_key()
[all …]
brw_state_upload.c
254 void brw_init_state( struct brw_context *brw ) in brw_init_state() argument
259 brw_init_caches(brw); in brw_init_state()
261 if (brw->intel.gen >= 7) { in brw_init_state()
264 } else if (brw->intel.gen == 6) { in brw_init_state()
272 brw->atoms = atoms; in brw_init_state()
273 brw->num_atoms = num_atoms; in brw_init_state()
277 (*atoms)->dirty.brw | in brw_init_state()
285 void brw_destroy_state( struct brw_context *brw ) in brw_destroy_state() argument
287 brw_destroy_caches(brw); in brw_destroy_state()
297 (a->brw & b->brw) | in check_state()
[all …]
brw_clip_state.c
37 brw_upload_clip_unit(struct brw_context *brw) in brw_upload_clip_unit() argument
39 struct intel_context *intel = &brw->intel; in brw_upload_clip_unit()
43 clip = brw_state_batch(brw, AUB_TRACE_CLIP_STATE, in brw_upload_clip_unit()
44 sizeof(*clip), 32, &brw->clip.state_offset); in brw_upload_clip_unit()
48 clip->thread0.grf_reg_count = (ALIGN(brw->clip.prog_data->total_grf, 16) / in brw_upload_clip_unit()
51 brw_program_reloc(brw, in brw_upload_clip_unit()
52 brw->clip.state_offset + in brw_upload_clip_unit()
54 brw->clip.prog_offset + in brw_upload_clip_unit()
60 clip->thread3.urb_entry_read_length = brw->clip.prog_data->urb_read_length; in brw_upload_clip_unit()
62 brw->clip.prog_data->curb_read_length; in brw_upload_clip_unit()
[all …]
gen7_vs_state.c
33 upload_vs_state(struct brw_context *brw) in upload_vs_state() argument
35 struct intel_context *intel = &brw->intel; in upload_vs_state()
37 const int max_threads_shift = brw->intel.is_haswell ? in upload_vs_state()
45 OUT_BATCH(brw->vs.bind_bo_offset); in upload_vs_state()
51 OUT_BATCH(brw->sampler.offset); in upload_vs_state()
54 if (brw->vs.push_const_size == 0) { in upload_vs_state()
68 OUT_BATCH(brw->vs.push_const_size); in upload_vs_state()
73 OUT_BATCH(brw->vs.push_const_offset); in upload_vs_state()
88 OUT_BATCH(brw->vs.prog_offset); in upload_vs_state()
90 ((ALIGN(brw->sampler.count, 4)/4) << GEN6_VS_SAMPLER_COUNT_SHIFT)); in upload_vs_state()
[all …]
