
Lines Matching +full:- +full:- +full:batch

31 static void dump_batch(struct intel_batchbuffer *batch) {  in dump_batch()  argument
32 int fd = open("/tmp/i965-batchbuffers.dump", O_WRONLY | O_CREAT, 0666); in dump_batch()
33 if (fd != -1) { in dump_batch()
34 igt_assert_eq(write(fd, batch->buffer, 4096), 4096); in dump_batch()
60 /* Write all -1 */
83 /* aub->annotations is an array keeping a list of annotations of the in annotation_init()
84 * batch buffer ordered by offset. aub->annotations[0] is thus left in annotation_init()
86 * the batch buffer with annotation_add_batch() */ in annotation_init()
87 aub->index = 1; in annotation_init()
94 a->type = type; in add_annotation()
95 a->subtype = subtype; in add_annotation()
96 a->ending_offset = ending_offset; in add_annotation()
101 add_annotation(&aub->annotations[0], AUB_TRACE_TYPE_BATCH, 0, size); in annotation_add_batch()
109 igt_assert(aub->index < MAX_ANNOTATIONS); in annotation_add_state()
111 add_annotation(&aub->annotations[aub->index++], in annotation_add_state()
114 add_annotation(&aub->annotations[aub->index++], in annotation_add_state()
121 struct intel_batchbuffer *batch) in annotation_flush() argument
126 drm_intel_bufmgr_gem_set_aub_annotations(batch->bo, in annotation_flush()
127 aub->annotations, in annotation_flush()
128 aub->index); in annotation_flush()
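
The fragments above sketch a small annotation API: init reserves slot 0, state is recorded as it is placed, and the ordered list is handed to libdrm before execution. A minimal usage sketch, with the type name and signatures assumed from the fragments rather than copied from the full source:

	struct annotations_context aub;			/* assumed type name */

	annotation_init(&aub);				/* index starts at 1; slot 0 is reserved */
	/* record each piece of state as it is placed in the buffer: */
	annotation_add_state(&aub, AUB_TRACE_SURFACE_STATE, offset, size);
	/* once the final size is known, fill slot 0 with the batch itself: */
	annotation_add_batch(&aub, batch_end);
	/* hand the offset-ordered list to libdrm before execution: */
	annotation_flush(&aub, batch);
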
132 gen6_render_flush(struct intel_batchbuffer *batch, in gen6_render_flush() argument
137 ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); in gen6_render_flush()
139 ret = drm_intel_gem_bo_context_exec(batch->bo, context, in gen6_render_flush()
146 gen8_bind_buf(struct intel_batchbuffer *batch, in gen8_bind_buf() argument
154 igt_assert_lte(buf->stride, 256*1024); in gen8_bind_buf()
165 ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64); in gen8_bind_buf()
166 offset = intel_batchbuffer_subdata_offset(batch, ss); in gen8_bind_buf()
169 ss->ss0.surface_type = SURFACE_2D; in gen8_bind_buf()
170 switch (buf->bpp) { in gen8_bind_buf()
171 case 8: ss->ss0.surface_format = SURFACEFORMAT_R8_UNORM; break; in gen8_bind_buf()
172 case 16: ss->ss0.surface_format = SURFACEFORMAT_R8G8_UNORM; break; in gen8_bind_buf()
173 case 32: ss->ss0.surface_format = SURFACEFORMAT_B8G8R8A8_UNORM; break; in gen8_bind_buf()
174 case 64: ss->ss0.surface_format = SURFACEFORMAT_R16G16B16A16_FLOAT; break; in gen8_bind_buf()
177 ss->ss0.render_cache_read_write = 1; in gen8_bind_buf()
178 ss->ss0.vertical_alignment = 1; /* align 4 */ in gen8_bind_buf()
179 ss->ss0.horizontal_alignment = 1; /* align 4 */ in gen8_bind_buf()
180 if (buf->tiling == I915_TILING_X) in gen8_bind_buf()
181 ss->ss0.tiled_mode = 2; in gen8_bind_buf()
182 else if (buf->tiling == I915_TILING_Y) in gen8_bind_buf()
183 ss->ss0.tiled_mode = 3; in gen8_bind_buf()
185 if (IS_CHERRYVIEW(batch->devid)) in gen8_bind_buf()
186 ss->ss1.memory_object_control = CHV_MOCS_WB | CHV_MOCS_L3; in gen8_bind_buf()
188 ss->ss1.memory_object_control = BDW_MOCS_PTE | in gen8_bind_buf()
191 ss->ss8.base_addr = buf->bo->offset64; in gen8_bind_buf()
192 ss->ss9.base_addr_hi = buf->bo->offset64 >> 32; in gen8_bind_buf()
194 ret = drm_intel_bo_emit_reloc(batch->bo, in gen8_bind_buf()
195 intel_batchbuffer_subdata_offset(batch, &ss->ss8), in gen8_bind_buf()
196 buf->bo, 0, in gen8_bind_buf()
200 ss->ss2.height = igt_buf_height(buf) - 1; in gen8_bind_buf()
201 ss->ss2.width = igt_buf_width(buf) - 1; in gen8_bind_buf()
202 ss->ss3.pitch = buf->stride - 1; in gen8_bind_buf()
204 ss->ss7.shader_chanel_select_r = 4; in gen8_bind_buf()
205 ss->ss7.shader_chanel_select_g = 5; in gen8_bind_buf()
206 ss->ss7.shader_chanel_select_b = 6; in gen8_bind_buf()
207 ss->ss7.shader_chanel_select_a = 7; in gen8_bind_buf()
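
The channel-select values 4 through 7 written above are, in the usual gen8 definitions, the identity swizzle (the misspelled `shader_chanel_select_*` field names are as in the source). A sketch of the assumed encoding:

	/* Assumed shader channel select (SCS) encoding: */
	enum scs {
		SCS_ZERO  = 0,	/* channel reads as 0.0 */
		SCS_ONE   = 1,	/* channel reads as 1.0 */
		SCS_RED   = 4,	/* identity swizzle: R from R, ... */
		SCS_GREEN = 5,
		SCS_BLUE  = 6,
		SCS_ALPHA = 7,
	};
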
213 gen8_bind_surfaces(struct intel_batchbuffer *batch, in gen8_bind_surfaces() argument
220 binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32); in gen8_bind_surfaces()
221 offset = intel_batchbuffer_subdata_offset(batch, binding_table); in gen8_bind_surfaces()
224 binding_table[0] = gen8_bind_buf(batch, aub, dst, 1); in gen8_bind_surfaces()
225 binding_table[1] = gen8_bind_buf(batch, aub, src, 0); in gen8_bind_surfaces()
232 gen8_create_sampler(struct intel_batchbuffer *batch, in gen8_create_sampler() argument
238 ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 64); in gen8_create_sampler()
239 offset = intel_batchbuffer_subdata_offset(batch, ss); in gen8_create_sampler()
243 ss->ss0.min_filter = GEN4_MAPFILTER_NEAREST; in gen8_create_sampler()
244 ss->ss0.mag_filter = GEN4_MAPFILTER_NEAREST; in gen8_create_sampler()
245 ss->ss3.r_wrap_mode = GEN4_TEXCOORDMODE_CLAMP; in gen8_create_sampler()
246 ss->ss3.s_wrap_mode = GEN4_TEXCOORDMODE_CLAMP; in gen8_create_sampler()
247 ss->ss3.t_wrap_mode = GEN4_TEXCOORDMODE_CLAMP; in gen8_create_sampler()
249 /* I've experimented with non-normalized coordinates and using the LD in gen8_create_sampler()
251 ss->ss3.non_normalized_coord = 0; in gen8_create_sampler()
257 gen8_fill_ps(struct intel_batchbuffer *batch, in gen8_fill_ps() argument
264 offset = intel_batchbuffer_copy_data(batch, kernel, size, 64); in gen8_fill_ps()
281 gen7_fill_vertex_buffer_data(struct intel_batchbuffer *batch, in gen7_fill_vertex_buffer_data() argument
291 intel_batchbuffer_align(batch, 8); in gen7_fill_vertex_buffer_data()
292 start = batch->ptr; in gen7_fill_vertex_buffer_data()
294 emit_vertex_2s(batch, dst_x + width, dst_y + height); in gen7_fill_vertex_buffer_data()
295 emit_vertex_normalized(batch, src_x + width, igt_buf_width(src)); in gen7_fill_vertex_buffer_data()
296 emit_vertex_normalized(batch, src_y + height, igt_buf_height(src)); in gen7_fill_vertex_buffer_data()
298 emit_vertex_2s(batch, dst_x, dst_y + height); in gen7_fill_vertex_buffer_data()
299 emit_vertex_normalized(batch, src_x, igt_buf_width(src)); in gen7_fill_vertex_buffer_data()
300 emit_vertex_normalized(batch, src_y + height, igt_buf_height(src)); in gen7_fill_vertex_buffer_data()
302 emit_vertex_2s(batch, dst_x, dst_y); in gen7_fill_vertex_buffer_data()
303 emit_vertex_normalized(batch, src_x, igt_buf_width(src)); in gen7_fill_vertex_buffer_data()
304 emit_vertex_normalized(batch, src_y, igt_buf_height(src)); in gen7_fill_vertex_buffer_data()
306 offset = intel_batchbuffer_subdata_offset(batch, start); in gen7_fill_vertex_buffer_data()
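
From the emit calls above, each vertex packs a pair of 16-bit destination pixel coordinates followed by two normalized floats, three vertices per rectangle. A sketch of the assumed in-memory layout:

	#include <stdint.h>

	/* Assumed per-vertex layout written by gen7_fill_vertex_buffer_data():
	 * one emit_vertex_2s() plus two emit_vertex_normalized() calls,
	 * i.e. 12 bytes per vertex. */
	struct rect_vertex {
		int16_t dst_x, dst_y;	/* pixel coords; VF expands to (x, y, 0, 1.0) */
		float   src_u, src_v;	/* normalized;   VF expands to (u, v, 0, 1.0) */
	};
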
313 * gen6_emit_vertex_elements - The vertex elements describe the contents of the
322 gen6_emit_vertex_elements(struct intel_batchbuffer *batch) { in gen6_emit_vertex_elements() argument
325 * dword 0-3: pad (0, 0, 0, 0) in gen6_emit_vertex_elements()
326 * dword 4-7: position (x, y, 0, 1.0), in gen6_emit_vertex_elements()
327 * dword 8-11: texture coordinate 0 (u0, v0, 0, 1.0) in gen6_emit_vertex_elements()
329 OUT_BATCH(GEN4_3DSTATE_VERTEX_ELEMENTS | (3 * 2 + 1 - 2)); in gen6_emit_vertex_elements()
342 /* Element state 1 - Our "destination" vertices. These are passed down in gen6_emit_vertex_elements()
372 * @batch
373 * @offset - byte offset within the @batch where the vertex buffer starts.
375 static void gen8_emit_vertex_buffer(struct intel_batchbuffer *batch, in gen8_emit_vertex_buffer() argument
377 OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | (1 + (4 * 1) - 2)); in gen8_emit_vertex_buffer()
381 OUT_RELOC(batch->bo, I915_GEM_DOMAIN_VERTEX, 0, offset); in gen8_emit_vertex_buffer()
386 gen6_create_cc_state(struct intel_batchbuffer *batch, in gen6_create_cc_state() argument
392 cc_state = intel_batchbuffer_subdata_alloc(batch, in gen6_create_cc_state()
394 offset = intel_batchbuffer_subdata_offset(batch, cc_state); in gen6_create_cc_state()
402 gen8_create_blend_state(struct intel_batchbuffer *batch, in gen8_create_blend_state() argument
409 blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64); in gen8_create_blend_state()
410 offset = intel_batchbuffer_subdata_offset(batch, blend); in gen8_create_blend_state()
415 blend->bs[i].dest_blend_factor = GEN6_BLENDFACTOR_ZERO; in gen8_create_blend_state()
416 blend->bs[i].source_blend_factor = GEN6_BLENDFACTOR_ONE; in gen8_create_blend_state()
417 blend->bs[i].color_blend_func = GEN6_BLENDFUNCTION_ADD; in gen8_create_blend_state()
418 blend->bs[i].pre_blend_color_clamp = 1; in gen8_create_blend_state()
419 blend->bs[i].color_buffer_blend = 0; in gen8_create_blend_state()
426 gen6_create_cc_viewport(struct intel_batchbuffer *batch, in gen6_create_cc_viewport() argument
432 vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32); in gen6_create_cc_viewport()
433 offset = intel_batchbuffer_subdata_offset(batch, vp); in gen6_create_cc_viewport()
438 vp->min_depth = -1.e35; in gen6_create_cc_viewport()
439 vp->max_depth = 1.e35; in gen6_create_cc_viewport()
445 gen7_create_sf_clip_viewport(struct intel_batchbuffer *batch, in gen7_create_sf_clip_viewport() argument
452 scv_state = intel_batchbuffer_subdata_alloc(batch, in gen7_create_sf_clip_viewport()
454 offset = intel_batchbuffer_subdata_offset(batch, scv_state); in gen7_create_sf_clip_viewport()
458 scv_state->guardband.xmin = 0; in gen7_create_sf_clip_viewport()
459 scv_state->guardband.xmax = 1.0f; in gen7_create_sf_clip_viewport()
460 scv_state->guardband.ymin = 0; in gen7_create_sf_clip_viewport()
461 scv_state->guardband.ymax = 1.0f; in gen7_create_sf_clip_viewport()
467 gen6_create_scissor_rect(struct intel_batchbuffer *batch, in gen6_create_scissor_rect() argument
473 scissor = intel_batchbuffer_subdata_alloc(batch, sizeof(*scissor), 64); in gen6_create_scissor_rect()
474 offset = intel_batchbuffer_subdata_offset(batch, scissor); in gen6_create_scissor_rect()
482 gen8_emit_sip(struct intel_batchbuffer *batch) { in gen8_emit_sip() argument
483 OUT_BATCH(GEN4_STATE_SIP | (3 - 2)); in gen8_emit_sip()
489 gen7_emit_push_constants(struct intel_batchbuffer *batch) { in gen7_emit_push_constants() argument
503 gen8_emit_state_base_address(struct intel_batchbuffer *batch) { in gen8_emit_state_base_address() argument
504 OUT_BATCH(GEN4_STATE_BASE_ADDRESS | (16 - 2)); in gen8_emit_state_base_address()
514 OUT_RELOC(batch->bo, I915_GEM_DOMAIN_SAMPLER, 0, BASE_ADDRESS_MODIFY); in gen8_emit_state_base_address()
517 OUT_RELOC(batch->bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_INSTRUCTION, in gen8_emit_state_base_address()
525 OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); in gen8_emit_state_base_address()
538 gen7_emit_urb(struct intel_batchbuffer *batch) { in gen7_emit_urb() argument
545 OUT_BATCH(vs_entries | ((vs_size - 1) << 16) | (vs_start << 25)); in gen7_emit_urb()
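
The URB dword above packs three fields into one write. A worked example with assumed values (vs_entries = 64, vs_size = 2, vs_start = 4; plausible for this file but not shown in the matches):

	/* vs_entries | ((vs_size - 1) << 16) | (vs_start << 25)
	 * =       64 | (1 << 16)            | (4 << 25)
	 * = 0x00000040 | 0x00010000 | 0x08000000
	 * = 0x08010040
	 * i.e. the entry count in the low bits, the encoded (size - 1)
	 * at bit 16, and the URB start offset at bit 25. */
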
555 gen8_emit_cc(struct intel_batchbuffer *batch) { in gen8_emit_cc() argument
564 gen8_emit_multisample(struct intel_batchbuffer *batch) { in gen8_emit_multisample() argument
573 gen8_emit_vs(struct intel_batchbuffer *batch) { in gen8_emit_vs() argument
580 OUT_BATCH(GEN6_3DSTATE_CONSTANT_VS | (11 - 2)); in gen8_emit_vs()
592 OUT_BATCH(GEN6_3DSTATE_VS | (9-2)); in gen8_emit_vs()
604 gen8_emit_hs(struct intel_batchbuffer *batch) { in gen8_emit_hs() argument
605 OUT_BATCH(GEN7_3DSTATE_CONSTANT_HS | (11 - 2)); in gen8_emit_hs()
617 OUT_BATCH(GEN7_3DSTATE_HS | (9-2)); in gen8_emit_hs()
635 gen8_emit_gs(struct intel_batchbuffer *batch) { in gen8_emit_gs() argument
636 OUT_BATCH(GEN6_3DSTATE_CONSTANT_GS | (11 - 2)); in gen8_emit_gs()
648 OUT_BATCH(GEN6_3DSTATE_GS | (10-2)); in gen8_emit_gs()
667 gen8_emit_ds(struct intel_batchbuffer *batch) { in gen8_emit_ds() argument
668 OUT_BATCH(GEN7_3DSTATE_CONSTANT_DS | (11 - 2)); in gen8_emit_ds()
680 OUT_BATCH(GEN7_3DSTATE_DS | (9-2)); in gen8_emit_ds()
698 gen8_emit_wm_hz_op(struct intel_batchbuffer *batch) { in gen8_emit_wm_hz_op() argument
699 OUT_BATCH(GEN8_3DSTATE_WM_HZ_OP | (5-2)); in gen8_emit_wm_hz_op()
707 gen8_emit_null_state(struct intel_batchbuffer *batch) { in gen8_emit_null_state() argument
708 gen8_emit_wm_hz_op(batch); in gen8_emit_null_state()
709 gen8_emit_hs(batch); in gen8_emit_null_state()
710 OUT_BATCH(GEN7_3DSTATE_TE | (4-2)); in gen8_emit_null_state()
714 gen8_emit_gs(batch); in gen8_emit_null_state()
715 gen8_emit_ds(batch); in gen8_emit_null_state()
716 gen8_emit_vs(batch); in gen8_emit_null_state()
720 gen7_emit_clip(struct intel_batchbuffer *batch) { in gen7_emit_clip() argument
721 OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2)); in gen7_emit_clip()
723 OUT_BATCH(0); /* pass-through */ in gen7_emit_clip()
728 gen8_emit_sf(struct intel_batchbuffer *batch) in gen8_emit_sf() argument
732 OUT_BATCH(GEN7_3DSTATE_SBE | (4 - 2)); in gen8_emit_sf()
741 OUT_BATCH(GEN8_3DSTATE_SBE_SWIZ | (11 - 2)); in gen8_emit_sf()
747 OUT_BATCH(GEN8_3DSTATE_RASTER | (5 - 2)); in gen8_emit_sf()
753 OUT_BATCH(GEN6_3DSTATE_SF | (4 - 2)); in gen8_emit_sf()
760 gen8_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel) { in gen8_emit_ps() argument
763 OUT_BATCH(GEN6_3DSTATE_WM | (2 - 2)); in gen8_emit_ps()
769 OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (11-2)); in gen8_emit_ps()
781 OUT_BATCH(GEN7_3DSTATE_PS | (12-2)); in gen8_emit_ps()
788 OUT_BATCH((max_threads - 1) << GEN8_3DSTATE_PS_MAX_THREADS_SHIFT | in gen8_emit_ps()
796 OUT_BATCH(GEN8_3DSTATE_PS_BLEND | (2 - 2)); in gen8_emit_ps()
799 OUT_BATCH(GEN8_3DSTATE_PS_EXTRA | (2 - 2)); in gen8_emit_ps()
804 gen8_emit_depth(struct intel_batchbuffer *batch) { in gen8_emit_depth() argument
805 OUT_BATCH(GEN8_3DSTATE_WM_DEPTH_STENCIL | (3 - 2)); in gen8_emit_depth()
809 OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (8-2)); in gen8_emit_depth()
818 OUT_BATCH(GEN8_3DSTATE_HIER_DEPTH_BUFFER | (5 - 2)); in gen8_emit_depth()
824 OUT_BATCH(GEN8_3DSTATE_STENCIL_BUFFER | (5 - 2)); in gen8_emit_depth()
832 gen7_emit_clear(struct intel_batchbuffer *batch) { in gen7_emit_clear() argument
833 OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3-2)); in gen7_emit_clear()
839 gen6_emit_drawing_rectangle(struct intel_batchbuffer *batch, const struct igt_buf *dst) in gen6_emit_drawing_rectangle() argument
841 OUT_BATCH(GEN4_3DSTATE_DRAWING_RECTANGLE | (4 - 2)); in gen6_emit_drawing_rectangle()
843 OUT_BATCH((igt_buf_height(dst) - 1) << 16 | (igt_buf_width(dst) - 1)); in gen6_emit_drawing_rectangle()
847 static void gen8_emit_vf_topology(struct intel_batchbuffer *batch) in gen8_emit_vf_topology() argument
854 static void gen8_emit_primitive(struct intel_batchbuffer *batch, uint32_t offset) in gen8_emit_primitive() argument
856 OUT_BATCH(GEN8_3DSTATE_VF_INSTANCING | (3 - 2)); in gen8_emit_primitive()
860 OUT_BATCH(GEN4_3DPRIMITIVE | (7-2)); in gen8_emit_primitive()
877 * +---------------+ <---- 4096
883 * |_______|_______| <---- 2048 + ?
886 * | batch |
890 * +---------------+ <---- 0 + ?
892 * The batch commands point to state within the batch, so all state offsets should be
894 * in that order. This means too many batch commands can delete state if not
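
gen8_render_copyfunc() below follows that layout. A condensed skeleton of the pattern, pulled from the function body (assertion details assumed where not shown):

	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];	/* state area, grows upward  */
	/* ...intel_batchbuffer_subdata_alloc() calls place all state here... */
	igt_assert(batch->ptr < &batch->buffer[4095]);	/* state stayed inside page  */

	batch->ptr = batch->buffer;			/* command area, from offset 0 */
	/* ...OUT_BATCH() stream referencing the state offsets above... */
	batch_end = intel_batchbuffer_align(batch, 8);	/* must stay below the split */
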
901 void gen8_render_copyfunc(struct intel_batchbuffer *batch, in gen8_render_copyfunc() argument
913 igt_assert(src->bpp == dst->bpp); in gen8_render_copyfunc()
914 intel_batchbuffer_flush_with_context(batch, context); in gen8_render_copyfunc()
916 intel_batchbuffer_align(batch, 8); in gen8_render_copyfunc()
918 batch->ptr = &batch->buffer[BATCH_STATE_SPLIT]; in gen8_render_copyfunc()
922 ps_binding_table = gen8_bind_surfaces(batch, &aub_annotations, in gen8_render_copyfunc()
924 ps_sampler_state = gen8_create_sampler(batch, &aub_annotations); in gen8_render_copyfunc()
925 ps_kernel_off = gen8_fill_ps(batch, &aub_annotations, in gen8_render_copyfunc()
927 vertex_buffer = gen7_fill_vertex_buffer_data(batch, &aub_annotations, in gen8_render_copyfunc()
932 cc.cc_state = gen6_create_cc_state(batch, &aub_annotations); in gen8_render_copyfunc()
933 cc.blend_state = gen8_create_blend_state(batch, &aub_annotations); in gen8_render_copyfunc()
934 viewport.cc_state = gen6_create_cc_viewport(batch, &aub_annotations); in gen8_render_copyfunc()
935 viewport.sf_clip_state = gen7_create_sf_clip_viewport(batch, &aub_annotations); in gen8_render_copyfunc()
936 scissor_state = gen6_create_scissor_rect(batch, &aub_annotations); in gen8_render_copyfunc()
939 igt_assert(batch->ptr < &batch->buffer[4095]); in gen8_render_copyfunc()
941 batch->ptr = batch->buffer; in gen8_render_copyfunc()
947 gen8_emit_sip(batch); in gen8_render_copyfunc()
949 gen7_emit_push_constants(batch); in gen8_render_copyfunc()
951 gen8_emit_state_base_address(batch); in gen8_render_copyfunc()
958 gen7_emit_urb(batch); in gen8_render_copyfunc()
960 gen8_emit_cc(batch); in gen8_render_copyfunc()
962 gen8_emit_multisample(batch); in gen8_render_copyfunc()
964 gen8_emit_null_state(batch); in gen8_render_copyfunc()
966 OUT_BATCH(GEN7_3DSTATE_STREAMOUT | (5-2)); in gen8_render_copyfunc()
972 gen7_emit_clip(batch); in gen8_render_copyfunc()
974 gen8_emit_sf(batch); in gen8_render_copyfunc()
982 gen8_emit_ps(batch, ps_kernel_off); in gen8_render_copyfunc()
987 gen8_emit_depth(batch); in gen8_render_copyfunc()
989 gen7_emit_clear(batch); in gen8_render_copyfunc()
991 gen6_emit_drawing_rectangle(batch, dst); in gen8_render_copyfunc()
993 gen8_emit_vertex_buffer(batch, vertex_buffer); in gen8_render_copyfunc()
994 gen6_emit_vertex_elements(batch); in gen8_render_copyfunc()
996 gen8_emit_vf_topology(batch); in gen8_render_copyfunc()
997 gen8_emit_primitive(batch, vertex_buffer); in gen8_render_copyfunc()
1001 batch_end = intel_batchbuffer_align(batch, 8); in gen8_render_copyfunc()
1005 dump_batch(batch); in gen8_render_copyfunc()
1007 annotation_flush(&aub_annotations, batch); in gen8_render_copyfunc()
1009 gen6_render_flush(batch, context, batch_end); in gen8_render_copyfunc()
1010 intel_batchbuffer_reset(batch); in gen8_render_copyfunc()
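
For context, a sketch of how a test would normally reach this entry point, assuming the usual igt_get_render_copyfunc() dispatch convention rather than calling gen8_render_copyfunc() directly:

	igt_render_copyfunc_t render_copy = igt_get_render_copyfunc(devid);

	igt_require(render_copy);	/* skip on unsupported generations */
	render_copy(batch, NULL /* context */,
		    &src, 0, 0, width, height,
		    &dst, 0, 0);
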