
Lines Matching +full:- +full:- +full:batch

16 #define GEN4_GRF_BLOCKS(nreg) (((nreg) + 15) / 16 - 1)
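Note: the macro above converts a GRF register count into the hardware's "16-register blocks minus one" encoding. A minimal, self-contained sketch of the arithmetic (the test values are illustrative, not taken from the driver):

    #include <assert.h>

    #define GEN4_GRF_BLOCKS(nreg) (((nreg) + 15) / 16 - 1)

    int main(void)
    {
            /* 1..16 registers fit in one 16-register block -> encoded as 0,
             * 17..32 registers need two blocks -> encoded as 1, and so on. */
            assert(GEN4_GRF_BLOCKS(1)  == 0);
            assert(GEN4_GRF_BLOCKS(16) == 0);
            assert(GEN4_GRF_BLOCKS(17) == 1);
            assert(GEN4_GRF_BLOCKS(32) == 1);
            return 0;
    }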
83 batch_used(struct intel_batchbuffer *batch) in batch_used() argument
85 return batch->ptr - batch->buffer; in batch_used()
89 batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor) in batch_round_upto() argument
91 uint32_t offset = batch_used(batch); in batch_round_upto()
93 offset = (offset + divisor - 1) / divisor * divisor; in batch_round_upto()
94 batch->ptr = batch->buffer + offset; in batch_round_upto()
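Note: batch_round_upto() applies the usual integer round-up-to-multiple idiom before repositioning batch->ptr. A small sketch of that arithmetic in isolation (example divisor chosen arbitrarily):

    #include <assert.h>
    #include <stdint.h>

    /* Round 'offset' up to the next multiple of 'divisor' (divisor > 0),
     * the same (offset + divisor - 1) / divisor * divisor idiom used above. */
    static uint32_t round_upto(uint32_t offset, uint32_t divisor)
    {
            return (offset + divisor - 1) / divisor * divisor;
    }

    int main(void)
    {
            assert(round_upto(0, 12)  == 0);
            assert(round_upto(1, 12)  == 12);
            assert(round_upto(12, 12) == 12);
            assert(round_upto(13, 12) == 24);
            return 0;
    }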
124 gen4_render_flush(struct intel_batchbuffer *batch, in gen4_render_flush() argument
129 ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); in gen4_render_flush()
131 ret = drm_intel_gem_bo_context_exec(batch->bo, context, in gen4_render_flush()
137 gen4_bind_buf(struct intel_batchbuffer *batch, in gen4_bind_buf() argument
145 igt_assert_lte(buf->stride, 128*1024); in gen4_bind_buf()
156 ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32); in gen4_bind_buf()
158 ss->ss0.surface_type = SURFACE_2D; in gen4_bind_buf()
159 switch (buf->bpp) { in gen4_bind_buf()
160 case 8: ss->ss0.surface_format = SURFACEFORMAT_R8_UNORM; break; in gen4_bind_buf()
161 case 16: ss->ss0.surface_format = SURFACEFORMAT_R8G8_UNORM; break; in gen4_bind_buf()
162 case 32: ss->ss0.surface_format = SURFACEFORMAT_B8G8R8A8_UNORM; break; in gen4_bind_buf()
163 case 64: ss->ss0.surface_format = SURFACEFORMAT_R16G16B16A16_FLOAT; break; in gen4_bind_buf()
167 ss->ss0.data_return_format = SURFACERETURNFORMAT_FLOAT32; in gen4_bind_buf()
168 ss->ss0.color_blend = 1; in gen4_bind_buf()
169 ss->ss1.base_addr = buf->bo->offset; in gen4_bind_buf()
171 ret = drm_intel_bo_emit_reloc(batch->bo, in gen4_bind_buf()
172 intel_batchbuffer_subdata_offset(batch, ss) + 4, in gen4_bind_buf()
173 buf->bo, 0, in gen4_bind_buf()
177 ss->ss2.height = igt_buf_height(buf) - 1; in gen4_bind_buf()
178 ss->ss2.width = igt_buf_width(buf) - 1; in gen4_bind_buf()
179 ss->ss3.pitch = buf->stride - 1; in gen4_bind_buf()
180 ss->ss3.tiled_surface = buf->tiling != I915_TILING_NONE; in gen4_bind_buf()
181 ss->ss3.tile_walk = buf->tiling == I915_TILING_Y; in gen4_bind_buf()
183 return intel_batchbuffer_subdata_offset(batch, ss); in gen4_bind_buf()
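Note: the relocation in gen4_bind_buf() is registered at subdata_offset(ss) + 4 because the surface base address occupies the second dword of the surface state. A rough sketch of that layout (simplified stand-in struct, not the real gen4 surface-state definition):

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified stand-in: only the dword positions matter here,
     * not the real bitfields. */
    struct surface_state_sketch {
            uint32_t ss0;       /* surface type/format flags            */
            uint32_t ss1_base;  /* surface base address -> relocation   */
            uint32_t ss2;       /* width/height, each stored minus one  */
            uint32_t ss3;       /* pitch minus one, tiling bits         */
    };

    /* The reloc offset passed to drm_intel_bo_emit_reloc() is the state's
     * offset in the batch plus the base-address dword, i.e. "+ 4".  The
     * buf->bo->offset value written directly is only a guess; the kernel
     * rewrites it at execbuf time if the bo ends up elsewhere. */
    _Static_assert(offsetof(struct surface_state_sketch, ss1_base) == 4,
                   "base address is dword 1 of the surface state");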
187 gen4_bind_surfaces(struct intel_batchbuffer *batch, in gen4_bind_surfaces() argument
193 binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 32); in gen4_bind_surfaces()
195 binding_table[0] = gen4_bind_buf(batch, dst, 1); in gen4_bind_surfaces()
196 binding_table[1] = gen4_bind_buf(batch, src, 0); in gen4_bind_surfaces()
198 return intel_batchbuffer_subdata_offset(batch, binding_table); in gen4_bind_surfaces()
202 gen4_emit_sip(struct intel_batchbuffer *batch) in gen4_emit_sip() argument
204 OUT_BATCH(GEN4_STATE_SIP | (2 - 2)); in gen4_emit_sip()
209 gen4_emit_state_base_address(struct intel_batchbuffer *batch) in gen4_emit_state_base_address() argument
211 if (IS_GEN5(batch->devid)) { in gen4_emit_state_base_address()
212 OUT_BATCH(GEN4_STATE_BASE_ADDRESS | (8 - 2)); in gen4_emit_state_base_address()
213 OUT_RELOC(batch->bo, /* general */ in gen4_emit_state_base_address()
216 OUT_RELOC(batch->bo, /* surface */ in gen4_emit_state_base_address()
220 OUT_RELOC(batch->bo, /* instruction */ in gen4_emit_state_base_address()
229 OUT_BATCH(GEN4_STATE_BASE_ADDRESS | (6 - 2)); in gen4_emit_state_base_address()
230 OUT_RELOC(batch->bo, /* general */ in gen4_emit_state_base_address()
233 OUT_RELOC(batch->bo, /* surface */ in gen4_emit_state_base_address()
245 gen4_emit_pipelined_pointers(struct intel_batchbuffer *batch, in gen4_emit_pipelined_pointers() argument
249 OUT_BATCH(GEN4_3DSTATE_PIPELINED_POINTERS | (7 - 2)); in gen4_emit_pipelined_pointers()
259 gen4_emit_urb(struct intel_batchbuffer *batch) in gen4_emit_urb() argument
261 int vs_entries = gen4_max_vs_nr_urb_entries(batch->devid); in gen4_emit_urb()
264 int sf_entries = gen4_max_sf_nr_urb_entries(batch->devid); in gen4_emit_urb()
273 assert(urb_cs_end <= gen4_urb_size(batch->devid)); in gen4_emit_urb()
275 intel_batchbuffer_align(batch, 16); in gen4_emit_urb()
283 (3 - 2)); in gen4_emit_urb()
290 OUT_BATCH(GEN4_CS_URB_STATE | (2 - 2)); in gen4_emit_urb()
291 OUT_BATCH((URB_CS_ENTRY_SIZE - 1) << 4 | cs_entries << 0); in gen4_emit_urb()
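Note: gen4_emit_urb() sizes the per-stage URB regions and then programs CS_URB_STATE. A hedged sketch of the bookkeeping (entry counts and sizes below are made up, and intermediate stages are omitted; only the end-must-fit check and the (size - 1) << 4 | count packing mirror the matches above):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Illustrative values only; the real ones come from
             * gen4_max_*_nr_urb_entries() and the URB_*_ENTRY_SIZE constants. */
            unsigned vs_entries = 32, vs_size = 1;
            unsigned sf_entries = 8,  sf_size = 2;
            unsigned cs_entries = 1,  cs_size = 1;
            unsigned urb_size   = 64; /* stand-in for gen4_urb_size(devid) */

            /* Regions are laid out back to back; the end of the last one
             * must fit, as the assert in gen4_emit_urb() checks. */
            unsigned urb_vs_end = vs_entries * vs_size;
            unsigned urb_sf_end = urb_vs_end + sf_entries * sf_size;
            unsigned urb_cs_end = urb_sf_end + cs_entries * cs_size;
            assert(urb_cs_end <= urb_size);

            /* CS_URB_STATE dword 1: entry size minus one starting at bit 4,
             * number of entries at bit 0, as emitted above. */
            uint32_t dw1 = (cs_size - 1) << 4 | cs_entries << 0;
            (void)dw1;
            return 0;
    }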
295 gen4_emit_null_depth_buffer(struct intel_batchbuffer *batch) in gen4_emit_null_depth_buffer() argument
297 if (IS_G4X(batch->devid) || IS_GEN5(batch->devid)) { in gen4_emit_null_depth_buffer()
298 OUT_BATCH(GEN4_3DSTATE_DEPTH_BUFFER | (6 - 2)); in gen4_emit_null_depth_buffer()
306 OUT_BATCH(GEN4_3DSTATE_DEPTH_BUFFER | (5 - 2)); in gen4_emit_null_depth_buffer()
314 if (IS_GEN5(batch->devid)) { in gen4_emit_null_depth_buffer()
315 OUT_BATCH(GEN4_3DSTATE_CLEAR_PARAMS | (2 - 2)); in gen4_emit_null_depth_buffer()
321 gen4_emit_invariant(struct intel_batchbuffer *batch) in gen4_emit_invariant() argument
325 if (IS_GEN5(batch->devid) || IS_G4X(batch->devid)) in gen4_emit_invariant()
332 gen4_create_vs_state(struct intel_batchbuffer *batch) in gen4_create_vs_state() argument
337 vs = intel_batchbuffer_subdata_alloc(batch, sizeof(*vs), 32); in gen4_create_vs_state()
340 nr_urb_entries = gen4_max_vs_nr_urb_entries(batch->devid); in gen4_create_vs_state()
341 if (IS_GEN5(batch->devid)) in gen4_create_vs_state()
343 vs->vs4.nr_urb_entries = nr_urb_entries; in gen4_create_vs_state()
344 vs->vs4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1; in gen4_create_vs_state()
345 vs->vs6.vs_enable = 0; in gen4_create_vs_state()
346 vs->vs6.vert_cache_disable = 1; in gen4_create_vs_state()
348 return intel_batchbuffer_subdata_offset(batch, vs); in gen4_create_vs_state()
352 gen4_create_sf_state(struct intel_batchbuffer *batch, in gen4_create_sf_state() argument
357 sf = intel_batchbuffer_subdata_alloc(batch, sizeof(*sf), 32); in gen4_create_sf_state()
359 sf->sf0.grf_reg_count = GEN4_GRF_BLOCKS(SF_KERNEL_NUM_GRF); in gen4_create_sf_state()
360 sf->sf0.kernel_start_pointer = kernel >> 6; in gen4_create_sf_state()
362 sf->sf3.urb_entry_read_length = 1; /* 1 URB per vertex */ in gen4_create_sf_state()
364 sf->sf3.urb_entry_read_offset = 1; in gen4_create_sf_state()
365 sf->sf3.dispatch_grf_start_reg = 3; in gen4_create_sf_state()
367 sf->sf4.max_threads = gen4_max_sf_threads(batch->devid) - 1; in gen4_create_sf_state()
368 sf->sf4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1; in gen4_create_sf_state()
369 sf->sf4.nr_urb_entries = gen4_max_sf_nr_urb_entries(batch->devid); in gen4_create_sf_state()
371 sf->sf6.cull_mode = GEN4_CULLMODE_NONE; in gen4_create_sf_state()
372 sf->sf6.dest_org_vbias = 0x8; in gen4_create_sf_state()
373 sf->sf6.dest_org_hbias = 0x8; in gen4_create_sf_state()
375 return intel_batchbuffer_subdata_offset(batch, sf); in gen4_create_sf_state()
379 gen4_create_wm_state(struct intel_batchbuffer *batch, in gen4_create_wm_state() argument
385 wm = intel_batchbuffer_subdata_alloc(batch, sizeof(*wm), 32); in gen4_create_wm_state()
388 wm->wm0.kernel_start_pointer = kernel >> 6; in gen4_create_wm_state()
389 wm->wm0.grf_reg_count = GEN4_GRF_BLOCKS(PS_KERNEL_NUM_GRF); in gen4_create_wm_state()
391 wm->wm3.urb_entry_read_offset = 0; in gen4_create_wm_state()
392 wm->wm3.dispatch_grf_start_reg = 3; in gen4_create_wm_state()
395 wm->wm4.sampler_state_pointer = sampler >> 5; in gen4_create_wm_state()
396 wm->wm4.sampler_count = 1; in gen4_create_wm_state()
398 wm->wm5.max_threads = gen4_max_wm_threads(batch->devid); in gen4_create_wm_state()
399 wm->wm5.thread_dispatch_enable = 1; in gen4_create_wm_state()
400 wm->wm5.enable_16_pix = 1; in gen4_create_wm_state()
401 wm->wm5.early_depth_test = 1; in gen4_create_wm_state()
403 if (IS_GEN5(batch->devid)) in gen4_create_wm_state()
404 wm->wm1.binding_table_entry_count = 0; in gen4_create_wm_state()
406 wm->wm1.binding_table_entry_count = 2; in gen4_create_wm_state()
407 wm->wm3.urb_entry_read_length = 2; in gen4_create_wm_state()
409 return intel_batchbuffer_subdata_offset(batch, wm); in gen4_create_wm_state()
413 gen4_emit_binding_table(struct intel_batchbuffer *batch, in gen4_emit_binding_table() argument
416 OUT_BATCH(GEN4_3DSTATE_BINDING_TABLE_POINTERS | (6 - 2)); in gen4_emit_binding_table()
425 gen4_emit_drawing_rectangle(struct intel_batchbuffer *batch, in gen4_emit_drawing_rectangle() argument
428 OUT_BATCH(GEN4_3DSTATE_DRAWING_RECTANGLE | (4 - 2)); in gen4_emit_drawing_rectangle()
430 OUT_BATCH((igt_buf_height(dst) - 1) << 16 | in gen4_emit_drawing_rectangle()
431 (igt_buf_width(dst) - 1)); in gen4_emit_drawing_rectangle()
436 gen4_emit_vertex_elements(struct intel_batchbuffer *batch) in gen4_emit_vertex_elements() argument
439 if (IS_GEN5(batch->devid)) { in gen4_emit_vertex_elements()
441 * dword 0-3: pad (0.0, 0.0, 0.0, 0.0), in gen4_emit_vertex_elements()
442 * dword 4-7: position (x, y, 1.0, 1.0), in gen4_emit_vertex_elements()
443 * dword 8-11: texture coordinate 0 (u0, v0, 0, 0) in gen4_emit_vertex_elements()
445 * dword 4-11 are fetched from vertex buffer in gen4_emit_vertex_elements()
447 OUT_BATCH(GEN4_3DSTATE_VERTEX_ELEMENTS | (3 * 2 + 1 - 2)); in gen4_emit_vertex_elements()
477 * dword 0-3: position (x, y, 1.0, 1.0), in gen4_emit_vertex_elements()
478 * dword 4-7: texture coordinate 0 (u0, v0, 0, 0) in gen4_emit_vertex_elements()
480 * dword 0-7 are fetched from vertex buffer in gen4_emit_vertex_elements()
482 OUT_BATCH(GEN4_3DSTATE_VERTEX_ELEMENTS | (2 * 2 + 1 - 2)); in gen4_emit_vertex_elements()
507 gen4_create_cc_viewport(struct intel_batchbuffer *batch) in gen4_create_cc_viewport() argument
511 vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32); in gen4_create_cc_viewport()
513 vp->min_depth = -1.e35; in gen4_create_cc_viewport()
514 vp->max_depth = 1.e35; in gen4_create_cc_viewport()
516 return intel_batchbuffer_subdata_offset(batch, vp); in gen4_create_cc_viewport()
520 gen4_create_cc_state(struct intel_batchbuffer *batch, in gen4_create_cc_state() argument
525 cc = intel_batchbuffer_subdata_alloc(batch, sizeof(*cc), 64); in gen4_create_cc_state()
527 cc->cc4.cc_viewport_state_offset = cc_vp; in gen4_create_cc_state()
529 return intel_batchbuffer_subdata_offset(batch, cc); in gen4_create_cc_state()
533 gen4_create_sf_kernel(struct intel_batchbuffer *batch) in gen4_create_sf_kernel() argument
535 if (IS_GEN5(batch->devid)) in gen4_create_sf_kernel()
536 return intel_batchbuffer_copy_data(batch, gen5_sf_kernel_nomask, in gen4_create_sf_kernel()
540 return intel_batchbuffer_copy_data(batch, gen4_sf_kernel_nomask, in gen4_create_sf_kernel()
546 gen4_create_ps_kernel(struct intel_batchbuffer *batch) in gen4_create_ps_kernel() argument
548 if (IS_GEN5(batch->devid)) in gen4_create_ps_kernel()
549 return intel_batchbuffer_copy_data(batch, gen5_ps_kernel_nomask_affine, in gen4_create_ps_kernel()
553 return intel_batchbuffer_copy_data(batch, gen4_ps_kernel_nomask_affine, in gen4_create_ps_kernel()
559 gen4_create_sampler(struct intel_batchbuffer *batch, in gen4_create_sampler() argument
565 ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32); in gen4_create_sampler()
567 ss->ss0.lod_preclamp = GEN4_LOD_PRECLAMP_OGL; in gen4_create_sampler()
572 ss->ss0.border_color_mode = GEN4_BORDER_COLOR_MODE_LEGACY; in gen4_create_sampler()
577 ss->ss0.min_filter = GEN4_MAPFILTER_NEAREST; in gen4_create_sampler()
578 ss->ss0.mag_filter = GEN4_MAPFILTER_NEAREST; in gen4_create_sampler()
581 ss->ss0.min_filter = GEN4_MAPFILTER_LINEAR; in gen4_create_sampler()
582 ss->ss0.mag_filter = GEN4_MAPFILTER_LINEAR; in gen4_create_sampler()
589 ss->ss1.r_wrap_mode = GEN4_TEXCOORDMODE_CLAMP_BORDER; in gen4_create_sampler()
590 ss->ss1.s_wrap_mode = GEN4_TEXCOORDMODE_CLAMP_BORDER; in gen4_create_sampler()
591 ss->ss1.t_wrap_mode = GEN4_TEXCOORDMODE_CLAMP_BORDER; in gen4_create_sampler()
594 ss->ss1.r_wrap_mode = GEN4_TEXCOORDMODE_WRAP; in gen4_create_sampler()
595 ss->ss1.s_wrap_mode = GEN4_TEXCOORDMODE_WRAP; in gen4_create_sampler()
596 ss->ss1.t_wrap_mode = GEN4_TEXCOORDMODE_WRAP; in gen4_create_sampler()
599 ss->ss1.r_wrap_mode = GEN4_TEXCOORDMODE_CLAMP; in gen4_create_sampler()
600 ss->ss1.s_wrap_mode = GEN4_TEXCOORDMODE_CLAMP; in gen4_create_sampler()
601 ss->ss1.t_wrap_mode = GEN4_TEXCOORDMODE_CLAMP; in gen4_create_sampler()
604 ss->ss1.r_wrap_mode = GEN4_TEXCOORDMODE_MIRROR; in gen4_create_sampler()
605 ss->ss1.s_wrap_mode = GEN4_TEXCOORDMODE_MIRROR; in gen4_create_sampler()
606 ss->ss1.t_wrap_mode = GEN4_TEXCOORDMODE_MIRROR; in gen4_create_sampler()
610 return intel_batchbuffer_subdata_offset(batch, ss); in gen4_create_sampler()
613 static void gen4_emit_vertex_buffer(struct intel_batchbuffer *batch) in gen4_emit_vertex_buffer() argument
615 OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | (5 - 2)); in gen4_emit_vertex_buffer()
619 OUT_RELOC(batch->bo, I915_GEM_DOMAIN_VERTEX, 0, 0); in gen4_emit_vertex_buffer()
620 if (IS_GEN5(batch->devid)) in gen4_emit_vertex_buffer()
621 OUT_RELOC(batch->bo, I915_GEM_DOMAIN_VERTEX, 0, in gen4_emit_vertex_buffer()
622 batch->bo->size - 1); in gen4_emit_vertex_buffer()
624 OUT_BATCH(batch->bo->size / VERTEX_SIZE - 1); in gen4_emit_vertex_buffer()
628 static uint32_t gen4_emit_primitive(struct intel_batchbuffer *batch) in gen4_emit_primitive() argument
636 (6 - 2)); in gen4_emit_primitive()
638 offset = batch_used(batch); in gen4_emit_primitive()
647 void gen4_render_copyfunc(struct intel_batchbuffer *batch, in gen4_render_copyfunc() argument
661 igt_assert(src->bpp == dst->bpp); in gen4_render_copyfunc()
662 intel_batchbuffer_flush_with_context(batch, context); in gen4_render_copyfunc()
664 batch->ptr = batch->buffer + 1024; in gen4_render_copyfunc()
665 intel_batchbuffer_subdata_alloc(batch, 64, 64); in gen4_render_copyfunc()
667 vs = gen4_create_vs_state(batch); in gen4_render_copyfunc()
669 sf_kernel = gen4_create_sf_kernel(batch); in gen4_render_copyfunc()
670 sf = gen4_create_sf_state(batch, sf_kernel); in gen4_render_copyfunc()
672 wm_table = gen4_bind_surfaces(batch, src, dst); in gen4_render_copyfunc()
673 wm_kernel = gen4_create_ps_kernel(batch); in gen4_render_copyfunc()
674 wm_sampler = gen4_create_sampler(batch, in gen4_render_copyfunc()
677 wm = gen4_create_wm_state(batch, wm_kernel, wm_sampler); in gen4_render_copyfunc()
679 cc_vp = gen4_create_cc_viewport(batch); in gen4_render_copyfunc()
680 cc = gen4_create_cc_state(batch, cc_vp); in gen4_render_copyfunc()
682 batch->ptr = batch->buffer; in gen4_render_copyfunc()
684 gen4_emit_invariant(batch); in gen4_render_copyfunc()
685 gen4_emit_state_base_address(batch); in gen4_render_copyfunc()
686 gen4_emit_sip(batch); in gen4_render_copyfunc()
687 gen4_emit_null_depth_buffer(batch); in gen4_render_copyfunc()
689 gen4_emit_drawing_rectangle(batch, dst); in gen4_render_copyfunc()
690 gen4_emit_binding_table(batch, wm_table); in gen4_render_copyfunc()
691 gen4_emit_vertex_elements(batch); in gen4_render_copyfunc()
692 gen4_emit_pipelined_pointers(batch, vs, sf, wm, cc); in gen4_render_copyfunc()
693 gen4_emit_urb(batch); in gen4_render_copyfunc()
695 gen4_emit_vertex_buffer(batch); in gen4_render_copyfunc()
696 offset = gen4_emit_primitive(batch); in gen4_render_copyfunc()
699 batch_end = intel_batchbuffer_align(batch, 8); in gen4_render_copyfunc()
701 *(uint32_t *)(batch->buffer + offset) = in gen4_render_copyfunc()
702 batch_round_upto(batch, VERTEX_SIZE)/VERTEX_SIZE; in gen4_render_copyfunc()
704 emit_vertex_2s(batch, dst_x + width, dst_y + height); in gen4_render_copyfunc()
705 emit_vertex_normalized(batch, src_x + width, igt_buf_width(src)); in gen4_render_copyfunc()
706 emit_vertex_normalized(batch, src_y + height, igt_buf_height(src)); in gen4_render_copyfunc()
708 emit_vertex_2s(batch, dst_x, dst_y + height); in gen4_render_copyfunc()
709 emit_vertex_normalized(batch, src_x, igt_buf_width(src)); in gen4_render_copyfunc()
710 emit_vertex_normalized(batch, src_y + height, igt_buf_height(src)); in gen4_render_copyfunc()
712 emit_vertex_2s(batch, dst_x, dst_y); in gen4_render_copyfunc()
713 emit_vertex_normalized(batch, src_x, igt_buf_width(src)); in gen4_render_copyfunc()
714 emit_vertex_normalized(batch, src_y, igt_buf_height(src)); in gen4_render_copyfunc()
716 gen4_render_flush(batch, context, batch_end); in gen4_render_copyfunc()
717 intel_batchbuffer_reset(batch); in gen4_render_copyfunc()
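Note: taken together, the last matches in gen4_render_copyfunc() show a back-patch pattern: gen4_emit_primitive() returns the byte offset of a dword inside the just-emitted primitive command, the batch is aligned, batch_round_upto(batch, VERTEX_SIZE) positions batch->ptr at the start of the vertex data, and that byte offset divided by VERTEX_SIZE is written back into the recorded dword, so the primitive starts at the right vertex within the batch bo that gen4_emit_vertex_buffer() bound as the vertex buffer. A minimal sketch of the patch step (hypothetical helper, not from the source):

    #include <stdint.h>
    #include <string.h>

    /* Write the index of the first vertex back into the primitive command.
     * 'prim_offset' is the byte offset of the dword recorded by
     * gen4_emit_primitive(); 'vertex_data_offset' is where the vertex data
     * will start, already rounded up to a whole vertex. */
    static void patch_start_vertex(uint8_t *buffer, uint32_t prim_offset,
                                   uint32_t vertex_data_offset,
                                   uint32_t vertex_size)
    {
            uint32_t start_vertex = vertex_data_offset / vertex_size;

            memcpy(buffer + prim_offset, &start_vertex, sizeof(start_vertex));
    }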