/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

static bool
lookup_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_shader_bin *bin =
      anv_pipeline_cache_search(&device->blorp_shader_cache, key, key_size);
   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

static bool
upload_blorp_shader(struct blorp_context *blorp,
                    const void *key, uint32_t key_size,
                    const void *kernel, uint32_t kernel_size,
                    const struct brw_stage_prog_data *prog_data,
                    uint32_t prog_data_size,
                    uint32_t *kernel_out, void *prog_data_out)
{
   struct anv_device *device = blorp->driver_ctx;

   /* The blorp cache must be a real cache */
   assert(device->blorp_shader_cache.cache);

   struct anv_pipeline_bind_map bind_map = {
      .surface_count = 0,
      .sampler_count = 0,
   };

   struct anv_shader_bin *bin =
      anv_pipeline_cache_upload_kernel(&device->blorp_shader_cache,
                                       key, key_size, kernel, kernel_size,
                                       prog_data, prog_data_size, &bind_map);

   if (!bin)
      return false;

   /* The cache already has a reference and it's not going anywhere so there
    * is no need to hold a second reference.
    */
   anv_shader_bin_unref(device, bin);

   *kernel_out = bin->kernel.offset;
   *(const struct brw_stage_prog_data **)prog_data_out = bin->prog_data;

   return true;
}

void
anv_device_init_blorp(struct anv_device *device)
{
   anv_pipeline_cache_init(&device->blorp_shader_cache, device, true);
   blorp_init(&device->blorp, device, &device->isl_dev);
   device->blorp.compiler = device->instance->physicalDevice.compiler;
   device->blorp.lookup_shader = lookup_blorp_shader;
   device->blorp.upload_shader = upload_blorp_shader;
   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell) {
         device->blorp.exec = gen75_blorp_exec;
      } else {
         device->blorp.exec = gen7_blorp_exec;
      }
      break;
   case 8:
      device->blorp.exec = gen8_blorp_exec;
      break;
   case 9:
      device->blorp.exec = gen9_blorp_exec;
      break;
   case 10:
      device->blorp.exec = gen10_blorp_exec;
      break;
   default:
      unreachable("Unknown hardware generation");
   }
}

void
anv_device_finish_blorp(struct anv_device *device)
{
   blorp_finish(&device->blorp);
   anv_pipeline_cache_finish(&device->blorp_shader_cache);
}

static void
get_blorp_surf_for_anv_buffer(struct anv_device *device,
                              struct anv_buffer *buffer, uint64_t offset,
                              uint32_t width, uint32_t height,
                              uint32_t row_pitch, enum isl_format format,
                              struct blorp_surf *blorp_surf,
                              struct isl_surf *isl_surf)
{
   const struct isl_format_layout *fmtl =
      isl_format_get_layout(format);
   bool ok UNUSED;

   /* ASTC is the only format which doesn't support linear layouts.
    * Create an equivalently sized surface with ISL to get around this.
    */
   if (fmtl->txc == ISL_TXC_ASTC) {
      /* Use an equivalently sized format */
      format = ISL_FORMAT_R32G32B32A32_UINT;
      assert(fmtl->bpb == isl_format_get_layout(format)->bpb);

      /* Shrink the dimensions for the new format */
      width = DIV_ROUND_UP(width, fmtl->bw);
      height = DIV_ROUND_UP(height, fmtl->bh);
   }
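
   /* Illustrative example: a 100x60 region of ASTC 4x4 data (128-bit
    * blocks, matching R32G32B32A32_UINT's bpb) becomes a
    * DIV_ROUND_UP(100, 4) x DIV_ROUND_UP(60, 4) = 25x15 UINT surface.
    */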

   *blorp_surf = (struct blorp_surf) {
      .surf = isl_surf,
      .addr = {
         .buffer = buffer->bo,
         .offset = buffer->offset + offset,
         .mocs = device->default_mocs,
      },
   };

   ok = isl_surf_init(&device->isl_dev, isl_surf,
                      .dim = ISL_SURF_DIM_2D,
                      .format = format,
                      .width = width,
                      .height = height,
                      .depth = 1,
                      .levels = 1,
                      .array_len = 1,
                      .samples = 1,
                      .row_pitch = row_pitch,
                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
                      .tiling_flags = ISL_TILING_LINEAR_BIT);
   assert(ok);
}

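/* Sentinel value: use the aux usage already associated with the image's
 * plane (resolved in get_blorp_surf_for_anv_image below).
 */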
#define ANV_AUX_USAGE_DEFAULT ((enum isl_aux_usage)0xff)

static struct blorp_address
anv_to_blorp_address(struct anv_address addr)
{
   return (struct blorp_address) {
      .buffer = addr.bo,
      .offset = addr.offset,
   };
}

static void
get_blorp_surf_for_anv_image(const struct anv_device *device,
                             const struct anv_image *image,
                             VkImageAspectFlags aspect,
                             enum isl_aux_usage aux_usage,
                             struct blorp_surf *blorp_surf)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   if (aux_usage == ANV_AUX_USAGE_DEFAULT)
      aux_usage = image->planes[plane].aux_usage;

   if (aspect == VK_IMAGE_ASPECT_STENCIL_BIT ||
       aux_usage == ISL_AUX_USAGE_HIZ)
      aux_usage = ISL_AUX_USAGE_NONE;

   const struct anv_surface *surface = &image->planes[plane].surface;
   *blorp_surf = (struct blorp_surf) {
      .surf = &surface->isl,
      .addr = {
         .buffer = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset + surface->offset,
         .mocs = device->default_mocs,
      },
   };

   if (aux_usage != ISL_AUX_USAGE_NONE) {
      const struct anv_surface *aux_surface = &image->planes[plane].aux_surface;
      blorp_surf->aux_surf = &aux_surface->isl;
      blorp_surf->aux_addr = (struct blorp_address) {
         .buffer = image->planes[plane].bo,
         .offset = image->planes[plane].bo_offset + aux_surface->offset,
         .mocs = device->default_mocs,
      };
      blorp_surf->aux_usage = aux_usage;
   }
}

void anv_CmdCopyImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageCopy*                          pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      VkOffset3D srcOffset =
         anv_sanitize_image_offset(src_image->type, pRegions[r].srcOffset);
      VkOffset3D dstOffset =
         anv_sanitize_image_offset(dst_image->type, pRegions[r].dstOffset);
      VkExtent3D extent =
         anv_sanitize_image_extent(src_image->type, pRegions[r].extent);

      unsigned dst_base_layer, layer_count;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         dst_base_layer = pRegions[r].dstOffset.z;
         layer_count = pRegions[r].extent.depth;
      } else {
         dst_base_layer = pRegions[r].dstSubresource.baseArrayLayer;
         layer_count =
            anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);
      }

      unsigned src_base_layer;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         src_base_layer = pRegions[r].srcOffset.z;
      } else {
         src_base_layer = pRegions[r].srcSubresource.baseArrayLayer;
         assert(layer_count ==
                anv_get_layerCount(src_image, &pRegions[r].srcSubresource));
      }

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      if (_mesa_bitcount(src_mask) > 1) {
         uint32_t aspect_bit;
         anv_foreach_image_aspect_bit(aspect_bit, src_image, src_mask) {
            struct blorp_surf src_surf, dst_surf;
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         src_image, 1UL << aspect_bit,
                                         ANV_AUX_USAGE_DEFAULT, &src_surf);
            get_blorp_surf_for_anv_image(cmd_buffer->device,
                                         dst_image, 1UL << aspect_bit,
                                         ANV_AUX_USAGE_DEFAULT, &dst_surf);

            for (unsigned i = 0; i < layer_count; i++) {
               blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                          src_base_layer + i,
                          &dst_surf, pRegions[r].dstSubresource.mipLevel,
                          dst_base_layer + i,
                          srcOffset.x, srcOffset.y,
                          dstOffset.x, dstOffset.y,
                          extent.width, extent.height);
            }
         }
      } else {
         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_image, src_mask,
                                      ANV_AUX_USAGE_DEFAULT, &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_image, dst_mask,
                                      ANV_AUX_USAGE_DEFAULT, &dst_surf);

         for (unsigned i = 0; i < layer_count; i++) {
            blorp_copy(&batch, &src_surf, pRegions[r].srcSubresource.mipLevel,
                       src_base_layer + i,
                       &dst_surf, pRegions[r].dstSubresource.mipLevel,
                       dst_base_layer + i,
                       srcOffset.x, srcOffset.y,
                       dstOffset.x, dstOffset.y,
                       extent.width, extent.height);
         }
      }
   }

   blorp_batch_finish(&batch);
}

static void
copy_buffer_to_image(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_buffer *anv_buffer,
                     struct anv_image *anv_image,
                     uint32_t regionCount,
                     const VkBufferImageCopy* pRegions,
                     bool buffer_to_image)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct {
      struct blorp_surf surf;
      uint32_t level;
      VkOffset3D offset;
   } image, buffer, *src, *dst;

   buffer.level = 0;
   buffer.offset = (VkOffset3D) { 0, 0, 0 };

   if (buffer_to_image) {
      src = &buffer;
      dst = &image;
   } else {
      src = &image;
      dst = &buffer;
   }
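
   /* From here on, the copy is phrased purely as src -> dst; the aliasing
    * above is the only place the direction matters.
    */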

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageAspectFlags aspect = pRegions[r].imageSubresource.aspectMask;

      get_blorp_surf_for_anv_image(cmd_buffer->device, anv_image, aspect,
                                   ANV_AUX_USAGE_DEFAULT, &image.surf);
      image.offset =
         anv_sanitize_image_offset(anv_image->type, pRegions[r].imageOffset);
      image.level = pRegions[r].imageSubresource.mipLevel;

      VkExtent3D extent =
         anv_sanitize_image_extent(anv_image->type, pRegions[r].imageExtent);
      if (anv_image->type != VK_IMAGE_TYPE_3D) {
         image.offset.z = pRegions[r].imageSubresource.baseArrayLayer;
         extent.depth =
            anv_get_layerCount(anv_image, &pRegions[r].imageSubresource);
      }

      const enum isl_format buffer_format =
         anv_get_isl_format(&cmd_buffer->device->info, anv_image->vk_format,
                            aspect, VK_IMAGE_TILING_LINEAR);

      const VkExtent3D bufferImageExtent = {
         .width  = pRegions[r].bufferRowLength ?
                   pRegions[r].bufferRowLength : extent.width,
         .height = pRegions[r].bufferImageHeight ?
                   pRegions[r].bufferImageHeight : extent.height,
      };

      const struct isl_format_layout *buffer_fmtl =
         isl_format_get_layout(buffer_format);

      const uint32_t buffer_row_pitch =
         DIV_ROUND_UP(bufferImageExtent.width, buffer_fmtl->bw) *
         (buffer_fmtl->bpb / 8);

      const uint32_t buffer_layer_stride =
         DIV_ROUND_UP(bufferImageExtent.height, buffer_fmtl->bh) *
         buffer_row_pitch;
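
      /* Illustrative example: a 256x128 R8G8B8A8_UNORM region with
       * bufferRowLength = 0 (tightly packed) gives a row pitch of
       * 256 * 4 = 1024 bytes and a layer stride of 128 * 1024 = 131072 bytes.
       */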

      struct isl_surf buffer_isl_surf;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    anv_buffer, pRegions[r].bufferOffset,
                                    extent.width, extent.height,
                                    buffer_row_pitch, buffer_format,
                                    &buffer.surf, &buffer_isl_surf);

      for (unsigned z = 0; z < extent.depth; z++) {
         blorp_copy(&batch, &src->surf, src->level, src->offset.z,
                    &dst->surf, dst->level, dst->offset.z,
                    src->offset.x, src->offset.y, dst->offset.x, dst->offset.y,
                    extent.width, extent.height);

         image.offset.z++;
         buffer.surf.addr.offset += buffer_layer_stride;
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdCopyBufferToImage(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   copy_buffer_to_image(cmd_buffer, src_buffer, dst_image,
                        regionCount, pRegions, true);
}

void anv_CmdCopyImageToBuffer(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferImageCopy*                    pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   copy_buffer_to_image(cmd_buffer, dst_buffer, src_image,
                        regionCount, pRegions, false);
}

static bool
flip_coords(unsigned *src0, unsigned *src1, unsigned *dst0, unsigned *dst1)
{
   bool flip = false;
   if (*src0 > *src1) {
      unsigned tmp = *src0;
      *src0 = *src1;
      *src1 = tmp;
      flip = !flip;
   }

   if (*dst0 > *dst1) {
      unsigned tmp = *dst0;
      *dst0 = *dst1;
      *dst1 = tmp;
      flip = !flip;
   }

   return flip;
}
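
/* Illustrative example: with src0 = 10, src1 = 2, dst0 = 0, dst1 = 8, the
 * source pair is swapped to (2, 10) and flip becomes true, so the caller
 * hands blorp an ascending rectangle plus a mirror flag. If both pairs are
 * reversed, the two flips cancel and no mirroring is requested.
 */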

void anv_CmdBlitImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageBlit*                          pRegions,
    VkFilter                                    filter)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_surf src, dst;

   uint32_t gl_filter;
   switch (filter) {
   case VK_FILTER_NEAREST:
      gl_filter = 0x2600; /* GL_NEAREST */
      break;
   case VK_FILTER_LINEAR:
      gl_filter = 0x2601; /* GL_LINEAR */
      break;
   default:
      unreachable("Invalid filter");
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      const VkImageSubresourceLayers *src_res = &pRegions[r].srcSubresource;
      const VkImageSubresourceLayers *dst_res = &pRegions[r].dstSubresource;

      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   src_image, src_res->aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &src);
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   dst_image, dst_res->aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &dst);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, src_image->vk_format,
                              src_res->aspectMask, src_image->tiling);
      struct anv_format_plane dst_format =
         anv_get_format_plane(&cmd_buffer->device->info, dst_image->vk_format,
                              dst_res->aspectMask, dst_image->tiling);

      unsigned dst_start, dst_end;
      if (dst_image->type == VK_IMAGE_TYPE_3D) {
         assert(dst_res->baseArrayLayer == 0);
         dst_start = pRegions[r].dstOffsets[0].z;
         dst_end = pRegions[r].dstOffsets[1].z;
      } else {
         dst_start = dst_res->baseArrayLayer;
         dst_end = dst_start + anv_get_layerCount(dst_image, dst_res);
      }

      unsigned src_start, src_end;
      if (src_image->type == VK_IMAGE_TYPE_3D) {
         assert(src_res->baseArrayLayer == 0);
         src_start = pRegions[r].srcOffsets[0].z;
         src_end = pRegions[r].srcOffsets[1].z;
      } else {
         src_start = src_res->baseArrayLayer;
         src_end = src_start + anv_get_layerCount(src_image, src_res);
      }

      bool flip_z = flip_coords(&src_start, &src_end, &dst_start, &dst_end);
      float src_z_step = (float)(src_end + 1 - src_start) /
                         (float)(dst_end + 1 - dst_start);

      if (flip_z) {
         src_start = src_end;
         src_z_step *= -1;
      }

      unsigned src_x0 = pRegions[r].srcOffsets[0].x;
      unsigned src_x1 = pRegions[r].srcOffsets[1].x;
      unsigned dst_x0 = pRegions[r].dstOffsets[0].x;
      unsigned dst_x1 = pRegions[r].dstOffsets[1].x;
      bool flip_x = flip_coords(&src_x0, &src_x1, &dst_x0, &dst_x1);

      unsigned src_y0 = pRegions[r].srcOffsets[0].y;
      unsigned src_y1 = pRegions[r].srcOffsets[1].y;
      unsigned dst_y0 = pRegions[r].dstOffsets[0].y;
      unsigned dst_y1 = pRegions[r].dstOffsets[1].y;
      bool flip_y = flip_coords(&src_y0, &src_y1, &dst_y0, &dst_y1);

      const unsigned num_layers = dst_end - dst_start;
      for (unsigned i = 0; i < num_layers; i++) {
         unsigned dst_z = dst_start + i;
         unsigned src_z = src_start + i * src_z_step;

         blorp_blit(&batch, &src, src_res->mipLevel, src_z,
                    src_format.isl_format, src_format.swizzle,
                    &dst, dst_res->mipLevel, dst_z,
                    dst_format.isl_format,
                    anv_swizzle_for_render(dst_format.swizzle),
                    src_x0, src_y0, src_x1, src_y1,
                    dst_x0, dst_y0, dst_x1, dst_y1,
                    gl_filter, flip_x, flip_y);
      }
   }

   blorp_batch_finish(&batch);
}

static enum isl_format
isl_format_for_size(unsigned size_B)
{
   switch (size_B) {
   case 4:  return ISL_FORMAT_R32_UINT;
   case 8:  return ISL_FORMAT_R32G32_UINT;
   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("Not a power-of-two format size");
   }
}

/**
 * Returns the greatest common divisor of a and b that is a power of two.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
    * case MIN2() will take the other one.  If both are 0 then we will hit
    * the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}
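
/* Illustrative example: gcd_pow2_u64(24, 16) computes a_log2 = 3 (24 is
 * 0b11000) and b_log2 = 4, giving 1 << 3 = 8, the largest power of two
 * dividing both values.
 */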

/* This is the maximum possible width/height our HW can handle */
#define MAX_SURFACE_DIM (1ull << 14)

void anv_CmdCopyBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    srcBuffer,
    VkBuffer                                    dstBuffer,
    uint32_t                                    regionCount,
    const VkBufferCopy*                         pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, src_buffer, srcBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < regionCount; r++) {
      struct blorp_address src = {
         .buffer = src_buffer->bo,
         .offset = src_buffer->offset + pRegions[r].srcOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + pRegions[r].dstOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };

      blorp_buffer_copy(&batch, src, dst, pRegions[r].size);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdUpdateBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                dataSize,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   /* We can't quite grab a full block because the state stream needs a
    * little data at the top to build its linked list.
    */
   const uint32_t max_update_size =
      cmd_buffer->device->dynamic_state_pool.block_size - 64;

   assert(max_update_size < MAX_SURFACE_DIM * 4);

   /* We're about to read data that was written from the CPU.  Flush the
    * texture cache so we don't get anything stale.
    */
   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;

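   /* Illustrative example (assuming, say, a 16384-byte dynamic state block):
    * max_update_size would be 16320, so a 40000-byte update is emitted as
    * three copies of 16320, 16320, and 7360 bytes.
    */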
   while (dataSize) {
      const uint32_t copy_size = MIN2(dataSize, max_update_size);

      struct anv_state tmp_data =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, copy_size, 64);

      memcpy(tmp_data.map, pData, copy_size);

      anv_state_flush(cmd_buffer->device, tmp_data);

      struct blorp_address src = {
         .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
         .offset = tmp_data.offset,
         .mocs = cmd_buffer->device->default_mocs,
      };
      struct blorp_address dst = {
         .buffer = dst_buffer->bo,
         .offset = dst_buffer->offset + dstOffset,
         .mocs = cmd_buffer->device->default_mocs,
      };

      blorp_buffer_copy(&batch, src, dst, copy_size);

      dataSize -= copy_size;
      dstOffset += copy_size;
      pData = (void *)pData + copy_size;
   }

   blorp_batch_finish(&batch);
}

void anv_CmdFillBuffer(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    dstBuffer,
    VkDeviceSize                                dstOffset,
    VkDeviceSize                                fillSize,
    uint32_t                                    data)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, dst_buffer, dstBuffer);
   struct blorp_surf surf;
   struct isl_surf isl_surf;

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   fillSize = anv_buffer_get_range(dst_buffer, dstOffset, fillSize);

   /* From the Vulkan spec:
    *
    *    "size is the number of bytes to fill, and must be either a multiple
    *    of 4, or VK_WHOLE_SIZE to fill the range from offset to the end of
    *    the buffer. If VK_WHOLE_SIZE is used and the remaining size of the
    *    buffer is not a multiple of 4, then the nearest smaller multiple is
    *    used."
    */
   fillSize &= ~3ull;

   /* First, we compute the biggest format that can be used with the
    * given offsets and size.
    */
   int bs = 16;
   bs = gcd_pow2_u64(bs, dstOffset);
   bs = gcd_pow2_u64(bs, fillSize);
   enum isl_format isl_format = isl_format_for_size(bs);
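
   /* Illustrative example: dstOffset = 8 and fillSize = 120 give
    * bs = gcd_pow2_u64(gcd_pow2_u64(16, 8), 120) = 8, so the fill would use
    * ISL_FORMAT_R32G32_UINT.
    */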

   union isl_color_value color = {
      .u32 = { data, data, data, data },
   };

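   /* The fill is emitted as at most three clears: whole
    * MAX_SURFACE_DIM x MAX_SURFACE_DIM rectangles first, then one
    * MAX_SURFACE_DIM-wide rectangle covering the remaining full rows, and
    * finally a single-row clear for whatever is left.
    */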
   const uint64_t max_fill_size = MAX_SURFACE_DIM * MAX_SURFACE_DIM * bs;
   while (fillSize >= max_fill_size) {
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, MAX_SURFACE_DIM,
                  color, NULL);
      fillSize -= max_fill_size;
      dstOffset += max_fill_size;
   }

   uint64_t height = fillSize / (MAX_SURFACE_DIM * bs);
   assert(height < MAX_SURFACE_DIM);
   if (height != 0) {
      const uint64_t rect_fill_size = height * MAX_SURFACE_DIM * bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    MAX_SURFACE_DIM, height,
                                    MAX_SURFACE_DIM * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, MAX_SURFACE_DIM, height,
                  color, NULL);
      fillSize -= rect_fill_size;
      dstOffset += rect_fill_size;
   }

   if (fillSize != 0) {
      const uint32_t width = fillSize / bs;
      get_blorp_surf_for_anv_buffer(cmd_buffer->device,
                                    dst_buffer, dstOffset,
                                    width, 1,
                                    width * bs, isl_format,
                                    &surf, &isl_surf);

      blorp_clear(&batch, &surf, isl_format, ISL_SWIZZLE_IDENTITY,
                  0, 0, 1, 0, 0, width, 1,
                  color, NULL);
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearColorImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     _image,
    VkImageLayout                               imageLayout,
    const VkClearColorValue*                    pColor,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, _image);

   static const bool color_write_disable[4] = { false, false, false, false };

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      assert(pRanges[r].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);

      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, pRanges[r].aspectMask,
                                   ANV_AUX_USAGE_DEFAULT, &surf);

      struct anv_format_plane src_format =
         anv_get_format_plane(&cmd_buffer->device->info, image->vk_format,
                              VK_IMAGE_ASPECT_COLOR_BIT, image->tiling);

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D) {
            base_layer = 0;
            layer_count = anv_minify(image->extent.depth, level);
         }

         blorp_clear(&batch, &surf,
                     src_format.isl_format, src_format.swizzle,
                     level, base_layer, layer_count,
                     0, 0, level_width, level_height,
                     vk_to_isl_color(*pColor), color_write_disable);
      }
   }

   blorp_batch_finish(&batch);
}

void anv_CmdClearDepthStencilImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     image_h,
    VkImageLayout                               imageLayout,
    const VkClearDepthStencilValue*             pDepthStencil,
    uint32_t                                    rangeCount,
    const VkImageSubresourceRange*              pRanges)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, image, image_h);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf depth, stencil;
   if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                   ISL_AUX_USAGE_NONE, &depth);
   } else {
      memset(&depth, 0, sizeof(depth));
   }

   if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_STENCIL_BIT,
                                   ISL_AUX_USAGE_NONE, &stencil);
   } else {
      memset(&stencil, 0, sizeof(stencil));
   }

   for (unsigned r = 0; r < rangeCount; r++) {
      if (pRanges[r].aspectMask == 0)
         continue;

      bool clear_depth = pRanges[r].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
      bool clear_stencil = pRanges[r].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

      unsigned base_layer = pRanges[r].baseArrayLayer;
      unsigned layer_count = anv_get_layerCount(image, &pRanges[r]);

      for (unsigned i = 0; i < anv_get_levelCount(image, &pRanges[r]); i++) {
         const unsigned level = pRanges[r].baseMipLevel + i;
         const unsigned level_width = anv_minify(image->extent.width, level);
         const unsigned level_height = anv_minify(image->extent.height, level);

         if (image->type == VK_IMAGE_TYPE_3D)
            layer_count = anv_minify(image->extent.depth, level);

         blorp_clear_depth_stencil(&batch, &depth, &stencil,
                                   level, base_layer, layer_count,
                                   0, 0, level_width, level_height,
                                   clear_depth, pDepthStencil->depth,
                                   clear_stencil ? 0xff : 0,
                                   pDepthStencil->stencil);
      }
   }

   blorp_batch_finish(&batch);
}

VkResult
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset,
                                         struct anv_state *bt_state)
{
   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                  state_offset);
   if (bt_state->map == NULL) {
      /* We ran out of space.  Grab a new binding table block. */
      VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, num_entries,
                                                     state_offset);
      assert(bt_state->map != NULL);
   }

   return VK_SUCCESS;
}

static VkResult
binding_table_for_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_state surface_state,
                                uint32_t *bt_offset)
{
   uint32_t state_offset;
   struct anv_state bt_state;

   VkResult result =
      anv_cmd_buffer_alloc_blorp_binding_table(cmd_buffer, 1, &state_offset,
                                               &bt_state);
   if (result != VK_SUCCESS)
      return result;

   uint32_t *bt_map = bt_state.map;
   bt_map[0] = surface_state.offset + state_offset;

   *bt_offset = bt_state.offset;
   return VK_SUCCESS;
}

static void
clear_color_attachment(struct anv_cmd_buffer *cmd_buffer,
                       struct blorp_batch *batch,
                       const VkClearAttachment *attachment,
                       uint32_t rectCount, const VkClearRect *pRects)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t color_att = attachment->colorAttachment;
   const uint32_t att_idx = subpass->color_attachments[color_att].attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];
   struct anv_attachment_state *att_state =
      &cmd_buffer->state.attachments[att_idx];

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer, att_state->color.state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   union isl_color_value clear_color =
      vk_to_isl_color(attachment->clearValue.color);

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            blorp_clear_attachments(batch, binding_table,
                                    ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    true, clear_color, false, 0.0f, 0, 0);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              ISL_FORMAT_UNSUPPORTED, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              true, clear_color, false, 0.0f, 0, 0);
   }
}

static void
clear_depth_stencil_attachment(struct anv_cmd_buffer *cmd_buffer,
                               struct blorp_batch *batch,
                               const VkClearAttachment *attachment,
                               uint32_t rectCount, const VkClearRect *pRects)
{
   static const union isl_color_value color_value = { .u32 = { 0, } };
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const uint32_t att_idx = subpass->depth_stencil_attachment.attachment;

   if (att_idx == VK_ATTACHMENT_UNUSED)
      return;

   struct anv_render_pass_attachment *pass_att =
      &cmd_buffer->state.pass->attachments[att_idx];

   bool clear_depth = attachment->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT;
   bool clear_stencil = attachment->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT;

   enum isl_format depth_format = ISL_FORMAT_UNSUPPORTED;
   if (clear_depth) {
      depth_format = anv_get_isl_format(&cmd_buffer->device->info,
                                        pass_att->format,
                                        VK_IMAGE_ASPECT_DEPTH_BIT,
                                        VK_IMAGE_TILING_OPTIMAL);
   }

   uint32_t binding_table;
   VkResult result =
      binding_table_for_surface_state(cmd_buffer,
                                      cmd_buffer->state.null_surface_state,
                                      &binding_table);
   if (result != VK_SUCCESS)
      return;

   /* If multiview is enabled we ignore baseArrayLayer and layerCount */
   if (subpass->view_mask) {
      uint32_t view_idx;
      for_each_bit(view_idx, subpass->view_mask) {
         for (uint32_t r = 0; r < rectCount; ++r) {
            const VkOffset2D offset = pRects[r].rect.offset;
            const VkExtent2D extent = pRects[r].rect.extent;
            VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
            blorp_clear_attachments(batch, binding_table,
                                    depth_format, pass_att->samples,
                                    view_idx, 1,
                                    offset.x, offset.y,
                                    offset.x + extent.width,
                                    offset.y + extent.height,
                                    false, color_value,
                                    clear_depth, value.depth,
                                    clear_stencil ? 0xff : 0, value.stencil);
         }
      }
      return;
   }

   for (uint32_t r = 0; r < rectCount; ++r) {
      const VkOffset2D offset = pRects[r].rect.offset;
      const VkExtent2D extent = pRects[r].rect.extent;
      VkClearDepthStencilValue value = attachment->clearValue.depthStencil;
      assert(pRects[r].layerCount != VK_REMAINING_ARRAY_LAYERS);
      blorp_clear_attachments(batch, binding_table,
                              depth_format, pass_att->samples,
                              pRects[r].baseArrayLayer,
                              pRects[r].layerCount,
                              offset.x, offset.y,
                              offset.x + extent.width, offset.y + extent.height,
                              false, color_value,
                              clear_depth, value.depth,
                              clear_stencil ? 0xff : 0, value.stencil);
   }
}

void anv_CmdClearAttachments(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    attachmentCount,
    const VkClearAttachment*                    pAttachments,
    uint32_t                                    rectCount,
    const VkClearRect*                          pRects)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   for (uint32_t a = 0; a < attachmentCount; ++a) {
      if (pAttachments[a].aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
         assert(pAttachments[a].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT);
         clear_color_attachment(cmd_buffer, &batch,
                                &pAttachments[a],
                                rectCount, pRects);
      } else {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &pAttachments[a],
                                        rectCount, pRects);
      }
   }

   blorp_batch_finish(&batch);
}

enum subpass_stage {
   SUBPASS_STAGE_LOAD,
   SUBPASS_STAGE_DRAW,
   SUBPASS_STAGE_RESOLVE,
};

static bool
subpass_needs_clear(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;

   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[a].pending_clear_aspects) {
         return true;
      }
   }

   if (ds != VK_ATTACHMENT_UNUSED) {
      assert(ds < cmd_state->pass->attachment_count);
      if (cmd_state->attachments[ds].pending_clear_aspects)
         return true;
   }

   return false;
}

void
anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_cmd_state *cmd_state = &cmd_buffer->state;
   const VkRect2D render_area = cmd_buffer->state.render_area;

   if (!subpass_needs_clear(cmd_buffer))
      return;

   /* Because this gets called within a render pass, we tell blorp not to
    * trash our depth and stencil buffers.
    */
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_NO_EMIT_DEPTH_STENCIL);

   VkClearRect clear_rect = {
      .rect = cmd_buffer->state.render_area,
      .baseArrayLayer = 0,
      .layerCount = cmd_buffer->state.framebuffer->layers,
   };

   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   for (uint32_t i = 0; i < cmd_state->subpass->color_count; ++i) {
      const uint32_t a = cmd_state->subpass->color_attachments[i].attachment;
      if (a == VK_ATTACHMENT_UNUSED)
         continue;

      assert(a < cmd_state->pass->attachment_count);
      struct anv_attachment_state *att_state = &cmd_state->attachments[a];

      if (!att_state->pending_clear_aspects)
         continue;

      assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);

      struct anv_image_view *iview = fb->attachments[a];
      const struct anv_image *image = iview->image;
      struct blorp_surf surf;
      get_blorp_surf_for_anv_image(cmd_buffer->device,
                                   image, VK_IMAGE_ASPECT_COLOR_BIT,
                                   att_state->aux_usage, &surf);

      if (att_state->fast_clear) {
         surf.clear_color = vk_to_isl_color(att_state->clear_value.color);

         /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
          *
          *    "After Render target fast clear, pipe-control with color cache
          *    write-flush must be issued before sending any DRAW commands on
          *    that render target."
          *
          * This comment is a bit cryptic and doesn't really tell you what's
          * going on or what's really needed.  It appears that fast clear ops
          * are not properly synchronized with other drawing.  This means that
          * we cannot have a fast clear operation in the pipe at the same time
          * as other regular drawing operations.  We need to use a PIPE_CONTROL
          * to ensure that the contents of the previous draw hit the render
          * target before we resolve and then use a second PIPE_CONTROL after
          * the resolve to ensure that it is completed before any additional
          * drawing occurs.
          */
         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

         assert(image->n_planes == 1);
         blorp_fast_clear(&batch, &surf, iview->planes[0].isl.format,
                          iview->planes[0].isl.base_level,
                          iview->planes[0].isl.base_array_layer, fb->layers,
                          render_area.offset.x, render_area.offset.y,
                          render_area.offset.x + render_area.extent.width,
                          render_area.offset.y + render_area.extent.height);

         cmd_buffer->state.pending_pipe_bits |=
            ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
      } else {
         assert(image->n_planes == 1);
         blorp_clear(&batch, &surf, iview->planes[0].isl.format,
                     anv_swizzle_for_render(iview->planes[0].isl.swizzle),
                     iview->planes[0].isl.base_level,
                     iview->planes[0].isl.base_array_layer, fb->layers,
                     render_area.offset.x, render_area.offset.y,
                     render_area.offset.x + render_area.extent.width,
                     render_area.offset.y + render_area.extent.height,
                     vk_to_isl_color(att_state->clear_value.color), NULL);
      }

      att_state->pending_clear_aspects = 0;
   }

   const uint32_t ds = cmd_state->subpass->depth_stencil_attachment.attachment;
   assert(ds == VK_ATTACHMENT_UNUSED || ds < cmd_state->pass->attachment_count);

   if (ds != VK_ATTACHMENT_UNUSED &&
       cmd_state->attachments[ds].pending_clear_aspects) {
      VkClearAttachment clear_att = {
         .aspectMask = cmd_state->attachments[ds].pending_clear_aspects,
         .clearValue = cmd_state->attachments[ds].clear_value,
      };

      const uint8_t gen = cmd_buffer->device->info.gen;
      bool clear_with_hiz = gen >= 8 &&
         cmd_state->attachments[ds].aux_usage == ISL_AUX_USAGE_HIZ;
      const struct anv_image_view *iview = fb->attachments[ds];

      if (clear_with_hiz) {
         const bool clear_depth = clear_att.aspectMask &
                                  VK_IMAGE_ASPECT_DEPTH_BIT;
         const bool clear_stencil = clear_att.aspectMask &
                                    VK_IMAGE_ASPECT_STENCIL_BIT;

         /* Check against restrictions for depth buffer clearing.  A great GPU
          * performance benefit isn't expected when using the HZ sequence for
          * stencil-only clears.  Therefore, we don't emit a HZ op sequence
          * for a stencil clear in addition to using the BLORP-fallback for
          * depth.
          */
         if (clear_depth) {
            if (!blorp_can_hiz_clear_depth(gen, iview->planes[0].isl.format,
                                           iview->image->samples,
                                           render_area.offset.x,
                                           render_area.offset.y,
                                           render_area.offset.x +
                                           render_area.extent.width,
                                           render_area.offset.y +
                                           render_area.extent.height)) {
               clear_with_hiz = false;
            } else if (clear_att.clearValue.depthStencil.depth !=
                       ANV_HZ_FC_VAL) {
               /* Don't enable fast depth clears for any clear value not
                * equal to ANV_HZ_FC_VAL.
                */
               clear_with_hiz = false;
            } else if (gen == 8 &&
                       anv_can_sample_with_hiz(&cmd_buffer->device->info,
                                               iview->image)) {
               /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a
                * fast-cleared portion of a HiZ buffer.  Testing has revealed
                * that Gen8 only supports returning 0.0f.  Gens prior to gen8
                * do not support this feature at all.
                */
               clear_with_hiz = false;
            }
         }

         if (clear_with_hiz) {
            blorp_gen8_hiz_clear_attachments(&batch, iview->image->samples,
                                             render_area.offset.x,
                                             render_area.offset.y,
                                             render_area.offset.x +
                                             render_area.extent.width,
                                             render_area.offset.y +
                                             render_area.extent.height,
                                             clear_depth, clear_stencil,
                                             clear_att.clearValue.depthStencil.stencil);

            /* From the SKL PRM, Depth Buffer Clear:
             *
             * Depth Buffer Clear Workaround
             * Depth buffer clear pass using any of the methods (WM_STATE,
             * 3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
             * PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
             * “set” before starting to render. DepthStall and DepthFlush are
             * not needed between consecutive depth clear passes nor is it
             * required if the depth-clear pass was done with “full_surf_clear”
             * bit set in the 3DSTATE_WM_HZ_OP.
             */
            if (clear_depth) {
               cmd_buffer->state.pending_pipe_bits |=
                  ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | ANV_PIPE_DEPTH_STALL_BIT;
            }
         }
      }

      if (!clear_with_hiz) {
         clear_depth_stencil_attachment(cmd_buffer, &batch,
                                        &clear_att, 1, &clear_rect);
      }

      cmd_state->attachments[ds].pending_clear_aspects = 0;
   }

   blorp_batch_finish(&batch);
}

static void
resolve_surface(struct blorp_batch *batch,
                struct blorp_surf *src_surf,
                uint32_t src_level, uint32_t src_layer,
                struct blorp_surf *dst_surf,
                uint32_t dst_level, uint32_t dst_layer,
                uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
                uint32_t width, uint32_t height)
{
   blorp_blit(batch,
              src_surf, src_level, src_layer,
              ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
              dst_surf, dst_level, dst_layer,
              ISL_FORMAT_UNSUPPORTED, ISL_SWIZZLE_IDENTITY,
              src_x, src_y, src_x + width, src_y + height,
              dst_x, dst_y, dst_x + width, dst_y + height,
              0x2600 /* GL_NEAREST */, false, false);
}
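
/* Note: the resolve is expressed as a 1:1 blorp_blit of identical source and
 * destination rectangles; blorp performs the per-sample resolve internally.
 * Passing ISL_FORMAT_UNSUPPORTED means "use each surface's own format".
 */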

static void
resolve_image(struct anv_device *device,
              struct blorp_batch *batch,
              const struct anv_image *src_image,
              uint32_t src_level, uint32_t src_layer,
              const struct anv_image *dst_image,
              uint32_t dst_level, uint32_t dst_layer,
              VkImageAspectFlags aspect_mask,
              uint32_t src_x, uint32_t src_y, uint32_t dst_x, uint32_t dst_y,
              uint32_t width, uint32_t height)
{
   assert(src_image->type == VK_IMAGE_TYPE_2D);
   assert(src_image->samples > 1);
   assert(dst_image->type == VK_IMAGE_TYPE_2D);
   assert(dst_image->samples == 1);
   assert(src_image->n_planes == dst_image->n_planes);

   uint32_t aspect_bit;

   anv_foreach_image_aspect_bit(aspect_bit, src_image, aspect_mask) {
      struct blorp_surf src_surf, dst_surf;
      get_blorp_surf_for_anv_image(device, src_image, 1UL << aspect_bit,
                                   ANV_AUX_USAGE_DEFAULT, &src_surf);
      get_blorp_surf_for_anv_image(device, dst_image, 1UL << aspect_bit,
                                   ANV_AUX_USAGE_DEFAULT, &dst_surf);

      assert(!src_image->format->can_ycbcr);
      assert(!dst_image->format->can_ycbcr);

      resolve_surface(batch,
                      &src_surf, src_level, src_layer,
                      &dst_surf, dst_level, dst_layer,
                      src_x, src_y, dst_x, dst_y, width, height);
   }
}

void anv_CmdResolveImage(
    VkCommandBuffer                             commandBuffer,
    VkImage                                     srcImage,
    VkImageLayout                               srcImageLayout,
    VkImage                                     dstImage,
    VkImageLayout                               dstImageLayout,
    uint32_t                                    regionCount,
    const VkImageResolve*                       pRegions)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_image, src_image, srcImage);
   ANV_FROM_HANDLE(anv_image, dst_image, dstImage);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   for (uint32_t r = 0; r < regionCount; r++) {
      assert(pRegions[r].srcSubresource.aspectMask ==
             pRegions[r].dstSubresource.aspectMask);
      assert(anv_get_layerCount(src_image, &pRegions[r].srcSubresource) ==
             anv_get_layerCount(dst_image, &pRegions[r].dstSubresource));

      const uint32_t layer_count =
         anv_get_layerCount(dst_image, &pRegions[r].dstSubresource);

      VkImageAspectFlags src_mask = pRegions[r].srcSubresource.aspectMask,
         dst_mask = pRegions[r].dstSubresource.aspectMask;

      assert(anv_image_aspects_compatible(src_mask, dst_mask));

      for (uint32_t layer = 0; layer < layer_count; layer++) {
         resolve_image(cmd_buffer->device, &batch,
                       src_image,
                       pRegions[r].srcSubresource.mipLevel,
                       pRegions[r].srcSubresource.baseArrayLayer + layer,
                       dst_image,
                       pRegions[r].dstSubresource.mipLevel,
                       pRegions[r].dstSubresource.baseArrayLayer + layer,
                       pRegions[r].dstSubresource.aspectMask,
                       pRegions[r].srcOffset.x, pRegions[r].srcOffset.y,
                       pRegions[r].dstOffset.x, pRegions[r].dstOffset.y,
                       pRegions[r].extent.width, pRegions[r].extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

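/* Returns the aux usage to program for a fast clear: the image's own aux
 * usage when it has one, or ISL_AUX_USAGE_CCS_D (fast clears only) otherwise.
 */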
static enum isl_aux_usage
fast_clear_aux_usage(const struct anv_image *image,
                     VkImageAspectFlagBits aspect)
{
   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
      return ISL_AUX_USAGE_CCS_D;
   else
      return image->planes[plane].aux_usage;
}

void
anv_image_fast_clear(struct anv_cmd_buffer *cmd_buffer,
                     const struct anv_image *image,
                     VkImageAspectFlagBits aspect,
                     const uint32_t base_level, const uint32_t level_count,
                     const uint32_t base_layer, uint32_t layer_count)
{
   assert(image->type == VK_IMAGE_TYPE_3D || image->extent.depth == 1);

   if (image->type == VK_IMAGE_TYPE_3D) {
      assert(base_layer == 0);
      assert(layer_count == anv_minify(image->extent.depth, base_level));
   }

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                fast_clear_aux_usage(image, aspect),
                                &surf);

   /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
    *
    *    "After Render target fast clear, pipe-control with color cache
    *    write-flush must be issued before sending any DRAW commands on
    *    that render target."
    *
    * This comment is a bit cryptic and doesn't really tell you what's going
    * on or what's really needed.  It appears that fast clear ops are not
    * properly synchronized with other drawing.  This means that we cannot
    * have a fast clear operation in the pipe at the same time as other
    * regular drawing operations.  We need to use a PIPE_CONTROL to ensure
    * that the contents of the previous draw hit the render target before we
    * resolve and then use a second PIPE_CONTROL after the resolve to ensure
    * that it is completed before any additional drawing occurs.
    */
   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
   uint32_t width_div = image->format->planes[plane].denominator_scales[0];
   uint32_t height_div = image->format->planes[plane].denominator_scales[1];

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      assert(level < anv_image_aux_levels(image, aspect));
      assert(base_layer + layer_count <=
             anv_image_aux_layers(image, aspect, level));
      blorp_fast_clear(&batch, &surf, surf.surf->format,
                       level, base_layer, layer_count,
                       0, 0,
                       extent.width / width_div,
                       extent.height / height_div);
   }

   cmd_buffer->state.pending_pipe_bits |=
      ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
}

void
anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->has_resolve) {
      struct blorp_batch batch;
      blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

      /* We are about to do some MSAA resolves.  We need to flush so that the
       * result of writes to the MSAA color attachments show up in the sampler
       * when we blit to the single-sampled resolve target.
       */
      cmd_buffer->state.pending_pipe_bits |=
         ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
         ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

      for (uint32_t i = 0; i < subpass->color_count; ++i) {
         uint32_t src_att = subpass->color_attachments[i].attachment;
         uint32_t dst_att = subpass->resolve_attachments[i].attachment;

         if (dst_att == VK_ATTACHMENT_UNUSED)
            continue;

         assert(src_att < cmd_buffer->state.pass->attachment_count);
         assert(dst_att < cmd_buffer->state.pass->attachment_count);

         if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
            /* From the Vulkan 1.0 spec:
             *
             *    If the first use of an attachment in a render pass is as a
             *    resolve attachment, then the loadOp is effectively ignored
             *    as the resolve is guaranteed to overwrite all pixels in the
             *    render area.
             */
            cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
         }

         struct anv_image_view *src_iview = fb->attachments[src_att];
         struct anv_image_view *dst_iview = fb->attachments[dst_att];

         enum isl_aux_usage src_aux_usage =
            cmd_buffer->state.attachments[src_att].aux_usage;
         enum isl_aux_usage dst_aux_usage =
            cmd_buffer->state.attachments[dst_att].aux_usage;

         const VkRect2D render_area = cmd_buffer->state.render_area;

         assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
                dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);

         struct blorp_surf src_surf, dst_surf;
         get_blorp_surf_for_anv_image(cmd_buffer->device, src_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      src_aux_usage, &src_surf);
         get_blorp_surf_for_anv_image(cmd_buffer->device, dst_iview->image,
                                      VK_IMAGE_ASPECT_COLOR_BIT,
                                      dst_aux_usage, &dst_surf);

         assert(!src_iview->image->format->can_ycbcr);
         assert(!dst_iview->image->format->can_ycbcr);

         resolve_surface(&batch,
                         &src_surf,
                         src_iview->planes[0].isl.base_level,
                         src_iview->planes[0].isl.base_array_layer,
                         &dst_surf,
                         dst_iview->planes[0].isl.base_level,
                         dst_iview->planes[0].isl.base_array_layer,
                         render_area.offset.x, render_area.offset.y,
                         render_area.offset.x, render_area.offset.y,
                         render_area.extent.width, render_area.extent.height);
      }

      blorp_batch_finish(&batch);
   }
}

void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
                         const struct anv_image *image,
                         uint32_t base_level, uint32_t level_count,
                         uint32_t base_layer, uint32_t layer_count)
{
   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT && image->n_planes == 1);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_COLOR_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   struct blorp_surf shadow_surf = {
      .surf = &image->planes[0].shadow_surface.isl,
      .addr = {
         .buffer = image->planes[0].bo,
         .offset = image->planes[0].bo_offset +
                   image->planes[0].shadow_surface.offset,
         .mocs = cmd_buffer->device->default_mocs,
      },
   };

   for (uint32_t l = 0; l < level_count; l++) {
      const uint32_t level = base_level + l;

      const VkExtent3D extent = {
         .width = anv_minify(image->extent.width, level),
         .height = anv_minify(image->extent.height, level),
         .depth = anv_minify(image->extent.depth, level),
      };

      if (image->type == VK_IMAGE_TYPE_3D)
         layer_count = extent.depth;

      for (uint32_t a = 0; a < layer_count; a++) {
         const uint32_t layer = base_layer + a;

         blorp_copy(&batch, &surf, level, layer,
                    &shadow_surf, level, layer,
                    0, 0, 0, 0, extent.width, extent.height);
      }
   }

   blorp_batch_finish(&batch);
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op)
{
   assert(image);

   assert(anv_image_aspect_to_plane(image->aspects,
                                    VK_IMAGE_ASPECT_DEPTH_BIT) == 0);

   /* Don't resolve depth buffers without an auxiliary HiZ buffer and
    * don't perform such a resolve on gens that don't support it.
    */
   if (cmd_buffer->device->info.gen < 8 ||
       image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ)
      return;

   assert(op == BLORP_HIZ_OP_HIZ_RESOLVE ||
          op == BLORP_HIZ_OP_DEPTH_RESOLVE);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer, 0);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device,
                                image, VK_IMAGE_ASPECT_DEPTH_BIT,
                                ISL_AUX_USAGE_NONE, &surf);

   /* Manually add the aux HiZ surf */
   surf.aux_surf = &image->planes[0].aux_surface.isl;
   surf.aux_addr = (struct blorp_address) {
      .buffer = image->planes[0].bo,
      .offset = image->planes[0].bo_offset +
                image->planes[0].aux_surface.offset,
      .mocs = cmd_buffer->device->default_mocs,
   };
   surf.aux_usage = ISL_AUX_USAGE_HIZ;

   surf.clear_color.f32[0] = ANV_HZ_FC_VAL;

   blorp_hiz_op(&batch, &surf, 0, 0, 1, op);
   blorp_batch_finish(&batch);
}

void
anv_ccs_resolve(struct anv_cmd_buffer * const cmd_buffer,
                const struct anv_image * const image,
                VkImageAspectFlagBits aspect,
                const uint8_t level,
                const uint32_t start_layer, const uint32_t layer_count,
                const enum blorp_fast_clear_op op)
{
   assert(cmd_buffer && image);

   uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);

   /* The resolved subresource range must have a CCS buffer. */
   assert(level < anv_image_aux_levels(image, aspect));
   assert(start_layer + layer_count <=
          anv_image_aux_layers(image, aspect, level));
   assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV &&
          image->samples == 1);

   struct blorp_batch batch;
   blorp_batch_init(&cmd_buffer->device->blorp, &batch, cmd_buffer,
                    BLORP_BATCH_PREDICATE_ENABLE);

   struct blorp_surf surf;
   get_blorp_surf_for_anv_image(cmd_buffer->device, image, aspect,
                                fast_clear_aux_usage(image, aspect),
                                &surf);
   surf.clear_color_addr = anv_to_blorp_address(
      anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect, level));

   blorp_ccs_resolve(&batch, &surf, level, start_layer, layer_count,
                     image->planes[plane].surface.isl.format, op);

   blorp_batch_finish(&batch);
}