/*
 * Copyright © 2022 Collabora Ltd.
 * SPDX-License-Identifier: MIT
 */
#include "nil_image.h"

#include "util/u_math.h"

#include "nouveau_device.h"

#include "cl9097.h"
#include "clc597.h"

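/* Minify a 4D extent for the given miplevel.  Width, height, and depth are
 * halved per level (clamping at 1); the array length is unaffected by
 * mipmapping.
 */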
static struct nil_extent4d
nil_minify_extent4d(struct nil_extent4d extent, uint32_t level)
{
   return (struct nil_extent4d) {
      .w = u_minify(extent.w, level),
      .h = u_minify(extent.h, level),
      .d = u_minify(extent.d, level),
      .a = extent.a,
   };
}

static struct nil_extent4d
nil_extent4d_div_round_up(struct nil_extent4d num, struct nil_extent4d denom)
{
   return (struct nil_extent4d) {
      .w = DIV_ROUND_UP(num.w, denom.w),
      .h = DIV_ROUND_UP(num.h, denom.h),
      .d = DIV_ROUND_UP(num.d, denom.d),
      .a = DIV_ROUND_UP(num.a, denom.a),
   };
}

static struct nil_extent4d
nil_extent4d_mul(struct nil_extent4d a, struct nil_extent4d b)
{
   return (struct nil_extent4d) {
      .w = a.w * b.w,
      .h = a.h * b.h,
      .d = a.d * b.d,
      .a = a.a * b.a,
   };
}

static struct nil_offset4d
nil_offset4d_div_round_down(struct nil_offset4d num, struct nil_extent4d denom)
{
   return (struct nil_offset4d) {
      .x = num.x / denom.w,
      .y = num.y / denom.h,
      .z = num.z / denom.d,
      .a = num.a / denom.a,
   };
}

static struct nil_offset4d
nil_offset4d_mul(struct nil_offset4d a, struct nil_extent4d b)
{
   return (struct nil_offset4d) {
      .x = a.x * b.w,
      .y = a.y * b.h,
      .z = a.z * b.d,
      .a = a.a * b.a,
   };
}

static struct nil_extent4d
nil_extent4d_align(struct nil_extent4d ext, struct nil_extent4d alignment)
{
   return (struct nil_extent4d) {
      .w = align(ext.w, alignment.w),
      .h = align(ext.h, alignment.h),
      .d = align(ext.d, alignment.d),
      .a = align(ext.a, alignment.a),
   };
}

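/* Size of a single pixel, in samples, for the given MSAA sample layout.
 * NIL describes multisampled images by treating each pixel as a small grid
 * of samples (e.g. 2x2 for 4x MSAA) so that layout math can be done
 * uniformly in units of samples.
 */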
static inline struct nil_extent4d
nil_px_extent_sa(enum nil_sample_layout sample_layout)
{
   switch (sample_layout) {
   case NIL_SAMPLE_LAYOUT_1X1: return nil_extent4d(1, 1, 1, 1);
   case NIL_SAMPLE_LAYOUT_2X1: return nil_extent4d(2, 1, 1, 1);
   case NIL_SAMPLE_LAYOUT_2X2: return nil_extent4d(2, 2, 1, 1);
   case NIL_SAMPLE_LAYOUT_4X2: return nil_extent4d(4, 2, 1, 1);
   case NIL_SAMPLE_LAYOUT_4X4: return nil_extent4d(4, 4, 1, 1);
   default: unreachable("Invalid sample layout");
   }
}

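/* Size of a single element, in samples, for the given format.  For
 * block-compressed formats, this is the compression block size; for all
 * other formats it is 1x1x1.
 */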
static inline struct nil_extent4d
nil_el_extent_sa(enum pipe_format format)
{
   const struct util_format_description *fmt =
      util_format_description(format);

   return (struct nil_extent4d) {
      .w = fmt->block.width,
      .h = fmt->block.height,
      .d = fmt->block.depth,
      .a = 1,
   };
}

static struct nil_extent4d
nil_extent4d_px_to_sa(struct nil_extent4d extent_px,
                      enum nil_sample_layout sample_layout)
{
   return nil_extent4d_mul(extent_px, nil_px_extent_sa(sample_layout));
}

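/* Convert an extent in pixels to an extent in elements.  Pixels are first
 * expanded to samples and then divided by the element (format block) size,
 * rounding up.
 */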
struct nil_extent4d
nil_extent4d_px_to_el(struct nil_extent4d extent_px,
                      enum pipe_format format,
                      enum nil_sample_layout sample_layout)
{
   const struct nil_extent4d extent_sa =
      nil_extent4d_px_to_sa(extent_px, sample_layout);

   return nil_extent4d_div_round_up(extent_sa, nil_el_extent_sa(format));
}

struct nil_offset4d
nil_offset4d_px_to_el(struct nil_offset4d offset_px,
                      enum pipe_format format,
                      enum nil_sample_layout sample_layout)
{
   const struct nil_offset4d offset_sa =
      nil_offset4d_mul(offset_px, nil_px_extent_sa(sample_layout));

   return nil_offset4d_div_round_down(offset_sa, nil_el_extent_sa(format));
}

static struct nil_extent4d
nil_extent4d_el_to_B(struct nil_extent4d extent_el,
                     uint32_t B_per_el)
{
   struct nil_extent4d extent_B = extent_el;
   extent_B.w *= B_per_el;
   return extent_B;
}

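/* Convert an extent in bytes to an extent in GOBs, rounding up.  The GOB
 * (group of bytes) is the basic unit of NVIDIA's tiled memory layouts; its
 * dimensions are given by the NIL_GOB_* macros, with the height depending
 * on whether the image uses 8-row GOBs.
 */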
static struct nil_extent4d
nil_extent4d_B_to_GOB(struct nil_extent4d extent_B,
                      bool gob_height_8)
{
   const struct nil_extent4d gob_extent_B = {
      .w = NIL_GOB_WIDTH_B,
      .h = NIL_GOB_HEIGHT(gob_height_8),
      .d = NIL_GOB_DEPTH,
      .a = 1,
   };

   return nil_extent4d_div_round_up(extent_B, gob_extent_B);
}

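/* Extent of a single tile, in bytes, for the given tiling.  Tiles are
 * always one GOB wide; y_log2 and z_log2 give the log2 of the number of
 * GOBs stacked in Y and Z, respectively.
 */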
static struct nil_extent4d
nil_tiling_extent_B(struct nil_tiling tiling)
{
   if (tiling.is_tiled) {
      return (struct nil_extent4d) {
         .w = NIL_GOB_WIDTH_B, /* Tiles are always 1 GOB wide */
         .h = NIL_GOB_HEIGHT(tiling.gob_height_8) << tiling.y_log2,
         .d = NIL_GOB_DEPTH << tiling.z_log2,
         .a = 1,
      };
   } else {
      /* We handle linear images in nil_image_init */
      return nil_extent4d(1, 1, 1, 1);
   }
}

enum nil_sample_layout
nil_choose_sample_layout(uint32_t samples)
{
   switch (samples) {
   case 1: return NIL_SAMPLE_LAYOUT_1X1;
   case 2: return NIL_SAMPLE_LAYOUT_2X1;
   case 4: return NIL_SAMPLE_LAYOUT_2X2;
   case 8: return NIL_SAMPLE_LAYOUT_4X2;
   case 16: return NIL_SAMPLE_LAYOUT_4X4;
   default:
      unreachable("Unsupported sample count");
   }
}

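/* Choose tiling parameters for an image level with the given extent in
 * bytes.  Unless the caller requested a linear image, we use the smallest
 * power-of-two tile height and depth that cover the level, capped at 32
 * GOBs (log2 == 5).  Images which may be bound as 2D views cannot use Z
 * tiling.
 */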
static struct nil_tiling
choose_tiling(struct nil_extent4d extent_B,
              enum nil_image_usage_flags usage)
{
   if (usage & NIL_IMAGE_USAGE_LINEAR_BIT)
      return (struct nil_tiling) { .is_tiled = false };

   struct nil_tiling tiling = {
      .is_tiled = true,
      .gob_height_8 = true,
   };

   const struct nil_extent4d extent_GOB =
      nil_extent4d_B_to_GOB(extent_B, tiling.gob_height_8);

   const uint32_t height_log2 = util_logbase2_ceil(extent_GOB.height);
   const uint32_t depth_log2 = util_logbase2_ceil(extent_GOB.depth);

   tiling.y_log2 = MIN2(height_log2, 5);
   tiling.z_log2 = MIN2(depth_log2, 5);

   if (usage & NIL_IMAGE_USAGE_2D_VIEW_BIT)
      tiling.z_log2 = 0;

   return tiling;
}

static uint32_t
nil_tiling_size_B(struct nil_tiling tiling)
{
   const struct nil_extent4d extent_B = nil_tiling_extent_B(tiling);
   return extent_B.w * extent_B.h * extent_B.d * extent_B.a;
}

static struct nil_extent4d
nil_extent4d_B_to_tl(struct nil_extent4d extent_B,
                     struct nil_tiling tiling)
{
   return nil_extent4d_div_round_up(extent_B, nil_tiling_extent_B(tiling));
}

struct nil_extent4d
nil_image_level_extent_px(const struct nil_image *image, uint32_t level)
{
   assert(level == 0 || image->sample_layout == NIL_SAMPLE_LAYOUT_1X1);

   return nil_minify_extent4d(image->extent_px, level);
}

struct nil_extent4d
nil_image_level_extent_sa(const struct nil_image *image, uint32_t level)
{
   const struct nil_extent4d level_extent_px =
      nil_image_level_extent_px(image, level);

   return nil_extent4d_px_to_sa(level_extent_px, image->sample_layout);
}

static struct nil_extent4d
image_level_extent_B(const struct nil_image *image, uint32_t level)
{
   const struct nil_extent4d level_extent_px =
      nil_image_level_extent_px(image, level);
   const struct nil_extent4d level_extent_el =
      nil_extent4d_px_to_el(level_extent_px, image->format,
                            image->sample_layout);
   const uint32_t B_per_el = util_format_get_blocksize(image->format);
   return nil_extent4d_el_to_B(level_extent_el, B_per_el);
}

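/* Choose a page kind for Turing+ MMUs.  Only depth/stencil formats get
 * format-specific page kinds here; everything else returns kind 0.
 */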
static uint8_t
tu102_choose_pte_kind(enum pipe_format format, bool compressed)
{
   switch (format) {
   case PIPE_FORMAT_Z16_UNORM:
      if (compressed)
         return 0x0b; // NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC
      else
         return 0x01; // NV_MMU_PTE_KIND_Z16
   case PIPE_FORMAT_X8Z24_UNORM:
   case PIPE_FORMAT_S8X24_UINT:
   case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      if (compressed)
         return 0x0e; // NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC
      else
         return 0x05; // NV_MMU_PTE_KIND_Z24S8
   case PIPE_FORMAT_X24S8_UINT:
   case PIPE_FORMAT_Z24X8_UNORM:
   case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      if (compressed)
         return 0x0c; // NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC
      else
         return 0x03; // NV_MMU_PTE_KIND_S8Z24
   case PIPE_FORMAT_X32_S8X24_UINT:
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
      if (compressed)
         return 0x0d; // NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC
      else
         return 0x04; // NV_MMU_PTE_KIND_ZF32_X24S8
   case PIPE_FORMAT_Z32_FLOAT:
      return 0x06;
   default:
      return 0;
   }
}

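/* Choose a page kind for Fermi through Volta MMUs.  Unlike Turing, the
 * kinds for color formats depend on the block size, and the compressed
 * kinds also encode the sample count.
 */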
static uint8_t
nvc0_choose_pte_kind(enum pipe_format format,
                     uint32_t samples, bool compressed)
{
   const unsigned ms = util_logbase2(samples);

   switch (format) {
   case PIPE_FORMAT_Z16_UNORM:
      if (compressed)
         return 0x02 + ms;
      else
         return 0x01;
   case PIPE_FORMAT_X8Z24_UNORM:
   case PIPE_FORMAT_S8X24_UINT:
   case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      if (compressed)
         return 0x51 + ms;
      else
         return 0x46;
   case PIPE_FORMAT_X24S8_UINT:
   case PIPE_FORMAT_Z24X8_UNORM:
   case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      if (compressed)
         return 0x17 + ms;
      else
         return 0x11;
   case PIPE_FORMAT_Z32_FLOAT:
      if (compressed)
         return 0x86 + ms;
      else
         return 0x7b;
   case PIPE_FORMAT_X32_S8X24_UINT:
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
      if (compressed)
         return 0xce + ms;
      else
         return 0xc3;
   default:
      switch (util_format_get_blocksizebits(format)) {
      case 128:
         if (compressed)
            return 0xf4 + ms * 2;
         else
            return 0xfe;
      case 64:
         if (compressed) {
            switch (samples) {
            case 1: return 0xe6;
            case 2: return 0xeb;
            case 4: return 0xed;
            case 8: return 0xf2;
            default: return 0;
            }
         } else {
            return 0xfe;
         }
      case 32:
         if (compressed && ms) {
            switch (samples) {
            /* This one makes things blurry:
            case 1: return 0xdb;
            */
            case 2: return 0xdd;
            case 4: return 0xdf;
            case 8: return 0xe4;
            default: return 0;
            }
         } else {
            return 0xfe;
         }
      case 16:
      case 8:
         return 0xfe;
      default:
         return 0;
      }
   }
}

static uint8_t
nil_choose_pte_kind(struct nv_device_info *dev,
                    enum pipe_format format,
                    uint32_t samples, bool compressed)
{
   if (dev->cls_eng3d >= TURING_A)
      return tu102_choose_pte_kind(format, compressed);
   else if (dev->cls_eng3d >= FERMI_A)
      return nvc0_choose_pte_kind(format, samples, compressed);
   else
      unreachable("Unsupported 3D engine class");
}

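/* Initialize a nil_image from the given creation parameters.  Levels are
 * laid out consecutively within a single array layer, each with its own
 * tiling; the resulting layer size, aligned to the level-0 tile size,
 * becomes the array stride.
 */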
bool
nil_image_init(struct nv_device_info *dev,
               struct nil_image *image,
               const struct nil_image_init_info *restrict info)
{
   switch (info->dim) {
   case NIL_IMAGE_DIM_1D:
      assert(info->extent_px.h == 1);
      assert(info->extent_px.d == 1);
      assert(info->samples == 1);
      break;
   case NIL_IMAGE_DIM_2D:
      assert(info->extent_px.d == 1);
      break;
   case NIL_IMAGE_DIM_3D:
      assert(info->extent_px.a == 1);
      assert(info->samples == 1);
      break;
   }

   *image = (struct nil_image) {
      .dim = info->dim,
      .format = info->format,
      .extent_px = info->extent_px,
      .sample_layout = nil_choose_sample_layout(info->samples),
      .num_levels = info->levels,
   };

   uint64_t layer_size_B = 0;
   for (uint32_t l = 0; l < info->levels; l++) {
      struct nil_extent4d lvl_ext_B = image_level_extent_B(image, l);

      /* Tiling is chosen per-level with LOD0 acting as a maximum */
      struct nil_tiling lvl_tiling = choose_tiling(lvl_ext_B, info->usage);

      if (lvl_tiling.is_tiled) {
         /* Align the size to tiles */
         struct nil_extent4d lvl_tiling_ext_B = nil_tiling_extent_B(lvl_tiling);
         lvl_ext_B = nil_extent4d_align(lvl_ext_B, lvl_tiling_ext_B);

         image->levels[l] = (struct nil_image_level) {
            .offset_B = layer_size_B,
            .tiling = lvl_tiling,
            .row_stride_B = lvl_ext_B.width,
         };

         layer_size_B += (uint64_t)lvl_ext_B.w *
                         (uint64_t)lvl_ext_B.h *
                         (uint64_t)lvl_ext_B.d;
      } else {
         /* Linear images need to be 2D */
         assert(image->dim == NIL_IMAGE_DIM_2D);
         /* NVIDIA can't do linear and mipmapping */
         assert(image->num_levels == 1);
         /* NVIDIA can't do linear and multisampling */
         assert(image->sample_layout == NIL_SAMPLE_LAYOUT_1X1);

         image->levels[l] = (struct nil_image_level) {
            .offset_B = layer_size_B,
            .tiling = lvl_tiling,
            /* Row stride needs to be aligned to 128B for render to work */
            .row_stride_B = align(lvl_ext_B.width, 128),
         };

         assert(lvl_ext_B.d == 1);
         layer_size_B += (uint64_t)image->levels[l].row_stride_B *
                         (uint64_t)lvl_ext_B.h;
      }
   }

   /* Align the image and array stride to a single level0 tile */
   image->align_B = nil_tiling_size_B(image->levels[0].tiling);

   /* I have no idea why but hardware seems to align layer strides */
   image->array_stride_B = (uint32_t)align64(layer_size_B, image->align_B);

   image->size_B = (uint64_t)image->array_stride_B * image->extent_px.a;

   if (image->levels[0].tiling.is_tiled) {
      image->tile_mode = (uint16_t)image->levels[0].tiling.y_log2 << 4 |
                         (uint16_t)image->levels[0].tiling.z_log2 << 8;

      image->pte_kind = nil_choose_pte_kind(dev, info->format, info->samples,
                                            false /* TODO: compressed */);

      image->align_B = MAX2(image->align_B, 4096);
      if (image->pte_kind >= 0xb && image->pte_kind <= 0xe)
         image->align_B = MAX2(image->align_B, (1 << 16));
   } else {
      /* Linear images need to be aligned to 128B for render to work */
      image->align_B = MAX2(image->align_B, 128);
   }

   image->size_B = align64(image->size_B, image->align_B);
   return true;
}

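/* Size, in bytes, of a single array layer's worth of the given miplevel,
 * including the padding needed to round the level up to whole tiles.
 */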
uint64_t
nil_image_level_size_B(const struct nil_image *image, uint32_t level)
{
   assert(level < image->num_levels);

   /* See the nil_image::levels[] computations */
   struct nil_extent4d lvl_ext_B = image_level_extent_B(image, level);
   struct nil_extent4d lvl_tiling_ext_B =
      nil_tiling_extent_B(image->levels[level].tiling);
   lvl_ext_B = nil_extent4d_align(lvl_ext_B, lvl_tiling_ext_B);

   return (uint64_t)lvl_ext_B.w *
          (uint64_t)lvl_ext_B.h *
          (uint64_t)lvl_ext_B.d;
}

uint64_t
nil_image_level_depth_stride_B(const struct nil_image *image, uint32_t level)
{
   assert(level < image->num_levels);

   /* See the nil_image::levels[] computations */
   struct nil_extent4d lvl_ext_B = image_level_extent_B(image, level);
   struct nil_extent4d lvl_tiling_ext_B =
      nil_tiling_extent_B(image->levels[level].tiling);
   lvl_ext_B = nil_extent4d_align(lvl_ext_B, lvl_tiling_ext_B);

   return (uint64_t)lvl_ext_B.w * (uint64_t)lvl_ext_B.h;
}

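/* Fill lvl_image_out with a single-level image describing just the given
 * miplevel of image_in, and return the byte offset of that level in
 * offset_B_out.
 */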
void
nil_image_for_level(const struct nil_image *image_in,
                    uint32_t level,
                    struct nil_image *lvl_image_out,
                    uint64_t *offset_B_out)
{
   assert(level < image_in->num_levels);

   const struct nil_extent4d lvl_extent_px =
      nil_image_level_extent_px(image_in, level);
   struct nil_image_level lvl = image_in->levels[level];
   const uint32_t align_B = nil_tiling_size_B(lvl.tiling);

   uint64_t size_B = image_in->size_B - lvl.offset_B;
   if (level + 1 < image_in->num_levels) {
      /* This assumes levels are sequential, tightly packed, and that each
       * level has a higher alignment than the next one.  All of this is
       * currently true.
       */
      const uint64_t next_lvl_offset_B = image_in->levels[level + 1].offset_B;
      assert(next_lvl_offset_B > lvl.offset_B);
      size_B -= next_lvl_offset_B - lvl.offset_B;
   }

   *offset_B_out = lvl.offset_B;
   lvl.offset_B = 0;

   *lvl_image_out = (struct nil_image) {
      .dim = image_in->dim,
      .format = image_in->format,
      .extent_px = lvl_extent_px,
      .sample_layout = image_in->sample_layout,
      .num_levels = 1,
      .levels[0] = lvl,
      .array_stride_B = image_in->array_stride_B,
      .align_B = align_B,
      .size_B = size_B,
      .tile_mode = image_in->tile_mode,
      .pte_kind = image_in->pte_kind,
   };
}

static enum pipe_format
pipe_format_for_bits(uint32_t bits)
{
   switch (bits) {
   case 32: return PIPE_FORMAT_R32_UINT;
   case 64: return PIPE_FORMAT_R32G32_UINT;
   case 128: return PIPE_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("No PIPE_FORMAT with this size");
   }
}

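/* Return a view of the given miplevel of a block-compressed image as an
 * uncompressed image with one texel per compression block.  The uint
 * format is chosen only to match the block size in bits.
 */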
void
nil_image_level_as_uncompressed(const struct nil_image *image_in,
                                uint32_t level,
                                struct nil_image *uc_image_out,
                                uint64_t *offset_B_out)
{
   assert(image_in->sample_layout == NIL_SAMPLE_LAYOUT_1X1);

   /* Format is arbitrary. Pick one that has the right number of bits. */
   const enum pipe_format uc_format =
      pipe_format_for_bits(util_format_get_blocksizebits(image_in->format));

   struct nil_image lvl_image;
   nil_image_for_level(image_in, level, &lvl_image, offset_B_out);

   *uc_image_out = lvl_image;
   uc_image_out->format = uc_format;
   uc_image_out->extent_px =
      nil_extent4d_px_to_el(lvl_image.extent_px, lvl_image.format,
                            lvl_image.sample_layout);
}

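/* Return a view of the given miplevel of a 3D image as a 2D array image in
 * which each Z slice becomes an array layer.  This requires that the level
 * have no Z tiling (z_log2 == 0) so slices are contiguous in memory; the
 * array stride is then the size in bytes of one tile-aligned slice.
 */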
void
nil_image_3d_level_as_2d_array(const struct nil_image *image_3d,
                               uint32_t level,
                               struct nil_image *image_2d_out,
                               uint64_t *offset_B_out)
{
   assert(image_3d->dim == NIL_IMAGE_DIM_3D);
   assert(image_3d->extent_px.array_len == 1);
   assert(image_3d->sample_layout == NIL_SAMPLE_LAYOUT_1X1);

   struct nil_image lvl_image;
   nil_image_for_level(image_3d, level, &lvl_image, offset_B_out);

   assert(lvl_image.num_levels == 1);
   assert(!lvl_image.levels[0].tiling.is_tiled ||
          lvl_image.levels[0].tiling.z_log2 == 0);

   struct nil_extent4d lvl_tiling_ext_B =
      nil_tiling_extent_B(lvl_image.levels[0].tiling);
   struct nil_extent4d lvl_ext_B = image_level_extent_B(&lvl_image, 0);
   lvl_ext_B = nil_extent4d_align(lvl_ext_B, lvl_tiling_ext_B);
   uint64_t z_stride = (uint64_t)lvl_ext_B.w * (uint64_t)lvl_ext_B.h;

   *image_2d_out = lvl_image;
   image_2d_out->dim = NIL_IMAGE_DIM_2D;
   image_2d_out->extent_px.d = 1;
   image_2d_out->extent_px.a = lvl_image.extent_px.d;
   image_2d_out->array_stride_B = z_stride;
}
625