/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file v3d_tiling.c
 *
 * Handles information about the VC5 tiling formats, and loading and storing
 * from them.
 */

#include <stdint.h>
#include <string.h>

#include "v3dv_private.h"

#include "util/u_math.h"
#include "util/u_box.h"

#include "broadcom/common/v3d_cpu_tiling.h"

/** Return the width in pixels of a 64-byte microtile. */
uint32_t
v3d_utile_width(int cpp)
{
        switch (cpp) {
        case 1:
        case 2:
                return 8;
        case 4:
        case 8:
                return 4;
        case 16:
                return 2;
        default:
                unreachable("unknown cpp");
        }
}

/** Return the height in pixels of a 64-byte microtile. */
uint32_t
v3d_utile_height(int cpp)
{
        switch (cpp) {
        case 1:
                return 8;
        case 2:
        case 4:
                return 4;
        case 8:
        case 16:
                return 2;
        default:
                unreachable("unknown cpp");
        }
}

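/* Summary of the microtile footprints implied by the two helpers above: in
 * every case utile_width * utile_height * cpp == 64 bytes.
 *
 *      cpp (bytes/pixel)   utile size (pixels)
 *      1                   8x8
 *      2                   8x4
 *      4                   4x4
 *      8                   4x2
 *      16                  2x2
 */
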
/**
 * Returns the byte address for a given pixel within a utile.
 *
 * Utiles are 64-byte blocks of pixels in raster order, with 32bpp being a
 * 4x4 arrangement.
 */
static inline uint32_t
v3d_get_utile_pixel_offset(uint32_t cpp, uint32_t x, uint32_t y)
{
        uint32_t utile_w = v3d_utile_width(cpp);

        assert(x < utile_w && y < v3d_utile_height(cpp));

        return x * cpp + y * utile_w * cpp;
}

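/* Worked example for the helper above: at 32bpp (cpp = 4) the utile is 4x4
 * pixels, so the pixel at (x = 1, y = 2) within the utile lives at byte
 * 1 * 4 + 2 * 4 * 4 = 36 of the 64-byte utile.
 */
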
/**
 * Returns the byte offset for a given pixel in a LINEARTILE layout.
 *
 * LINEARTILE is a single line of utiles in either the X or Y direction.
 */
static inline uint32_t
v3d_get_lt_pixel_offset(uint32_t cpp, uint32_t image_h, uint32_t x, uint32_t y)
{
        uint32_t utile_w = v3d_utile_width(cpp);
        uint32_t utile_h = v3d_utile_height(cpp);
        uint32_t utile_index_x = x / utile_w;
        uint32_t utile_index_y = y / utile_h;

        assert(utile_index_x == 0 || utile_index_y == 0);

        return (64 * (utile_index_x + utile_index_y) +
                v3d_get_utile_pixel_offset(cpp,
                                           x & (utile_w - 1),
                                           y & (utile_h - 1)));
}

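/* Each successive utile along the line costs 64 bytes.  For example, at
 * 32bpp (cpp = 4, 4x4 utiles) the pixel at (x = 9, y = 1) falls in utile
 * index 2 and resolves to 64 * 2 + (1 * 4 + 1 * 4 * 4) = 148 bytes.
 */
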
/**
 * Returns the byte offset for a given pixel in a UBLINEAR layout.
 *
 * UBLINEAR is the layout where pixels are arranged in UIF blocks (2x2
 * utiles), and the UIF blocks are in 1 or 2 columns in raster order.
 */
static inline uint32_t
v3d_get_ublinear_pixel_offset(uint32_t cpp, uint32_t x, uint32_t y,
                              int ublinear_number)
{
        uint32_t utile_w = v3d_utile_width(cpp);
        uint32_t utile_h = v3d_utile_height(cpp);
        uint32_t ub_w = utile_w * 2;
        uint32_t ub_h = utile_h * 2;
        uint32_t ub_x = x / ub_w;
        uint32_t ub_y = y / ub_h;

        return (256 * (ub_y * ublinear_number +
                       ub_x) +
                ((x & utile_w) ? 64 : 0) +
                ((y & utile_h) ? 128 : 0) +
                v3d_get_utile_pixel_offset(cpp,
                                           x & (utile_w - 1),
                                           y & (utile_h - 1)));
}

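/* The constants above follow from the utile size: each utile is 64 bytes, so
 * a 2x2-utile UIF block is 256 bytes, with the four utiles stored in raster
 * order within the block (top-left at +0, top-right at +64, bottom-left at
 * +128, bottom-right at +192).
 */
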
static inline uint32_t
v3d_get_ublinear_2_column_pixel_offset(uint32_t cpp, uint32_t image_h,
                                       uint32_t x, uint32_t y)
{
        return v3d_get_ublinear_pixel_offset(cpp, x, y, 2);
}

static inline uint32_t
v3d_get_ublinear_1_column_pixel_offset(uint32_t cpp, uint32_t image_h,
                                       uint32_t x, uint32_t y)
{
        return v3d_get_ublinear_pixel_offset(cpp, x, y, 1);
}

/**
 * Returns the byte offset for a given pixel in a UIF layout.
 *
 * UIF is the general VC5 tiling layout shared across 3D, media, and scanout.
 * It stores pixels in UIF blocks (2x2 utiles), and UIF blocks are stored in
 * 4x4 groups, and those 4x4 groups are then stored in raster order.
 */
static inline uint32_t
v3d_get_uif_pixel_offset(uint32_t cpp, uint32_t image_h, uint32_t x, uint32_t y,
                         bool do_xor)
{
        uint32_t utile_w = v3d_utile_width(cpp);
        uint32_t utile_h = v3d_utile_height(cpp);
        uint32_t mb_width = utile_w * 2;
        uint32_t mb_height = utile_h * 2;
        uint32_t log2_mb_width = ffs(mb_width) - 1;
        uint32_t log2_mb_height = ffs(mb_height) - 1;

        /* Macroblock x, y */
        uint32_t mb_x = x >> log2_mb_width;
        uint32_t mb_y = y >> log2_mb_height;
        /* x, y within the macroblock */
        uint32_t mb_pixel_x = x - (mb_x << log2_mb_width);
        uint32_t mb_pixel_y = y - (mb_y << log2_mb_height);

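        /* In the XOR variant, every other group of four UIF-block columns has
         * bit 4 of its block row index flipped, presumably to stagger
         * vertically adjacent blocks across memory banks.
         */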
        if (do_xor && (mb_x / 4) & 1)
                mb_y ^= 0x10;

        uint32_t mb_h = align(image_h, 1 << log2_mb_height) >> log2_mb_height;
        uint32_t mb_id = ((mb_x / 4) * ((mb_h - 1) * 4)) + mb_x + mb_y * 4;

        uint32_t mb_base_addr = mb_id * 256;

        bool top = mb_pixel_y < utile_h;
        bool left = mb_pixel_x < utile_w;

        /* Docs have this in pixels, we do bytes here. */
        uint32_t mb_tile_offset = (!top * 128 + !left * 64);

        uint32_t utile_x = mb_pixel_x & (utile_w - 1);
        uint32_t utile_y = mb_pixel_y & (utile_h - 1);

        uint32_t mb_pixel_address = (mb_base_addr +
                                     mb_tile_offset +
                                     v3d_get_utile_pixel_offset(cpp,
                                                                utile_x,
                                                                utile_y));

        return mb_pixel_address;
}

static inline uint32_t
v3d_get_uif_xor_pixel_offset(uint32_t cpp, uint32_t image_h,
                             uint32_t x, uint32_t y)
{
        return v3d_get_uif_pixel_offset(cpp, image_h, x, y, true);
}

static inline uint32_t
v3d_get_uif_no_xor_pixel_offset(uint32_t cpp, uint32_t image_h,
                                uint32_t x, uint32_t y)
{
        return v3d_get_uif_pixel_offset(cpp, image_h, x, y, false);
}

/* Loads/stores non-utile-aligned boxes by walking over the destination
 * rectangle, computing the address on the GPU, and storing/loading a pixel at
 * a time.
 */
static inline void
v3d_move_pixels_unaligned(void *gpu, uint32_t gpu_stride,
                          void *cpu, uint32_t cpu_stride,
                          int cpp, uint32_t image_h,
                          const struct pipe_box *box,
                          uint32_t (*get_pixel_offset)(uint32_t cpp,
                                                       uint32_t image_h,
                                                       uint32_t x, uint32_t y),
                          bool is_load)
{
        for (uint32_t y = 0; y < box->height; y++) {
                void *cpu_row = cpu + y * cpu_stride;

                for (int x = 0; x < box->width; x++) {
                        uint32_t pixel_offset = get_pixel_offset(cpp, image_h,
                                                                 box->x + x,
                                                                 box->y + y);

                        if (false) {
                                fprintf(stderr, "%3d,%3d -> %d\n",
                                        box->x + x, box->y + y,
                                        pixel_offset);
                        }

                        if (is_load) {
                                memcpy(cpu_row + x * cpp,
                                       gpu + pixel_offset,
                                       cpp);
                        } else {
                                memcpy(gpu + pixel_offset,
                                       cpu_row + x * cpp,
                                       cpp);
                        }
                }
        }
}

/* Breaks the image down into utiles and calls either the fast whole-utile
 * load/store functions, or the unaligned fallback case.
 */
static inline void
v3d_move_pixels_general_percpp(void *gpu, uint32_t gpu_stride,
                               void *cpu, uint32_t cpu_stride,
                               int cpp, uint32_t image_h,
                               const struct pipe_box *box,
                               uint32_t (*get_pixel_offset)(uint32_t cpp,
                                                            uint32_t image_h,
                                                            uint32_t x, uint32_t y),
                               bool is_load)
{
        uint32_t utile_w = v3d_utile_width(cpp);
        uint32_t utile_h = v3d_utile_height(cpp);
        uint32_t utile_gpu_stride = utile_w * cpp;
        uint32_t x1 = box->x;
        uint32_t y1 = box->y;
        uint32_t x2 = box->x + box->width;
        uint32_t y2 = box->y + box->height;
        uint32_t align_x1 = align(x1, utile_w);
        uint32_t align_y1 = align(y1, utile_h);
        uint32_t align_x2 = x2 & ~(utile_w - 1);
        uint32_t align_y2 = y2 & ~(utile_h - 1);

        /* Load/store all the whole utiles first. */
        for (uint32_t y = align_y1; y < align_y2; y += utile_h) {
                void *cpu_row = cpu + (y - box->y) * cpu_stride;

                for (uint32_t x = align_x1; x < align_x2; x += utile_w) {
                        void *utile_gpu = (gpu +
                                           get_pixel_offset(cpp, image_h, x, y));
                        void *utile_cpu = cpu_row + (x - box->x) * cpp;

                        if (is_load) {
                                v3d_load_utile(utile_cpu, cpu_stride,
                                               utile_gpu, utile_gpu_stride);
                        } else {
                                v3d_store_utile(utile_gpu, utile_gpu_stride,
                                                utile_cpu, cpu_stride);
                        }
                }
        }

        /* If there were no aligned utiles in the middle, load/store the whole
         * thing unaligned.
         */
        if (align_y2 <= align_y1 ||
            align_x2 <= align_x1) {
                v3d_move_pixels_unaligned(gpu, gpu_stride,
                                          cpu, cpu_stride,
                                          cpp, image_h,
                                          box,
                                          get_pixel_offset, is_load);
                return;
        }

        /* Load/store the partial utiles. */
        struct pipe_box partial_boxes[4] = {
                /* Top */
                {
                        .x = x1,
                        .width = x2 - x1,
                        .y = y1,
                        .height = align_y1 - y1,
                },
                /* Bottom */
                {
                        .x = x1,
                        .width = x2 - x1,
                        .y = align_y2,
                        .height = y2 - align_y2,
                },
                /* Left */
                {
                        .x = x1,
                        .width = align_x1 - x1,
                        .y = align_y1,
                        .height = align_y2 - align_y1,
                },
                /* Right */
                {
                        .x = align_x2,
                        .width = x2 - align_x2,
                        .y = align_y1,
                        .height = align_y2 - align_y1,
                },
        };
        for (int i = 0; i < ARRAY_SIZE(partial_boxes); i++) {
                void *partial_cpu = (cpu +
                                     (partial_boxes[i].y - y1) * cpu_stride +
                                     (partial_boxes[i].x - x1) * cpp);

                v3d_move_pixels_unaligned(gpu, gpu_stride,
                                          partial_cpu, cpu_stride,
                                          cpp, image_h,
                                          &partial_boxes[i],
                                          get_pixel_offset, is_load);
        }
}

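/* Dispatches to per-cpp instantiations of the general mover.  The switch
 * below presumably exists so that cpp is a compile-time constant in each
 * inlined copy of v3d_move_pixels_general_percpp, letting the per-pixel
 * utile math be constant-folded.
 */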
static inline void
v3d_move_pixels_general(void *gpu, uint32_t gpu_stride,
                        void *cpu, uint32_t cpu_stride,
                        int cpp, uint32_t image_h,
                        const struct pipe_box *box,
                        uint32_t (*get_pixel_offset)(uint32_t cpp,
                                                     uint32_t image_h,
                                                     uint32_t x, uint32_t y),
                        bool is_load)
{
        switch (cpp) {
        case 1:
                v3d_move_pixels_general_percpp(gpu, gpu_stride,
                                               cpu, cpu_stride,
                                               1, image_h, box,
                                               get_pixel_offset,
                                               is_load);
                break;
        case 2:
                v3d_move_pixels_general_percpp(gpu, gpu_stride,
                                               cpu, cpu_stride,
                                               2, image_h, box,
                                               get_pixel_offset,
                                               is_load);
                break;
        case 4:
                v3d_move_pixels_general_percpp(gpu, gpu_stride,
                                               cpu, cpu_stride,
                                               4, image_h, box,
                                               get_pixel_offset,
                                               is_load);
                break;
        case 8:
                v3d_move_pixels_general_percpp(gpu, gpu_stride,
                                               cpu, cpu_stride,
                                               8, image_h, box,
                                               get_pixel_offset,
                                               is_load);
                break;
        case 16:
                v3d_move_pixels_general_percpp(gpu, gpu_stride,
                                               cpu, cpu_stride,
                                               16, image_h, box,
                                               get_pixel_offset,
                                               is_load);
                break;
        }
}

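/* Selects the pixel-offset callback matching the tiling format and hands the
 * transfer off to the general mover.
 */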
static inline void
v3d_move_tiled_image(void *gpu, uint32_t gpu_stride,
                     void *cpu, uint32_t cpu_stride,
                     enum v3d_tiling_mode tiling_format,
                     int cpp,
                     uint32_t image_h,
                     const struct pipe_box *box,
                     bool is_load)
{
        switch (tiling_format) {
        case VC5_TILING_UIF_XOR:
                v3d_move_pixels_general(gpu, gpu_stride,
                                        cpu, cpu_stride,
                                        cpp, image_h, box,
                                        v3d_get_uif_xor_pixel_offset,
                                        is_load);
                break;
        case VC5_TILING_UIF_NO_XOR:
                v3d_move_pixels_general(gpu, gpu_stride,
                                        cpu, cpu_stride,
                                        cpp, image_h, box,
                                        v3d_get_uif_no_xor_pixel_offset,
                                        is_load);
                break;
        case VC5_TILING_UBLINEAR_2_COLUMN:
                v3d_move_pixels_general(gpu, gpu_stride,
                                        cpu, cpu_stride,
                                        cpp, image_h, box,
                                        v3d_get_ublinear_2_column_pixel_offset,
                                        is_load);
                break;
        case VC5_TILING_UBLINEAR_1_COLUMN:
                v3d_move_pixels_general(gpu, gpu_stride,
                                        cpu, cpu_stride,
                                        cpp, image_h, box,
                                        v3d_get_ublinear_1_column_pixel_offset,
                                        is_load);
                break;
        case VC5_TILING_LINEARTILE:
                v3d_move_pixels_general(gpu, gpu_stride,
                                        cpu, cpu_stride,
                                        cpp, image_h, box,
                                        v3d_get_lt_pixel_offset,
                                        is_load);
                break;
        default:
                unreachable("Unsupported tiling format");
                break;
        }
}

/**
 * Loads pixel data from a (not necessarily microtile-aligned) box in the
 * tiled \p src to the start of the linear \p dst, according to the given
 * tiling format.
 */
void
v3d_load_tiled_image(void *dst, uint32_t dst_stride,
                     void *src, uint32_t src_stride,
                     enum v3d_tiling_mode tiling_format, int cpp,
                     uint32_t image_h,
                     const struct pipe_box *box)
{
        v3d_move_tiled_image(src, src_stride,
                             dst, dst_stride,
                             tiling_format,
                             cpp,
                             image_h,
                             box,
                             true);
}

/**
 * Stores pixel data from the start of the linear \p src into a (not
 * necessarily microtile-aligned) box in the tiled \p dst, according to the
 * given tiling format.
 */
void
v3d_store_tiled_image(void *dst, uint32_t dst_stride,
                      void *src, uint32_t src_stride,
                      enum v3d_tiling_mode tiling_format, int cpp,
                      uint32_t image_h,
                      const struct pipe_box *box)
{
        v3d_move_tiled_image(dst, dst_stride,
                             src, src_stride,
                             tiling_format,
                             cpp,
                             image_h,
                             box,
                             false);
}
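
/*
 * Usage sketch (illustrative only; the buffer names, strides, and offsets
 * below are hypothetical, not part of this file): reading a rectangle of a
 * UIF-tiled, 32bpp image back into a tightly packed linear staging buffer
 * might look like:
 *
 *    struct pipe_box box;
 *    u_box_2d(x, y, width, height, &box);
 *    v3d_load_tiled_image(staging, width * 4,
 *                         tiled_map + slice_offset, slice_stride,
 *                         VC5_TILING_UIF_NO_XOR, 4, slice_height, &box);
 */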