/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file vc4_tiling.c
 *
 * Handles information about the VC4 tiling formats, and loading and storing
 * from them.
 *
 * Texture mipmap levels on VC4 are (with the exception of 32-bit RGBA raster
 * textures for scanout) stored as groups of microtiles. If the texture is at
 * least 4x4 microtiles (utiles), then those microtiles are arranged in a sort
 * of Hilbert-fractal-ish layout (T), otherwise the microtiles are in raster
 * order (LT).
 *
 * Specifically, the T format has:
 *
 * - 64-byte utiles of pixels in a raster-order grid according to cpp. It's
 *   4x4 pixels at 32 bit depth.
 *
 * - 1k subtiles made of a 4x4 raster-order grid of 64-byte utiles (so usually
 *   16x16 pixels).
 *
 * - 4k tiles made of a 2x2 grid of 1k subtiles (so usually 32x32 pixels). On
 *   even 4k tile rows, they're arranged as (BL, TL, TR, BR), and on odd rows
 *   they're (TR, BR, BL, TL), where bottom left is the start of memory.
 *
 * - an image made of 4k tiles in rows either left-to-right (even rows of 4k
 *   tiles) or right-to-left (odd rows of 4k tiles).
 */

#include "vc4_screen.h"
#include "vc4_context.h"
#include "vc4_tiling.h"
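
/* A minimal worked example of the hierarchy described in the file comment
 * above, for 32bpp (cpp == 4). Kept under #if 0 purely as illustration (the
 * function below is not part of the driver): a utile is 4x4 pixels (64
 * bytes), a subtile is a 4x4 grid of utiles (16x16 pixels, 1024 bytes), and
 * a 4k tile is a 2x2 grid of subtiles (32x32 pixels, 4096 bytes).
 */
#if 0
static void
vc4_tiling_layout_example(void)
{
        const int cpp = 4;
        const uint32_t utile_bytes = 4 * 4 * cpp;             /* 64 */
        const uint32_t subtile_bytes = 4 * 4 * utile_bytes;   /* 1024 */
        const uint32_t tile_bytes = 2 * 2 * subtile_bytes;    /* 4096 */

        assert(utile_bytes == 64);
        assert(subtile_bytes == 1024);
        assert(tile_bytes == 4096);
}
#endif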

/** Returns the stride in bytes of a 64-byte microtile. */
static uint32_t
vc4_utile_stride(int cpp)
{
        switch (cpp) {
        case 1:
                return 8;
        case 2:
        case 4:
        case 8:
                return 16;
        default:
                unreachable("bad cpp");
        }
}

/**
 * The texture unit decides what tiling format a particular miplevel is in
 * using this same size check, so we lay out our miptrees accordingly. For
 * example, at 32bpp (4x4-pixel utiles) an 8x64 level is laid out LT, while a
 * 64x64 level is laid out T.
 */
bool
vc4_size_is_lt(uint32_t width, uint32_t height, int cpp)
{
        return (width <= 4 * vc4_utile_width(cpp) ||
                height <= 4 * vc4_utile_height(cpp));
}

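/**
 * Copies a single 64-byte utile stored contiguously at \p src out to raster
 * order at \p dst, one utile row (vc4_utile_stride() bytes) at a time.
 */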
static void
vc4_load_utile(void *dst, void *src, uint32_t dst_stride, uint32_t cpp)
{
        uint32_t src_stride = vc4_utile_stride(cpp);

        for (uint32_t src_offset = 0; src_offset < 64; src_offset += src_stride) {
                memcpy(dst, src + src_offset, src_stride);
                dst += dst_stride;
        }
}

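/**
 * Copies raster-order pixel data from \p src into a single 64-byte utile
 * stored contiguously at \p dst, one utile row (vc4_utile_stride() bytes) at
 * a time.
 */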
static void
vc4_store_utile(void *dst, void *src, uint32_t src_stride, uint32_t cpp)
{
        uint32_t dst_stride = vc4_utile_stride(cpp);

        for (uint32_t dst_offset = 0; dst_offset < 64; dst_offset += dst_stride) {
                memcpy(dst + dst_offset, src, dst_stride);
                src += src_stride;
        }
}

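/**
 * Asserts that the box being accessed starts and ends on utile boundaries,
 * which the utile copy loops below rely on.
 */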
static void
check_box_utile_alignment(const struct pipe_box *box, int cpp)
{
        assert(!(box->x & (vc4_utile_width(cpp) - 1)));
        assert(!(box->y & (vc4_utile_height(cpp) - 1)));
        assert(!(box->width & (vc4_utile_width(cpp) - 1)));
        assert(!(box->height & (vc4_utile_height(cpp) - 1)));
}

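/**
 * Copies the utile-aligned \p box out of an LT image at \p src (utiles in
 * raster order) to the start of \p dst, one utile at a time.
 */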
static void
vc4_load_lt_image(void *dst, uint32_t dst_stride,
                  void *src, uint32_t src_stride,
                  int cpp, const struct pipe_box *box)
{
        uint32_t utile_w = vc4_utile_width(cpp);
        uint32_t utile_h = vc4_utile_height(cpp);
        uint32_t xstart = box->x;
        uint32_t ystart = box->y;

        for (uint32_t y = 0; y < box->height; y += utile_h) {
                for (int x = 0; x < box->width; x += utile_w) {
                        vc4_load_utile(dst + (dst_stride * y +
                                              x * cpp),
                                       src + ((ystart + y) * src_stride +
                                              (xstart + x) * 64 / utile_w),
                                       dst_stride, cpp);
                }
        }
}

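/**
 * Copies raster-order data from the start of \p src into the utile-aligned
 * \p box of an LT image at \p dst (utiles in raster order), one utile at a
 * time.
 */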
static void
vc4_store_lt_image(void *dst, uint32_t dst_stride,
                   void *src, uint32_t src_stride,
                   int cpp, const struct pipe_box *box)
{
        uint32_t utile_w = vc4_utile_width(cpp);
        uint32_t utile_h = vc4_utile_height(cpp);
        uint32_t xstart = box->x;
        uint32_t ystart = box->y;

        for (uint32_t y = 0; y < box->height; y += utile_h) {
                for (int x = 0; x < box->width; x += utile_w) {
                        vc4_store_utile(dst + ((ystart + y) * dst_stride +
                                               (xstart + x) * 64 / utile_w),
                                        src + (src_stride * y +
                                               x * cpp),
                                        src_stride, cpp);
                }
        }
}

/**
 * Takes a utile x and y (and the number of utiles of width of the image) and
 * returns the offset to the utile within a VC4_TILING_FORMAT_T image.
 */
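/* Worked example (illustration only): with utile_stride == 16, i.e. an image
 * two 4k tiles wide (tile_stride == 2), utile (12, 8) falls on an odd 4k
 * tile row, so its tile_x of 1 flips to 0 and tile_offset == 4096 * (1 * 2 +
 * 0) == 8192; its subtile index within the tile is 1, which the odd-row map
 * keeps at 1, so stile_offset == 1024 and the returned offset is 9216.
 */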
static uint32_t
t_utile_address(uint32_t utile_x, uint32_t utile_y,
                uint32_t utile_stride)
{
        /* T images have to be aligned to 8 utiles (4x4 subtiles, which are
         * 2x2 in a 4k tile).
         */
        assert(!(utile_stride & 7));
        uint32_t tile_stride = utile_stride >> 3;
        /* 4k tile offsets. */
        uint32_t tile_x = utile_x >> 3;
        uint32_t tile_y = utile_y >> 3;
        bool odd_tile_y = tile_y & 1;

        /* Odd lines of 4k tiles go right-to-left. */
        if (odd_tile_y)
                tile_x = tile_stride - tile_x - 1;

        uint32_t tile_offset = 4096 * (tile_y * tile_stride + tile_x);

        uint32_t stile_x = (utile_x >> 2) & 1;
        uint32_t stile_y = (utile_y >> 2) & 1;
        uint32_t stile_index = (stile_y << 1) + stile_x;
        static const uint32_t odd_stile_map[4] = {2, 1, 3, 0};
        static const uint32_t even_stile_map[4] = {0, 3, 1, 2};

        uint32_t stile_offset = 1024 * (odd_tile_y ?
                                        odd_stile_map[stile_index] :
                                        even_stile_map[stile_index]);

        /* This function no longer handles the utile offset within a subtile.
         * Walking the utiles within a subtile is the job of the LT image
         * handlers.
         */
        assert(!(utile_x & 3) && !(utile_y & 3));

#if 0
        fprintf(stderr, "utile %d,%d -> %d + %d (stride %d,%d) = %d\n",
                utile_x, utile_y,
                tile_offset, stile_offset,
                utile_stride, tile_stride,
                tile_offset + stile_offset);
#endif

        return tile_offset + stile_offset;
}

/**
 * Loads or stores a T texture image by breaking it down into subtiles
 * (1024-byte, 4x4-utile) sub-images that we can use the LT tiling functions
 * on.
 */
static inline void
vc4_t_image_helper(void *gpu, uint32_t gpu_stride,
                   void *cpu, uint32_t cpu_stride,
                   int cpp, const struct pipe_box *box,
                   bool to_cpu)
{
        uint32_t utile_w = vc4_utile_width(cpp);
        uint32_t utile_h = vc4_utile_height(cpp);
        uint32_t utile_w_shift = ffs(utile_w) - 1;
        uint32_t utile_h_shift = ffs(utile_h) - 1;
        uint32_t stile_w = 4 * utile_w;
        uint32_t stile_h = 4 * utile_h;
        assert(stile_w * stile_h * cpp == 1024);
        uint32_t utile_stride = gpu_stride / cpp / utile_w;
        uint32_t x1 = box->x;
        uint32_t y1 = box->y;
        uint32_t x2 = box->x + box->width;
        uint32_t y2 = box->y + box->height;
        struct pipe_box partial_box;
        uint32_t gpu_lt_stride = stile_w * cpp;

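        /* Walk the box one subtile-aligned span at a time: align(y + 1,
         * stile_h) jumps to the next subtile boundary, and partial_box below
         * describes the (possibly partial) portion of each 1k subtile that
         * the box covers.
         */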
        for (uint32_t y = y1; y < y2; y = align(y + 1, stile_h)) {
                partial_box.y = y & (stile_h - 1);
                partial_box.height = MIN2(y2 - y, stile_h - partial_box.y);

                uint32_t cpu_offset = 0;
                for (uint32_t x = x1; x < x2; x = align(x + 1, stile_w)) {
                        partial_box.x = x & (stile_w - 1);
                        partial_box.width = MIN2(x2 - x,
                                                 stile_w - partial_box.x);

                        /* The GPU offset we want is the start of this
                         * subtile.
                         */
                        uint32_t gpu_offset =
                                t_utile_address((x >> utile_w_shift) & ~0x3,
                                                (y >> utile_h_shift) & ~0x3,
                                                utile_stride);

                        if (to_cpu) {
                                vc4_load_lt_image(cpu + cpu_offset,
                                                  cpu_stride,
                                                  gpu + gpu_offset,
                                                  gpu_lt_stride,
                                                  cpp, &partial_box);
                        } else {
                                vc4_store_lt_image(gpu + gpu_offset,
                                                   gpu_lt_stride,
                                                   cpu + cpu_offset,
                                                   cpu_stride,
                                                   cpp, &partial_box);
                        }

                        cpu_offset += partial_box.width * cpp;
                }
                cpu += cpu_stride * partial_box.height;
        }
}

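/**
 * Copies raster-order data from the start of \p src into the utile-aligned
 * \p box of a T-format image at \p dst.
 */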
static void
vc4_store_t_image(void *dst, uint32_t dst_stride,
                  void *src, uint32_t src_stride,
                  int cpp, const struct pipe_box *box)
{
        vc4_t_image_helper(dst, dst_stride,
                           src, src_stride,
                           cpp, box, false);
}

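/**
 * Copies the utile-aligned \p box out of a T-format image at \p src to the
 * start of \p dst.
 */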
static void
vc4_load_t_image(void *dst, uint32_t dst_stride,
                 void *src, uint32_t src_stride,
                 int cpp, const struct pipe_box *box)
{
        vc4_t_image_helper(src, src_stride,
                           dst, dst_stride,
                           cpp, box, true);
}

/**
 * Loads pixel data from a (microtile-aligned) box in \p src to the start of
 * \p dst according to the given tiling format.
 */
void
vc4_load_tiled_image(void *dst, uint32_t dst_stride,
                     void *src, uint32_t src_stride,
                     uint8_t tiling_format, int cpp,
                     const struct pipe_box *box)
{
        check_box_utile_alignment(box, cpp);

        if (tiling_format == VC4_TILING_FORMAT_LT) {
                vc4_load_lt_image(dst, dst_stride,
                                  src, src_stride,
                                  cpp, box);
        } else {
                assert(tiling_format == VC4_TILING_FORMAT_T);
                vc4_load_t_image(dst, dst_stride,
                                 src, src_stride,
                                 cpp, box);
        }
}

/**
 * Stores pixel data from the start of \p src into a (microtile-aligned) box
 * in \p dst according to the given tiling format.
 */
void
vc4_store_tiled_image(void *dst, uint32_t dst_stride,
                      void *src, uint32_t src_stride,
                      uint8_t tiling_format, int cpp,
                      const struct pipe_box *box)
{
        check_box_utile_alignment(box, cpp);

        if (tiling_format == VC4_TILING_FORMAT_LT) {
                vc4_store_lt_image(dst, dst_stride,
                                   src, src_stride,
                                   cpp, box);
        } else {
                assert(tiling_format == VC4_TILING_FORMAT_T);
                vc4_store_t_image(dst, dst_stride,
                                  src, src_stride,
                                  cpp, box);
        }
}