• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2 
3 /*
4  * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  *
25  * Authors:
26  *    Rob Clark <robclark@freedesktop.org>
27  */
28 
29 #ifndef FREEDRENO_UTIL_H_
30 #define FREEDRENO_UTIL_H_
31 
32 #include <freedreno_drmif.h>
33 #include <freedreno_ringbuffer.h>
34 
35 #include "pipe/p_format.h"
36 #include "pipe/p_state.h"
37 #include "util/u_debug.h"
38 #include "util/u_math.h"
39 #include "util/u_half.h"
40 #include "util/u_dynarray.h"
41 #include "util/u_pack_color.h"
42 
43 #include "disasm.h"
44 #include "adreno_common.xml.h"
45 #include "adreno_pm4.xml.h"
46 
47 enum adreno_rb_depth_format fd_pipe2depth(enum pipe_format format);
48 enum pc_di_index_size fd_pipe2index(enum pipe_format format);
49 enum pipe_format fd_gmem_restore_format(enum pipe_format format);
50 enum adreno_rb_blend_factor fd_blend_factor(unsigned factor);
51 enum adreno_pa_su_sc_draw fd_polygon_mode(unsigned mode);
52 enum adreno_stencil_op fd_stencil_op(unsigned op);
53 
54 #define A3XX_MAX_MIP_LEVELS 14
55 /* TBD if it is same on a2xx, but for now: */
56 #define MAX_MIP_LEVELS A3XX_MAX_MIP_LEVELS
57 
58 #define A2XX_MAX_RENDER_TARGETS 1
59 #define A3XX_MAX_RENDER_TARGETS 4
60 #define A4XX_MAX_RENDER_TARGETS 8
61 #define A5XX_MAX_RENDER_TARGETS 8
62 
63 #define MAX_RENDER_TARGETS A5XX_MAX_RENDER_TARGETS
64 
65 #define FD_DBG_MSGS     0x0001
66 #define FD_DBG_DISASM   0x0002
67 #define FD_DBG_DCLEAR   0x0004
68 #define FD_DBG_DDRAW    0x0008
69 #define FD_DBG_NOSCIS   0x0010
70 #define FD_DBG_DIRECT   0x0020
71 #define FD_DBG_NOBYPASS 0x0040
72 #define FD_DBG_FRAGHALF 0x0080
73 #define FD_DBG_NOBIN    0x0100
74 #define FD_DBG_OPTMSGS  0x0200
75 #define FD_DBG_GLSL120  0x0400
76 #define FD_DBG_SHADERDB 0x0800
77 #define FD_DBG_FLUSH    0x1000
78 #define FD_DBG_DEQP     0x2000
79 #define FD_DBG_INORDER  0x4000
80 #define FD_DBG_BSTAT    0x8000
81 #define FD_DBG_NOGROW  0x10000
82 #define FD_DBG_LRZ     0x20000
83 #define FD_DBG_NOINDR  0x40000
84 #define FD_DBG_NOBLIT  0x80000
85 #define FD_DBG_HIPRIO 0x100000
86 #define FD_DBG_TTILE  0x200000
87 
88 extern int fd_mesa_debug;
89 extern bool fd_binning_enabled;
90 
/* Debug-output helper: prints only when the FD_DBG_MSGS flag is set in
 * fd_mesa_debug.  Uses the standard C99 __func__ identifier (rather than
 * the GNU-specific __FUNCTION__ extension) to tag the call site.
 */
#define DBG(fmt, ...) \
		do { if (fd_mesa_debug & FD_DBG_MSGS) \
			debug_printf("%s:%d: "fmt "\n", \
				__func__, __LINE__, ##__VA_ARGS__); } while (0)

/* for conditionally setting boolean flag(s): yields 'val' when 'cond' is
 * true, else 0.  (Parameter renamed from 'bool', which collides with the
 * <stdbool.h> macro of the same name.)
 */
#define COND(cond, val) ((cond) ? (val) : 0)

/* encode a register offset into the CP_REG packet field (registers are
 * addressed relative to base 0x2000):
 */
#define CP_REG(reg) ((0x4 << 16) | ((unsigned int)((reg) - (0x2000))))
100 
/* Build the 32-bit draw-initiator dword for a CP draw packet from its
 * component fields.  The index size is split across two bit positions
 * (bit 11 for the low bit, bit 13 for the high bit).
 */
static inline uint32_t DRAW(enum pc_di_primtype prim_type,
		enum pc_di_src_sel source_select, enum pc_di_index_size index_size,
		enum pc_di_vis_cull_mode vis_cull_mode,
		uint8_t instances)
{
	uint32_t dword = 0;

	dword |= prim_type         << 0;
	dword |= source_select     << 6;
	dword |= vis_cull_mode     << 9;
	dword |= (index_size & 1)  << 11;
	dword |= (index_size >> 1) << 13;
	dword |= 1                 << 14;	/* NOTE(review): fixed bit, meaning not visible here -- confirm vs pm4 docs */
	dword |= instances         << 24;

	return dword;
}
114 
/* for tracking cmdstream positions that need to be patched: */
struct fd_cs_patch {
	uint32_t *cs;	/* location in the cmdstream to rewrite later */
	uint32_t val;	/* value to store at *cs when the patch is applied */
};
/* number of fd_cs_patch entries currently stored in dynarray 'buf': */
#define fd_patch_num_elements(buf) ((buf)->size / sizeof(struct fd_cs_patch))
/* access the i'th fd_cs_patch entry of dynarray 'buf': */
#define fd_patch_element(buf, i)   util_dynarray_element(buf, struct fd_cs_patch, i)
122 
123 static inline enum pipe_format
pipe_surface_format(struct pipe_surface * psurf)124 pipe_surface_format(struct pipe_surface *psurf)
125 {
126 	if (!psurf)
127 		return PIPE_FORMAT_NONE;
128 	return psurf->format;
129 }
130 
131 static inline bool
fd_surface_half_precision(const struct pipe_surface * psurf)132 fd_surface_half_precision(const struct pipe_surface *psurf)
133 {
134 	enum pipe_format format;
135 
136 	if (!psurf)
137 		return true;
138 
139 	format = psurf->format;
140 
141 	/* colors are provided in consts, which go through cov.f32f16, which will
142 	 * break these values
143 	 */
144 	if (util_format_is_pure_integer(format))
145 		return false;
146 
147 	/* avoid losing precision on 32-bit float formats */
148 	if (util_format_is_float(format) &&
149 		util_format_get_component_bits(format, UTIL_FORMAT_COLORSPACE_RGB, 0) == 32)
150 		return false;
151 
152 	return true;
153 }
154 
155 static inline unsigned
fd_sampler_first_level(const struct pipe_sampler_view * view)156 fd_sampler_first_level(const struct pipe_sampler_view *view)
157 {
158 	if (view->target == PIPE_BUFFER)
159 		return 0;
160 	return view->u.tex.first_level;
161 }
162 
163 static inline unsigned
fd_sampler_last_level(const struct pipe_sampler_view * view)164 fd_sampler_last_level(const struct pipe_sampler_view *view)
165 {
166 	if (view->target == PIPE_BUFFER)
167 		return 0;
168 	return view->u.tex.last_level;
169 }
170 
171 static inline bool
fd_half_precision(struct pipe_framebuffer_state * pfb)172 fd_half_precision(struct pipe_framebuffer_state *pfb)
173 {
174 	unsigned i;
175 
176 	for (i = 0; i < pfb->nr_cbufs; i++)
177 		if (!fd_surface_half_precision(pfb->cbufs[i]))
178 			return false;
179 
180 	return true;
181 }
182 
183 #define LOG_DWORDS 0
184 
185 static inline void emit_marker(struct fd_ringbuffer *ring, int scratch_idx);
186 static inline void emit_marker5(struct fd_ringbuffer *ring, int scratch_idx);
187 
188 static inline void
OUT_RING(struct fd_ringbuffer * ring,uint32_t data)189 OUT_RING(struct fd_ringbuffer *ring, uint32_t data)
190 {
191 	if (LOG_DWORDS) {
192 		DBG("ring[%p]: OUT_RING   %04x:  %08x", ring,
193 				(uint32_t)(ring->cur - ring->last_start), data);
194 	}
195 	fd_ringbuffer_emit(ring, data);
196 }
197 
198 /* like OUT_RING() but appends a cmdstream patch point to 'buf' */
199 static inline void
OUT_RINGP(struct fd_ringbuffer * ring,uint32_t data,struct util_dynarray * buf)200 OUT_RINGP(struct fd_ringbuffer *ring, uint32_t data,
201 		struct util_dynarray *buf)
202 {
203 	if (LOG_DWORDS) {
204 		DBG("ring[%p]: OUT_RINGP  %04x:  %08x", ring,
205 				(uint32_t)(ring->cur - ring->last_start), data);
206 	}
207 	util_dynarray_append(buf, struct fd_cs_patch, ((struct fd_cs_patch){
208 		.cs  = ring->cur++,
209 		.val = data,
210 	}));
211 }
212 
213 /*
214  * NOTE: OUT_RELOC*() is 2 dwords (64b) on a5xx+
215  */
216 
217 static inline void
OUT_RELOC(struct fd_ringbuffer * ring,struct fd_bo * bo,uint32_t offset,uint64_t or,int32_t shift)218 OUT_RELOC(struct fd_ringbuffer *ring, struct fd_bo *bo,
219 		uint32_t offset, uint64_t or, int32_t shift)
220 {
221 	if (LOG_DWORDS) {
222 		DBG("ring[%p]: OUT_RELOC   %04x:  %p+%u << %d", ring,
223 				(uint32_t)(ring->cur - ring->last_start), bo, offset, shift);
224 	}
225 	debug_assert(offset < fd_bo_size(bo));
226 	fd_ringbuffer_reloc2(ring, &(struct fd_reloc){
227 		.bo = bo,
228 		.flags = FD_RELOC_READ,
229 		.offset = offset,
230 		.or = or,
231 		.shift = shift,
232 		.orhi = or >> 32,
233 	});
234 }
235 
236 static inline void
OUT_RELOCW(struct fd_ringbuffer * ring,struct fd_bo * bo,uint32_t offset,uint64_t or,int32_t shift)237 OUT_RELOCW(struct fd_ringbuffer *ring, struct fd_bo *bo,
238 		uint32_t offset, uint64_t or, int32_t shift)
239 {
240 	if (LOG_DWORDS) {
241 		DBG("ring[%p]: OUT_RELOCW  %04x:  %p+%u << %d", ring,
242 				(uint32_t)(ring->cur - ring->last_start), bo, offset, shift);
243 	}
244 	debug_assert(offset < fd_bo_size(bo));
245 	fd_ringbuffer_reloc2(ring, &(struct fd_reloc){
246 		.bo = bo,
247 		.flags = FD_RELOC_READ | FD_RELOC_WRITE,
248 		.offset = offset,
249 		.or = or,
250 		.shift = shift,
251 		.orhi = or >> 32,
252 	});
253 }
254 
BEGIN_RING(struct fd_ringbuffer * ring,uint32_t ndwords)255 static inline void BEGIN_RING(struct fd_ringbuffer *ring, uint32_t ndwords)
256 {
257 	if (ring->cur + ndwords >= ring->end)
258 		fd_ringbuffer_grow(ring, ndwords);
259 }
260 
261 static inline uint32_t
__gpu_id(struct fd_ringbuffer * ring)262 __gpu_id(struct fd_ringbuffer *ring)
263 {
264 	uint64_t val;
265 	fd_pipe_get_param(ring->pipe, FD_GPU_ID, &val);
266 	return val;
267 }
268 
269 static inline void
OUT_PKT0(struct fd_ringbuffer * ring,uint16_t regindx,uint16_t cnt)270 OUT_PKT0(struct fd_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
271 {
272 	debug_assert(__gpu_id(ring) < 500);
273 	BEGIN_RING(ring, cnt+1);
274 	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
275 }
276 
/* Emit a type-2 (nop) packet.  Only valid pre-a5xx. */
static inline void
OUT_PKT2(struct fd_ringbuffer *ring)
{
	debug_assert(__gpu_id(ring) < 500);
	BEGIN_RING(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}
284 
285 static inline void
OUT_PKT3(struct fd_ringbuffer * ring,uint8_t opcode,uint16_t cnt)286 OUT_PKT3(struct fd_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
287 {
288 	debug_assert(__gpu_id(ring) < 500);
289 	BEGIN_RING(ring, cnt+1);
290 	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
291 }
292 
293 /*
294  * Starting with a5xx, pkt4/pkt7 are used instead of pkt0/pkt3
295  */
296 
/* Compute the odd-parity bit for 'val': returns 1 when the number of set
 * bits in 'val' is even (so that bit makes the total odd), else 0.
 */
static inline unsigned
_odd_parity_bit(unsigned val)
{
	unsigned parity = 0;

	/* Kernighan bit-count: clear the lowest set bit each iteration,
	 * toggling parity:
	 */
	while (val) {
		val &= val - 1;
		parity ^= 1;
	}

	/* invert for *odd* parity: */
	return parity ^ 1;
}
309 
310 static inline void
OUT_PKT4(struct fd_ringbuffer * ring,uint16_t regindx,uint16_t cnt)311 OUT_PKT4(struct fd_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
312 {
313 	BEGIN_RING(ring, cnt+1);
314 	OUT_RING(ring, CP_TYPE4_PKT | cnt |
315 			(_odd_parity_bit(cnt) << 7) |
316 			((regindx & 0x3ffff) << 8) |
317 			((_odd_parity_bit(regindx) << 27)));
318 }
319 
320 static inline void
OUT_PKT7(struct fd_ringbuffer * ring,uint8_t opcode,uint16_t cnt)321 OUT_PKT7(struct fd_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
322 {
323 	BEGIN_RING(ring, cnt+1);
324 	OUT_RING(ring, CP_TYPE7_PKT | cnt |
325 			(_odd_parity_bit(cnt) << 15) |
326 			((opcode & 0x7f) << 16) |
327 			((_odd_parity_bit(opcode) << 23)));
328 }
329 
/* Emit a wait-for-idle packet (pre-a5xx type-3 encoding). */
static inline void
OUT_WFI(struct fd_ringbuffer *ring)
{
	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
	OUT_RING(ring, 0x00000000);	/* payload dword (value unused) */
}
336 
/* Emit a wait-for-idle packet (a5xx+ type-7 encoding; no payload). */
static inline void
OUT_WFI5(struct fd_ringbuffer *ring)
{
	OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);
}
342 
/* Emit indirect-buffer packets referencing each cmd buffer of 'target'
 * into 'ring' (pre-a5xx packet format; a5xx+ uses __OUT_IB5()).
 * 'prefetch' selects the PFE (prefetch-enabled) vs PFD (prefetch-disabled)
 * variant of the packet.
 */
static inline void
__OUT_IB(struct fd_ringbuffer *ring, bool prefetch, struct fd_ringbuffer *target)
{
	unsigned count = fd_ringbuffer_cmd_count(target);

	/* this packet format is only valid pre-a5xx: */
	debug_assert(__gpu_id(ring) < 500);

	/* for debug after a lock up, write a unique counter value
	 * to scratch6 for each IB, to make it easier to match up
	 * register dumps to cmdstream.  The combination of IB and
	 * DRAW (scratch7) is enough to "triangulate" the particular
	 * draw that caused lockup.
	 */
	emit_marker(ring, 6);

	for (unsigned i = 0; i < count; i++) {
		uint32_t dwords;
		/* packet header, then the reloc (gpu address of the i'th cmd
		 * buffer), then its size in dwords -- emission order matters:
		 */
		OUT_PKT3(ring, prefetch ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
		dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
		assert(dwords > 0);
		OUT_RING(ring, dwords);
		/* NOTE(review): trailing type-2 nop after each IB -- presumably
		 * required padding/workaround; confirm against hw docs.
		 */
		OUT_PKT2(ring);
	}

	emit_marker(ring, 6);
}
369 
/* Emit indirect-buffer packets referencing each cmd buffer of 'target'
 * into 'ring', using the a5xx+ type-7 CP_INDIRECT_BUFFER encoding
 * (3 dwords: 64b address reloc + size).
 */
static inline void
__OUT_IB5(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
	unsigned count = fd_ringbuffer_cmd_count(target);

	/* for debug after a lock up, write a unique counter value
	 * to scratch6 for each IB, to make it easier to match up
	 * register dumps to cmdstream.  The combination of IB and
	 * DRAW (scratch7) is enough to "triangulate" the particular
	 * draw that caused lockup.
	 */
	emit_marker5(ring, 6);

	for (unsigned i = 0; i < count; i++) {
		uint32_t dwords;
		/* packet header, then the reloc (2 dwords of gpu address on
		 * a5xx+), then the size in dwords -- emission order matters:
		 */
		OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
		dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
		assert(dwords > 0);
		OUT_RING(ring, dwords);
	}

	emit_marker5(ring, 6);
}
393 
394 /* CP_SCRATCH_REG4 is used to hold base address for query results: */
395 // XXX annoyingly scratch regs move on a5xx.. and additionally different
396 // packet types.. so freedreno_query_hw is going to need a bit of
397 // rework..
398 #define HW_QUERY_BASE_REG REG_AXXX_CP_SCRATCH_REG4
399 
400 static inline void
emit_marker(struct fd_ringbuffer * ring,int scratch_idx)401 emit_marker(struct fd_ringbuffer *ring, int scratch_idx)
402 {
403 	extern unsigned marker_cnt;
404 	unsigned reg = REG_AXXX_CP_SCRATCH_REG0 + scratch_idx;
405 	assert(reg != HW_QUERY_BASE_REG);
406 	if (reg == HW_QUERY_BASE_REG)
407 		return;
408 	OUT_PKT0(ring, reg, 1);
409 	OUT_RING(ring, ++marker_cnt);
410 }
411 
/* Write an incrementing counter to a scratch register (a5xx+ type-4
 * encoding) so post-lockup register dumps can be matched to cmdstream.
 */
static inline void
emit_marker5(struct fd_ringbuffer *ring, int scratch_idx)
{
	extern unsigned marker_cnt;
	/* NOTE(review): hardcoded scratch reg base, standing in for
	 * REG_A5XX_CP_SCRATCH_REG(scratch_idx) -- confirm vs a5xx regs.
	 */
	unsigned scratch_reg = 0x00000b78 + scratch_idx;

	OUT_PKT4(ring, scratch_reg, 1);
	OUT_RING(ring, ++marker_cnt);
}
421 
/* helper to get numeric value from environment variable..  mostly
 * just leaving this here because it is helpful to brute-force figure
 * out unknown formats, etc, which blob driver does not support:
 */
static inline uint32_t env2u(const char *envvar)
{
	const char *val = getenv(envvar);
	/* base 0: accepts decimal, 0x-prefixed hex and 0-prefixed octal;
	 * unset variables read as 0:
	 */
	return val ? strtoul(val, NULL, 0) : 0;
}
433 
/* Pack a float RGBA color into a single dword in the given pipe_format.
 * Only the first 32 bits of the packed color are returned, so this is
 * only meaningful for formats of at most 32bpp.
 */
static inline uint32_t
pack_rgba(enum pipe_format format, const float *rgba)
{
	union util_color uc;
	util_pack_color(rgba, format, &uc);
	return uc.ui[0];
}
441 
/*
 * swap - swap value of @a and @b
 */
#define swap(a, b) \
	do { __typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/* iterate 'b' over the index of each set bit in 'mask' (uses u_bit_scan): */
#define foreach_bit(b, mask) \
	for (uint32_t _m = (mask); _m && ({(b) = u_bit_scan(&_m); 1;});)


/* single-bit mask; 'bit' is parenthesized so expression arguments
 * (e.g. BIT(a | b)) expand with the intended precedence:
 */
#define BIT(bit) (1u << (bit))
453 
454 /*
455  * a4xx+ helpers:
456  */
457 
458 static inline enum a4xx_state_block
fd4_stage2shadersb(enum shader_t type)459 fd4_stage2shadersb(enum shader_t type)
460 {
461 	switch (type) {
462 	case SHADER_VERTEX:
463 		return SB4_VS_SHADER;
464 	case SHADER_FRAGMENT:
465 		return SB4_FS_SHADER;
466 	case SHADER_COMPUTE:
467 		return SB4_CS_SHADER;
468 	default:
469 		unreachable("bad shader type");
470 		return ~0;
471 	}
472 }
473 
474 #endif /* FREEDRENO_UTIL_H_ */
475