/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 *
 */

#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "evergreen_compute.h"
#include "tgsi/tgsi_parse.h"
#include "util/list.h"
#include "util/u_draw_quad.h"
#include "util/u_memory.h"
#include "util/format/u_format_s3tc.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon_video.h"
#include <inttypes.h>
#include <sys/utsname.h>
#include <stdlib.h>

#ifdef LLVM_AVAILABLE
#include <llvm-c/TargetMachine.h>
#endif

struct r600_multi_fence {
	struct pipe_reference reference;
	struct pipe_fence_handle *gfx;
	struct pipe_fence_handle *sdma;

	/* If the context wasn't flushed at fence creation, this is non-NULL. */
	struct {
		struct r600_common_context *ctx;
		unsigned ib_index;
	} gfx_unflushed;
};

/*
 * pipe_context
 */

/**
 * Write an EOP event.
 *
 * \param event        EVENT_TYPE_*
 * \param event_flags  Optional cache flush flags (TC)
 * \param data_sel     1 = fence, 3 = timestamp
 * \param buf          Buffer
 * \param va           GPU address
 * \param new_fence    Fence value to write for this event
 * \param query_type   Unused by this function
 */
void r600_gfx_write_event_eop(struct r600_common_context *ctx,
                              unsigned event, unsigned event_flags,
                              unsigned data_sel,
                              struct r600_resource *buf, uint64_t va,
                              uint32_t new_fence, unsigned query_type)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned sel = EOP_DATA_SEL(data_sel);

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
	radeon_emit(cs, op);
	radeon_emit(cs, va);
	radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
	radeon_emit(cs, new_fence); /* immediate data */
	radeon_emit(cs, 0); /* unused */

	if (buf)
		r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
				RADEON_PRIO_QUERY);
}

unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen)
{
	unsigned dwords = 6;

	if (!screen->info.r600_has_virtual_memory)
		dwords += 2;

	return dwords;
}

void r600_gfx_wait_fence(struct r600_common_context *ctx,
                         struct r600_resource *buf,
                         uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_cmdbuf *cs = &ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */

	if (buf)
		r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
				RADEON_PRIO_QUERY);
}
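
/* Usage sketch (illustrative only; this file does not call the pair itself):
 * a bottom-of-pipe fence written with the EOP helper and then waited on with
 * the WAIT_REG_MEM helper. `fence_buf` is a hypothetical r600_resource, and
 * EVENT_TYPE_BOTTOM_OF_PIPE_TS is assumed to come from r600d_common.h.
 *
 *   uint64_t va = fence_buf->gpu_address;
 *
 *   // data_sel = 1: write the 32-bit fence value 0x1 to `va`.
 *   r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0, 1,
 *                            fence_buf, va, 0x1, 0);
 *
 *   // Stall the CP until the word at `va` equals 0x1 (full 32-bit mask).
 *   r600_gfx_wait_fence(ctx, fence_buf, va, 0x1, 0xffffffff);
 */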

void r600_draw_rectangle(struct blitter_context *blitter,
                         void *vertex_elements_cso,
                         blitter_get_vs_func get_vs,
                         int x1, int y1, int x2, int y2,
                         float depth, unsigned num_instances,
                         enum blitter_attrib_type type,
                         const union blitter_attrib *attrib)
{
	struct r600_common_context *rctx =
		(struct r600_common_context*)util_blitter_get_pipe(blitter);
	struct pipe_viewport_state viewport;
	struct pipe_resource *buf = NULL;
	unsigned offset = 0;
	float *vb;

	rctx->b.bind_vertex_elements_state(&rctx->b, vertex_elements_cso);
	rctx->b.bind_vs_state(&rctx->b, get_vs(blitter));

	/* Some operations (like color resolve on r6xx) don't work
	 * with the conventional primitive types.
	 * One that works is PT_RECTLIST, which we use here. */

	/* setup viewport */
	viewport.scale[0] = 1.0f;
	viewport.scale[1] = 1.0f;
	viewport.scale[2] = 1.0f;
	viewport.translate[0] = 0.0f;
	viewport.translate[1] = 0.0f;
	viewport.translate[2] = 0.0f;
	rctx->b.set_viewport_states(&rctx->b, 0, 1, &viewport);

	/* Upload vertices. The hw rectangle has only 3 vertices;
	 * the 4th one is derived from the first 3.
	 * The vertex specification should match u_blitter's vertex element state. */
	u_upload_alloc(rctx->b.stream_uploader, 0, sizeof(float) * 24,
		       rctx->screen->info.tcc_cache_line_size,
		       &offset, &buf, (void**)&vb);
	if (!buf)
		return;

	vb[0] = x1;
	vb[1] = y1;
	vb[2] = depth;
	vb[3] = 1;

	vb[8] = x1;
	vb[9] = y2;
	vb[10] = depth;
	vb[11] = 1;

	vb[16] = x2;
	vb[17] = y1;
	vb[18] = depth;
	vb[19] = 1;

	switch (type) {
	case UTIL_BLITTER_ATTRIB_COLOR:
		memcpy(vb+4, attrib->color, sizeof(float)*4);
		memcpy(vb+12, attrib->color, sizeof(float)*4);
		memcpy(vb+20, attrib->color, sizeof(float)*4);
		break;
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
		vb[6] = vb[14] = vb[22] = attrib->texcoord.z;
		vb[7] = vb[15] = vb[23] = attrib->texcoord.w;
		/* fall through */
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
		vb[4] = attrib->texcoord.x1;
		vb[5] = attrib->texcoord.y1;
		vb[12] = attrib->texcoord.x1;
		vb[13] = attrib->texcoord.y2;
		vb[20] = attrib->texcoord.x2;
		vb[21] = attrib->texcoord.y1;
		break;
	default:; /* Nothing to do. */
	}

	/* draw */
	struct pipe_vertex_buffer vbuffer = {};
	vbuffer.buffer.resource = buf;
	vbuffer.stride = 2 * 4 * sizeof(float); /* vertex size */
	vbuffer.buffer_offset = offset;

	rctx->b.set_vertex_buffers(&rctx->b, blitter->vb_slot, 1, 0, false, &vbuffer);
	util_draw_arrays_instanced(&rctx->b, R600_PRIM_RECTANGLE_LIST, 0, 3,
				   0, num_instances);
	pipe_resource_reference(&buf, NULL);
}
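
/* Descriptive note on the upload above: the 24 floats are written as 3
 * vertices of 8 floats each (position plus one 4-component attribute); the
 * hardware derives the 4th corner of the RECTLIST primitive:
 *
 *   vb[0..7]   = { x1, y1, depth, 1,  attr0, attr1, attr2, attr3 }
 *   vb[8..15]  = { x1, y2, depth, 1,  attr0, attr1, attr2, attr3 }
 *   vb[16..23] = { x2, y1, depth, 1,  attr0, attr1, attr2, attr3 }
 *
 * which matches vbuffer.stride = 2 * 4 * sizeof(float) = 32 bytes per vertex.
 */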

static void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
{
	struct radeon_cmdbuf *cs = &rctx->dma.cs;

	if (rctx->chip_class >= EVERGREEN)
		radeon_emit(cs, 0xf0000000); /* NOP */
	else {
		/* TODO: R600-R700 should use the FENCE packet.
		 * CS checker support is required. */
	}
}

void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
                         struct r600_resource *dst, struct r600_resource *src)
{
	uint64_t vram = (uint64_t)ctx->dma.cs.used_vram_kb * 1024;
	uint64_t gtt = (uint64_t)ctx->dma.cs.used_gart_kb * 1024;

	if (dst) {
		vram += dst->vram_usage;
		gtt += dst->gart_usage;
	}
	if (src) {
		vram += src->vram_usage;
		gtt += src->gart_usage;
	}

	/* Flush the GFX IB if DMA depends on it. */
	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ((dst &&
	      ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, dst->buf,
					       RADEON_USAGE_READWRITE)) ||
	     (src &&
	      ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, src->buf,
					       RADEON_USAGE_WRITE))))
		ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);

	/* Flush if there's not enough space, or if the memory usage per IB
	 * is too large.
	 *
	 * IBs using too little memory are limited by the IB submission overhead.
	 * IBs using too much memory are limited by the kernel/TTM overhead.
	 * Too long IBs create CPU-GPU pipeline bubbles and add latency.
	 *
	 * This heuristic makes sure that DMA requests are executed
	 * very soon after the call is made and lowers memory usage.
	 * It improves texture upload performance by keeping the DMA
	 * engine busy while uploads are being submitted.
	 */
	num_dw++; /* for emit_wait_idle below */
	if (!ctx->ws->cs_check_space(&ctx->dma.cs, num_dw, false) ||
	    ctx->dma.cs.used_vram_kb + ctx->dma.cs.used_gart_kb > 64 * 1024 ||
	    !radeon_cs_memory_below_limit(ctx->screen, &ctx->dma.cs, vram, gtt)) {
		ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
		assert((num_dw + ctx->dma.cs.current.cdw) <= ctx->dma.cs.current.max_dw);
	}

	/* Wait for idle if either buffer has been used in the IB before to
	 * prevent read-after-write hazards.
	 */
	if ((dst &&
	     ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, dst->buf,
					      RADEON_USAGE_READWRITE)) ||
	    (src &&
	     ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs, src->buf,
					      RADEON_USAGE_WRITE)))
		r600_dma_emit_wait_idle(ctx);

	/* If GPUVM is not supported, the CS checker needs 2 entries
	 * in the buffer list per packet, which has to be done manually.
	 */
	if (ctx->screen->info.r600_has_virtual_memory) {
		if (dst)
			radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
						  RADEON_USAGE_WRITE, 0);
		if (src)
			radeon_add_to_buffer_list(ctx, &ctx->dma, src,
						  RADEON_USAGE_READ, 0);
	}

	/* This function is called before all DMA calls, so increment this. */
	ctx->num_dma_calls++;
}
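
/* Usage sketch (hypothetical packet emitter; the real SDMA copy/clear writers
 * live in the chip-specific files): every DMA path is expected to reserve
 * space first, then emit its packets. The 5-dword packet size below is an
 * assumed example value.
 *
 *   static void emit_dma_copy(struct r600_common_context *ctx,
 *                             struct r600_resource *dst,
 *                             struct r600_resource *src,
 *                             unsigned ncopy)
 *   {
 *       r600_need_dma_space(ctx, ncopy * 5, dst, src);
 *       // ... radeon_emit(&ctx->dma.cs, ...) for each copy packet ...
 *   }
 */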

void r600_preflush_suspend_features(struct r600_common_context *ctx)
{
	/* suspend queries */
	if (!list_is_empty(&ctx->active_queries))
		r600_suspend_queries(ctx);

	ctx->streamout.suspended = false;
	if (ctx->streamout.begin_emitted) {
		r600_emit_streamout_end(ctx);
		ctx->streamout.suspended = true;
	}
}

void r600_postflush_resume_features(struct r600_common_context *ctx)
{
	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (!list_is_empty(&ctx->active_queries))
		r600_resume_queries(ctx);
}
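
/* These two helpers are meant to bracket an IB submission. A flush callback
 * following this pattern would look roughly like the sketch below (simplified
 * and illustrative; the real gfx flush lives in the chip-specific code):
 *
 *   r600_preflush_suspend_features(rctx);   // stop queries and streamout
 *   // ... emit any remaining state and submit the IB ...
 *   r600_postflush_resume_features(rctx);   // re-arm them in the new IB
 */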

static void r600_fence_server_sync(struct pipe_context *ctx,
                                   struct pipe_fence_handle *fence)
{
	/* radeon synchronizes all rings by default and will not implement
	 * fence imports.
	 */
}

static void r600_flush_from_st(struct pipe_context *ctx,
                               struct pipe_fence_handle **fence,
                               unsigned flags)
{
	struct pipe_screen *screen = ctx->screen;
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_winsys *ws = rctx->ws;
	struct pipe_fence_handle *gfx_fence = NULL;
	struct pipe_fence_handle *sdma_fence = NULL;
	bool deferred_fence = false;
	unsigned rflags = PIPE_FLUSH_ASYNC;

	if (flags & PIPE_FLUSH_END_OF_FRAME)
		rflags |= PIPE_FLUSH_END_OF_FRAME;

	/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
	if (rctx->dma.cs.priv)
		rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);

	if (!radeon_emitted(&rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
		if (fence)
			ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
		if (!(flags & PIPE_FLUSH_DEFERRED))
			ws->cs_sync_flush(&rctx->gfx.cs);
	} else {
		/* Instead of flushing, create a deferred fence. Constraints:
		 * - the gallium frontend must allow a deferred flush.
		 * - the gallium frontend must request a fence.
		 * Thread safety in fence_finish must be ensured by the gallium frontend.
		 */
		if (flags & PIPE_FLUSH_DEFERRED && fence) {
			gfx_fence = rctx->ws->cs_get_next_fence(&rctx->gfx.cs);
			deferred_fence = true;
		} else {
			rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
		}
	}

	/* Both engines can signal out of order, so we need to keep both fences. */
	if (fence) {
		struct r600_multi_fence *multi_fence =
			CALLOC_STRUCT(r600_multi_fence);
		if (!multi_fence) {
			ws->fence_reference(&sdma_fence, NULL);
			ws->fence_reference(&gfx_fence, NULL);
			goto finish;
		}

		multi_fence->reference.count = 1;
		/* If both fences are NULL, fence_finish will always return true. */
		multi_fence->gfx = gfx_fence;
		multi_fence->sdma = sdma_fence;

		if (deferred_fence) {
			multi_fence->gfx_unflushed.ctx = rctx;
			multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
		}

		screen->fence_reference(screen, fence, NULL);
		*fence = (struct pipe_fence_handle*)multi_fence;
	}
finish:
	if (!(flags & PIPE_FLUSH_DEFERRED)) {
		if (rctx->dma.cs.priv)
			ws->cs_sync_flush(&rctx->dma.cs);
		ws->cs_sync_flush(&rctx->gfx.cs);
	}
}
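
/* The deferred-flush contract from the frontend's side (an illustrative
 * sketch; `pctx` is a hypothetical caller's pipe_context):
 *
 *   struct pipe_fence_handle *fence = NULL;
 *   pctx->flush(pctx, &fence, PIPE_FLUSH_DEFERRED);
 *   // Nothing has necessarily been submitted yet: the multi-fence holds an
 *   // "unflushed" gfx fence, and r600_fence_finish() performs the actual
 *   // flush on the first wait.
 *   pctx->screen->fence_finish(pctx->screen, pctx, fence,
 *                              PIPE_TIMEOUT_INFINITE);
 */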

static void r600_flush_dma_ring(void *ctx, unsigned flags,
                                struct pipe_fence_handle **fence)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_cmdbuf *cs = &rctx->dma.cs;
	struct radeon_saved_cs saved;
	bool check_vm =
		(rctx->screen->debug_flags & DBG_CHECK_VM) &&
		rctx->check_vm_faults;

	if (!radeon_emitted(cs, 0)) {
		if (fence)
			rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
		return;
	}

	if (check_vm)
		radeon_save_cs(rctx->ws, cs, &saved, true);

	rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
	if (fence)
		rctx->ws->fence_reference(fence, rctx->last_sdma_fence);

	if (check_vm) {
		/* Use a conservative timeout of 800 ms, after which we won't
		 * wait any longer and assume the GPU is hung.
		 */
		rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);

		rctx->check_vm_faults(rctx, &saved, RING_DMA);
		radeon_clear_saved_cs(&saved);
	}
}

/**
 * Store a linearized copy of all chunks of \p cs together with the buffer
 * list in \p saved.
 */
void radeon_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
                    struct radeon_saved_cs *saved, bool get_buffer_list)
{
	uint32_t *buf;
	unsigned i;

	/* Save the IB chunks. */
	saved->num_dw = cs->prev_dw + cs->current.cdw;
	saved->ib = MALLOC(4 * saved->num_dw);
	if (!saved->ib)
		goto oom;

	buf = saved->ib;
	for (i = 0; i < cs->num_prev; ++i) {
		memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
		buf += cs->prev[i].cdw;
	}
	memcpy(buf, cs->current.buf, cs->current.cdw * 4);

	if (!get_buffer_list)
		return;

	/* Save the buffer list. */
	saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
	saved->bo_list = CALLOC(saved->bo_count,
				sizeof(saved->bo_list[0]));
	if (!saved->bo_list) {
		FREE(saved->ib);
		goto oom;
	}
	ws->cs_get_buffer_list(cs, saved->bo_list);

	return;

oom:
	fprintf(stderr, "%s: out of memory\n", __func__);
	memset(saved, 0, sizeof(*saved));
}

void radeon_clear_saved_cs(struct radeon_saved_cs *saved)
{
	FREE(saved->ib);
	FREE(saved->bo_list);

	memset(saved, 0, sizeof(*saved));
}

static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	return rctx->ws->ctx_query_reset_status(rctx->ctx, false, NULL);
}

static void r600_set_debug_callback(struct pipe_context *ctx,
                                    const struct pipe_debug_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->debug = *cb;
	else
		memset(&rctx->debug, 0, sizeof(rctx->debug));
}

static void r600_set_device_reset_callback(struct pipe_context *ctx,
                                           const struct pipe_device_reset_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->device_reset_callback = *cb;
	else
		memset(&rctx->device_reset_callback, 0,
		       sizeof(rctx->device_reset_callback));
}

bool r600_check_device_reset(struct r600_common_context *rctx)
{
	enum pipe_reset_status status;

	if (!rctx->device_reset_callback.reset)
		return false;

	if (!rctx->b.get_device_reset_status)
		return false;

	status = rctx->b.get_device_reset_status(&rctx->b);
	if (status == PIPE_NO_RESET)
		return false;

	rctx->device_reset_callback.reset(rctx->device_reset_callback.data, status);
	return true;
}
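
/* Usage sketch: how a frontend arms the notification consumed by
 * r600_check_device_reset() (hypothetical caller; `notify_reset` and `my_ctx`
 * are illustrative names):
 *
 *   static void notify_reset(void *data, enum pipe_reset_status status)
 *   {
 *       // e.g. mark the context lost for GL robustness extensions
 *   }
 *
 *   struct pipe_device_reset_callback cb = {
 *       .reset = notify_reset,
 *       .data = my_ctx,
 *   };
 *   pctx->set_device_reset_callback(pctx, &cb);
 */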

static void r600_dma_clear_buffer_fallback(struct pipe_context *ctx,
                                           struct pipe_resource *dst,
                                           uint64_t offset, uint64_t size,
                                           unsigned value)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	rctx->clear_buffer(ctx, dst, offset, size, value, R600_COHERENCY_NONE);
}

static bool r600_resource_commit(struct pipe_context *pctx,
                                 struct pipe_resource *resource,
                                 unsigned level, struct pipe_box *box,
                                 bool commit)
{
	struct r600_common_context *ctx = (struct r600_common_context *)pctx;
	struct r600_resource *res = r600_resource(resource);

	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 *     to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 *     triggered by some other, earlier operation.
	 */
	if (radeon_emitted(&ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
	}
	if (radeon_emitted(&ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(&ctx->dma.cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
	}

	ctx->ws->cs_sync_flush(&ctx->dma.cs);
	ctx->ws->cs_sync_flush(&ctx->gfx.cs);

	assert(resource->target == PIPE_BUFFER);

	return ctx->ws->buffer_commit(ctx->ws, res->buf, box->x, box->width, commit);
}
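
/* Usage sketch for the commit hook (illustrative; gallium calls this for
 * sparse resources). `buf` is a hypothetical sparse PIPE_BUFFER:
 *
 *   struct pipe_box box;
 *   u_box_1d(0, 64 * 1024, &box);                      // first 64 KiB
 *   pctx->resource_commit(pctx, buf, 0, &box, true);   // back with memory
 *   pctx->resource_commit(pctx, buf, 0, &box, false);  // release it again
 */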

bool r600_common_context_init(struct r600_common_context *rctx,
                              struct r600_common_screen *rscreen,
                              unsigned context_flags)
{
	slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers);
	slab_create_child(&rctx->pool_transfers_unsync, &rscreen->pool_transfers);

	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->chip_class = rscreen->chip_class;

	rctx->b.invalidate_resource = r600_invalidate_resource;
	rctx->b.resource_commit = r600_resource_commit;
	rctx->b.buffer_map = r600_buffer_transfer_map;
	rctx->b.texture_map = r600_texture_transfer_map;
	rctx->b.transfer_flush_region = r600_buffer_flush_region;
	rctx->b.buffer_unmap = r600_buffer_transfer_unmap;
	rctx->b.texture_unmap = r600_texture_transfer_unmap;
	rctx->b.texture_subdata = u_default_texture_subdata;
	rctx->b.flush = r600_flush_from_st;
	rctx->b.set_debug_callback = r600_set_debug_callback;
	rctx->b.fence_server_sync = r600_fence_server_sync;
	rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback;

	/* evergreen_compute.c has a special codepath for global buffers.
	 * Everything else can use the direct path.
	 */
	if ((rscreen->chip_class == EVERGREEN || rscreen->chip_class == CAYMAN) &&
	    (context_flags & PIPE_CONTEXT_COMPUTE_ONLY))
		rctx->b.buffer_subdata = u_default_buffer_subdata;
	else
		rctx->b.buffer_subdata = r600_buffer_subdata;

	rctx->b.get_device_reset_status = r600_get_reset_status;
	rctx->b.set_device_reset_callback = r600_set_device_reset_callback;

	r600_init_context_texture_functions(rctx);
	r600_init_viewport_functions(rctx);
	r600_streamout_init(rctx);
	r600_query_init(rctx);
	cayman_init_msaa(&rctx->b);

	u_suballocator_init(&rctx->allocator_zeroed_memory, &rctx->b, rscreen->info.gart_page_size,
			    0, PIPE_USAGE_DEFAULT, 0, true);

	rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
						  0, PIPE_USAGE_STREAM, 0);
	if (!rctx->b.stream_uploader)
		return false;

	rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
						 0, PIPE_USAGE_DEFAULT, 0);
	if (!rctx->b.const_uploader)
		return false;

	rctx->ctx = rctx->ws->ctx_create(rctx->ws);
	if (!rctx->ctx)
		return false;

	if (rscreen->info.num_rings[RING_DMA] && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->ws->cs_create(&rctx->dma.cs, rctx->ctx, RING_DMA,
				    r600_flush_dma_ring, rctx, false);
		rctx->dma.flush = r600_flush_dma_ring;
	}

	return true;
}
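
/* Usage sketch (simplified from the driver-specific context constructor in
 * r600_pipe.c; the error handling here is illustrative):
 *
 *   struct r600_context *rctx = CALLOC_STRUCT(r600_context);
 *   if (!rctx)
 *       return NULL;
 *   if (!r600_common_context_init(&rctx->b, &rscreen->b, flags)) {
 *       FREE(rctx);
 *       return NULL;
 *   }
 */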

void r600_common_context_cleanup(struct r600_common_context *rctx)
{
	if (rctx->query_result_shader)
		rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);

	rctx->ws->cs_destroy(&rctx->gfx.cs);
	rctx->ws->cs_destroy(&rctx->dma.cs);
	if (rctx->ctx)
		rctx->ws->ctx_destroy(rctx->ctx);

	if (rctx->b.stream_uploader)
		u_upload_destroy(rctx->b.stream_uploader);
	if (rctx->b.const_uploader)
		u_upload_destroy(rctx->b.const_uploader);

	slab_destroy_child(&rctx->pool_transfers);
	slab_destroy_child(&rctx->pool_transfers_unsync);

	u_suballocator_destroy(&rctx->allocator_zeroed_memory);
	rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
	rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
	r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}

/*
 * pipe_screen
 */

static const struct debug_named_value common_debug_options[] = {
	/* logging */
	{ "tex", DBG_TEX, "Print texture info" },
	{ "nir", DBG_NIR, "Enable experimental NIR shaders" },
	{ "compute", DBG_COMPUTE, "Print compute info" },
	{ "vm", DBG_VM, "Print virtual addresses when creating resources" },
	{ "info", DBG_INFO, "Print driver information" },

	/* shaders */
	{ "fs", DBG_FS, "Print fetch shaders" },
	{ "vs", DBG_VS, "Print vertex shaders" },
	{ "gs", DBG_GS, "Print geometry shaders" },
	{ "ps", DBG_PS, "Print pixel shaders" },
	{ "cs", DBG_CS, "Print compute shaders" },
	{ "tcs", DBG_TCS, "Print tessellation control shaders" },
	{ "tes", DBG_TES, "Print tessellation evaluation shaders" },
	{ "noir", DBG_NO_IR, "Don't print the LLVM IR"},
	{ "notgsi", DBG_NO_TGSI, "Don't print the TGSI"},
	{ "noasm", DBG_NO_ASM, "Don't print disassembled shaders"},
	{ "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },
	{ "checkir", DBG_CHECK_IR, "Enable additional sanity checks on shader IR" },
	{ "nooptvariant", DBG_NO_OPT_VARIANT, "Disable compiling optimized shader variants." },

	{ "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },
	{ "testvmfaultcp", DBG_TEST_VMFAULT_CP, "Invoke a CP VM fault test and exit." },
	{ "testvmfaultsdma", DBG_TEST_VMFAULT_SDMA, "Invoke a SDMA VM fault test and exit." },
	{ "testvmfaultshader", DBG_TEST_VMFAULT_SHADER, "Invoke a shader VM fault test and exit." },

	/* features */
	{ "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
	{ "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
	/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
	{ "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },
	{ "no2d", DBG_NO_2D_TILING, "Disable 2D tiling" },
	{ "notiling", DBG_NO_TILING, "Disable tiling" },
	{ "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on end-of-packet." },
	{ "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all operations when possible." },
	{ "precompile", DBG_PRECOMPILE, "Compile one shader variant at shader creation." },
	{ "nowc", DBG_NO_WC, "Disable GTT write combining" },
	{ "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },
	{ "unsafemath", DBG_UNSAFE_MATH, "Enable unsafe math shader optimizations" },

	DEBUG_NAMED_VALUE_END /* must be last */
};

static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
	return "X.Org";
}

static const char* r600_get_device_vendor(struct pipe_screen* pscreen)
{
	return "AMD";
}

static const char *r600_get_family_name(const struct r600_common_screen *rscreen)
{
	switch (rscreen->info.family) {
	case CHIP_R600: return "AMD R600";
	case CHIP_RV610: return "AMD RV610";
	case CHIP_RV630: return "AMD RV630";
	case CHIP_RV670: return "AMD RV670";
	case CHIP_RV620: return "AMD RV620";
	case CHIP_RV635: return "AMD RV635";
	case CHIP_RS780: return "AMD RS780";
	case CHIP_RS880: return "AMD RS880";
	case CHIP_RV770: return "AMD RV770";
	case CHIP_RV730: return "AMD RV730";
	case CHIP_RV710: return "AMD RV710";
	case CHIP_RV740: return "AMD RV740";
	case CHIP_CEDAR: return "AMD CEDAR";
	case CHIP_REDWOOD: return "AMD REDWOOD";
	case CHIP_JUNIPER: return "AMD JUNIPER";
	case CHIP_CYPRESS: return "AMD CYPRESS";
	case CHIP_HEMLOCK: return "AMD HEMLOCK";
	case CHIP_PALM: return "AMD PALM";
	case CHIP_SUMO: return "AMD SUMO";
	case CHIP_SUMO2: return "AMD SUMO2";
	case CHIP_BARTS: return "AMD BARTS";
	case CHIP_TURKS: return "AMD TURKS";
	case CHIP_CAICOS: return "AMD CAICOS";
	case CHIP_CAYMAN: return "AMD CAYMAN";
	case CHIP_ARUBA: return "AMD ARUBA";
	default: return "AMD unknown";
	}
}

static void r600_disk_cache_create(struct r600_common_screen *rscreen)
{
	/* Don't use the cache if shader dumping is enabled. */
	if (rscreen->debug_flags & DBG_ALL_SHADERS)
		return;

	struct mesa_sha1 ctx;
	unsigned char sha1[20];
	char cache_id[20 * 2 + 1];

	_mesa_sha1_init(&ctx);
	if (!disk_cache_get_function_identifier(r600_disk_cache_create,
						&ctx))
		return;

	_mesa_sha1_final(&ctx, sha1);
	disk_cache_format_hex_id(cache_id, sha1, 20 * 2);

	/* These flags affect shader compilation. */
	uint64_t shader_debug_flags =
		rscreen->debug_flags &
		(DBG_FS_CORRECT_DERIVS_AFTER_KILL |
		 DBG_UNSAFE_MATH);

	rscreen->disk_shader_cache =
		disk_cache_create(r600_get_family_name(rscreen),
				  cache_id,
				  shader_debug_flags);
}

static struct disk_cache *r600_get_disk_shader_cache(struct pipe_screen *pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;
	return rscreen->disk_shader_cache;
}

static const char* r600_get_name(struct pipe_screen* pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;

	return rscreen->renderer_string;
}

static float r600_get_paramf(struct pipe_screen* pscreen,
                             enum pipe_capf param)
{
	switch (param) {
	case PIPE_CAPF_MAX_LINE_WIDTH:
	case PIPE_CAPF_MAX_LINE_WIDTH_AA:
	case PIPE_CAPF_MAX_POINT_WIDTH:
	case PIPE_CAPF_MAX_POINT_WIDTH_AA:
		return 8191.0f;
	case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
		return 16.0f;
	case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
		return 16.0f;
	case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
	case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
	case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
		return 0.0f;
	}
	return 0.0f;
}

static int r600_get_video_param(struct pipe_screen *screen,
                                enum pipe_video_profile profile,
                                enum pipe_video_entrypoint entrypoint,
                                enum pipe_video_cap param)
{
	switch (param) {
	case PIPE_VIDEO_CAP_SUPPORTED:
		return vl_profile_supported(screen, profile, entrypoint);
	case PIPE_VIDEO_CAP_NPOT_TEXTURES:
		return 1;
	case PIPE_VIDEO_CAP_MAX_WIDTH:
	case PIPE_VIDEO_CAP_MAX_HEIGHT:
		return vl_video_buffer_max_size(screen);
	case PIPE_VIDEO_CAP_PREFERED_FORMAT:
		return PIPE_FORMAT_NV12;
	case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
		return true;
	case PIPE_VIDEO_CAP_MAX_LEVEL:
		return vl_level_supported(screen, profile);
	default:
		return 0;
	}
}

const char *r600_get_llvm_processor_name(enum radeon_family family)
{
	switch (family) {
	case CHIP_R600:
	case CHIP_RV630:
	case CHIP_RV635:
	case CHIP_RV670:
		return "r600";
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		return "rs880";
	case CHIP_RV710:
		return "rv710";
	case CHIP_RV730:
		return "rv730";
	case CHIP_RV740:
	case CHIP_RV770:
		return "rv770";
	case CHIP_PALM:
	case CHIP_CEDAR:
		return "cedar";
	case CHIP_SUMO:
	case CHIP_SUMO2:
		return "sumo";
	case CHIP_REDWOOD:
		return "redwood";
	case CHIP_JUNIPER:
		return "juniper";
	case CHIP_HEMLOCK:
	case CHIP_CYPRESS:
		return "cypress";
	case CHIP_BARTS:
		return "barts";
	case CHIP_TURKS:
		return "turks";
	case CHIP_CAICOS:
		return "caicos";
	case CHIP_CAYMAN:
	case CHIP_ARUBA:
		return "cayman";

	default:
		return "";
	}
}

static unsigned get_max_threads_per_block(struct r600_common_screen *screen,
                                          enum pipe_shader_ir ir_type)
{
	if (ir_type != PIPE_SHADER_IR_TGSI &&
	    ir_type != PIPE_SHADER_IR_NIR)
		return 256;
	if (screen->chip_class >= EVERGREEN)
		return 1024;
	return 256;
}

static int r600_get_compute_param(struct pipe_screen *screen,
                                  enum pipe_shader_ir ir_type,
                                  enum pipe_compute_cap param,
                                  void *ret)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	//TODO: select these params by asic
	switch (param) {
	case PIPE_COMPUTE_CAP_IR_TARGET: {
		const char *gpu;
		const char *triple = "r600--";
		gpu = r600_get_llvm_processor_name(rscreen->family);
		if (ret) {
			sprintf(ret, "%s-%s", gpu, triple);
		}
		/* +2 for dash and terminating NUL byte */
		return (strlen(triple) + strlen(gpu) + 2) * sizeof(char);
	}
	case PIPE_COMPUTE_CAP_GRID_DIMENSION:
		if (ret) {
			uint64_t *grid_dimension = ret;
			grid_dimension[0] = 3;
		}
		return 1 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
		if (ret) {
			uint64_t *grid_size = ret;
			grid_size[0] = 65535;
			grid_size[1] = 65535;
			grid_size[2] = 65535;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
		if (ret) {
			uint64_t *block_size = ret;
			unsigned threads_per_block = get_max_threads_per_block(rscreen, ir_type);
			block_size[0] = threads_per_block;
			block_size[1] = threads_per_block;
			block_size[2] = threads_per_block;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_threads_per_block = ret;
			*max_threads_per_block = get_max_threads_per_block(rscreen, ir_type);
		}
		return sizeof(uint64_t);
	case PIPE_COMPUTE_CAP_ADDRESS_BITS:
		if (ret) {
			uint32_t *address_bits = ret;
			address_bits[0] = 32;
		}
		return 1 * sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
		if (ret) {
			uint64_t *max_global_size = ret;
			uint64_t max_mem_alloc_size;

			r600_get_compute_param(screen, ir_type,
					       PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
					       &max_mem_alloc_size);

			/* In OpenCL, the MAX_MEM_ALLOC_SIZE must be at least
			 * 1/4 of the MAX_GLOBAL_SIZE. Since the
			 * MAX_MEM_ALLOC_SIZE is fixed for older kernels,
			 * make sure we never report more than
			 * 4 * MAX_MEM_ALLOC_SIZE.
			 */
			*max_global_size = MIN2(4 * max_mem_alloc_size,
						MAX2(rscreen->info.gart_size,
						     rscreen->info.vram_size));
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
		if (ret) {
			uint64_t *max_local_size = ret;
			/* Value reported by the closed source driver. */
			*max_local_size = 32768;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
		if (ret) {
			uint64_t *max_input_size = ret;
			/* Value reported by the closed source driver. */
			*max_input_size = 1024;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
		if (ret) {
			uint64_t *max_mem_alloc_size = ret;

			*max_mem_alloc_size = rscreen->info.max_alloc_size;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
		if (ret) {
			uint32_t *max_clock_frequency = ret;
			*max_clock_frequency = rscreen->info.max_shader_clock;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
		if (ret) {
			uint32_t *max_compute_units = ret;
			*max_compute_units = rscreen->info.num_good_compute_units;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
		if (ret) {
			uint32_t *images_supported = ret;
			*images_supported = 0;
		}
		return sizeof(uint32_t);
	case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
		break; /* unused */
	case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
		if (ret) {
			uint32_t *subgroup_size = ret;
			*subgroup_size = r600_wavefront_size(rscreen->family);
		}
		return sizeof(uint32_t);
	case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_variable_threads_per_block = ret;
			*max_variable_threads_per_block = 0;
		}
		return sizeof(uint64_t);
	}

	fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
	return 0;
}
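
/* Usage sketch (hypothetical compute frontend): caps are queried twice, once
 * with ret == NULL to learn the size, then again to fetch the data:
 *
 *   int size = screen->get_compute_param(screen, PIPE_SHADER_IR_NIR,
 *                                        PIPE_COMPUTE_CAP_MAX_GRID_SIZE, NULL);
 *   uint64_t grid[3];
 *   assert(size == sizeof(grid));
 *   screen->get_compute_param(screen, PIPE_SHADER_IR_NIR,
 *                             PIPE_COMPUTE_CAP_MAX_GRID_SIZE, grid);
 */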

static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	return 1000000 * rscreen->ws->query_value(rscreen->ws, RADEON_TIMESTAMP) /
		rscreen->info.clock_crystal_freq;
}

static void r600_fence_reference(struct pipe_screen *screen,
                                 struct pipe_fence_handle **dst,
                                 struct pipe_fence_handle *src)
{
	struct radeon_winsys *ws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence **rdst = (struct r600_multi_fence **)dst;
	struct r600_multi_fence *rsrc = (struct r600_multi_fence *)src;

	if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
		ws->fence_reference(&(*rdst)->gfx, NULL);
		ws->fence_reference(&(*rdst)->sdma, NULL);
		FREE(*rdst);
	}
	*rdst = rsrc;
}

static bool r600_fence_finish(struct pipe_screen *screen,
                              struct pipe_context *ctx,
                              struct pipe_fence_handle *fence,
                              uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
	struct r600_common_context *rctx;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	ctx = threaded_context_unwrap_sync(ctx);
	rctx = ctx ? (struct r600_common_context*)ctx : NULL;

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (rctx &&
	    rfence->gfx_unflushed.ctx == rctx &&
	    rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
		rctx->gfx.flush(rctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
		rfence->gfx_unflushed.ctx = NULL;

		if (!timeout)
			return false;

		/* Recompute the timeout after all that. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	return rws->fence_wait(rws, rfence->gfx, timeout);
}
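
/* Usage sketch (hypothetical caller): a bounded wait on a multi-fence. The
 * helper waits on the SDMA fence first, then the gfx fence, shrinking the
 * remaining timeout between the two waits:
 *
 *   // 10^9 ns = 1 second timeout.
 *   if (!screen->fence_finish(screen, NULL, fence, 1000ull * 1000 * 1000))
 *       fprintf(stderr, "GPU work still pending after 1s\n");
 */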

static void r600_query_memory_info(struct pipe_screen *screen,
                                   struct pipe_memory_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	unsigned vram_usage, gtt_usage;

	info->total_device_memory = rscreen->info.vram_size / 1024;
	info->total_staging_memory = rscreen->info.gart_size / 1024;

	/* The real TTM memory usage is somewhat random, because:
	 *
	 * 1) TTM delays freeing memory, because it can only free it after
	 *    fences expire.
	 *
	 * 2) The memory usage can be really low if big VRAM evictions are
	 *    taking place, but the real usage is well above the size of VRAM.
	 *
	 * Instead, return statistics of this process.
	 */
	vram_usage = ws->query_value(ws, RADEON_REQUESTED_VRAM_MEMORY) / 1024;
	gtt_usage = ws->query_value(ws, RADEON_REQUESTED_GTT_MEMORY) / 1024;

	info->avail_device_memory =
		vram_usage <= info->total_device_memory ?
			info->total_device_memory - vram_usage : 0;
	info->avail_staging_memory =
		gtt_usage <= info->total_staging_memory ?
			info->total_staging_memory - gtt_usage : 0;

	info->device_memory_evicted =
		ws->query_value(ws, RADEON_NUM_BYTES_MOVED) / 1024;

	/* Just return the number of evicted 64KB pages. */
	info->nr_device_memory_evictions = info->device_memory_evicted / 64;
}

struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
                                                  const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return r600_buffer_create(screen, templ, 256);
	} else {
		return r600_texture_create(screen, templ);
	}
}

static const void *
r600_get_compiler_options(struct pipe_screen *screen,
                          enum pipe_shader_ir ir,
                          enum pipe_shader_type shader)
{
	assert(ir == PIPE_SHADER_IR_NIR);

	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	return &rscreen->nir_options;
}

extern bool r600_lower_to_scalar_instr_filter(const nir_instr *instr, const void *);

static void r600_resource_destroy(struct pipe_screen *screen,
                                  struct pipe_resource *res)
{
	if (res->target == PIPE_BUFFER) {
		if (r600_resource(res)->compute_global_bo)
			r600_compute_global_buffer_destroy(screen, res);
		else
			r600_buffer_destroy(screen, res);
	} else {
		r600_texture_destroy(screen, res);
	}
}

bool r600_common_screen_init(struct r600_common_screen *rscreen,
                             struct radeon_winsys *ws)
{
	char family_name[32] = {}, kernel_version[128] = {};
	struct utsname uname_data;
	const char *chip_name;

	ws->query_info(ws, &rscreen->info, false, false);
	rscreen->ws = ws;

	chip_name = r600_get_family_name(rscreen);

	if (uname(&uname_data) == 0)
		snprintf(kernel_version, sizeof(kernel_version),
			 " / %s", uname_data.release);

	snprintf(rscreen->renderer_string, sizeof(rscreen->renderer_string),
		 "%s (%sDRM %i.%i.%i%s"
#ifdef LLVM_AVAILABLE
		 ", LLVM " MESA_LLVM_VERSION_STRING
#endif
		 ")",
		 chip_name, family_name, rscreen->info.drm_major,
		 rscreen->info.drm_minor, rscreen->info.drm_patchlevel,
		 kernel_version);

	rscreen->b.get_name = r600_get_name;
	rscreen->b.get_vendor = r600_get_vendor;
	rscreen->b.get_device_vendor = r600_get_device_vendor;
	rscreen->b.get_disk_shader_cache = r600_get_disk_shader_cache;
	rscreen->b.get_compute_param = r600_get_compute_param;
	rscreen->b.get_paramf = r600_get_paramf;
	rscreen->b.get_timestamp = r600_get_timestamp;
	rscreen->b.get_compiler_options = r600_get_compiler_options;
	rscreen->b.fence_finish = r600_fence_finish;
	rscreen->b.fence_reference = r600_fence_reference;
	rscreen->b.resource_destroy = r600_resource_destroy;
	rscreen->b.resource_from_user_memory = r600_buffer_from_user_memory;
	rscreen->b.query_memory_info = r600_query_memory_info;

	if (rscreen->info.has_video_hw.uvd_decode) {
		rscreen->b.get_video_param = rvid_get_video_param;
		rscreen->b.is_video_format_supported = rvid_is_format_supported;
	} else {
		rscreen->b.get_video_param = r600_get_video_param;
		rscreen->b.is_video_format_supported = vl_video_buffer_is_format_supported;
	}

	r600_init_screen_texture_functions(rscreen);
	r600_init_screen_query_functions(rscreen);

	rscreen->family = rscreen->info.family;
	rscreen->chip_class = rscreen->info.chip_class;
	rscreen->debug_flags |= debug_get_flags_option("R600_DEBUG", common_debug_options, 0);

	r600_disk_cache_create(rscreen);

	slab_create_parent(&rscreen->pool_transfers, sizeof(struct r600_transfer), 64);

	rscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
	if (rscreen->force_aniso >= 0) {
		printf("radeon: Forcing anisotropy filter to %ix\n",
		       /* round down to a power of two */
		       1 << util_logbase2(rscreen->force_aniso));
	}

	(void) mtx_init(&rscreen->aux_context_lock, mtx_plain);
	(void) mtx_init(&rscreen->gpu_load_mutex, mtx_plain);

	if (rscreen->debug_flags & DBG_INFO) {
		printf("pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
		       rscreen->info.pci_domain, rscreen->info.pci_bus,
		       rscreen->info.pci_dev, rscreen->info.pci_func);
		printf("pci_id = 0x%x\n", rscreen->info.pci_id);
		printf("family = %i (%s)\n", rscreen->info.family,
		       r600_get_family_name(rscreen));
		printf("chip_class = %i\n", rscreen->info.chip_class);
		printf("pte_fragment_size = %u\n", rscreen->info.pte_fragment_size);
		printf("gart_page_size = %u\n", rscreen->info.gart_page_size);
		printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
		printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size, 1024*1024));
		printf("vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_vis_size, 1024*1024));
		printf("max_alloc_size = %i MB\n",
		       (int)DIV_ROUND_UP(rscreen->info.max_alloc_size, 1024*1024));
		printf("min_alloc_size = %u\n", rscreen->info.min_alloc_size);
		printf("has_dedicated_vram = %u\n", rscreen->info.has_dedicated_vram);
		printf("r600_has_virtual_memory = %i\n", rscreen->info.r600_has_virtual_memory);
		printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
		printf("uvd_decode = %u\n", rscreen->info.has_video_hw.uvd_decode);
		printf("num_rings[RING_DMA] = %i\n", rscreen->info.num_rings[RING_DMA]);
		printf("num_rings[RING_COMPUTE] = %u\n", rscreen->info.num_rings[RING_COMPUTE]);
		printf("uvd_fw_version = %u\n", rscreen->info.uvd_fw_version);
		printf("vce_fw_version = %u\n", rscreen->info.vce_fw_version);
		printf("me_fw_version = %i\n", rscreen->info.me_fw_version);
		printf("pfp_fw_version = %i\n", rscreen->info.pfp_fw_version);
		printf("ce_fw_version = %i\n", rscreen->info.ce_fw_version);
		printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
		printf("clock_crystal_freq = %i\n", rscreen->info.clock_crystal_freq);
		printf("tcc_cache_line_size = %u\n", rscreen->info.tcc_cache_line_size);
		printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
		       rscreen->info.drm_minor, rscreen->info.drm_patchlevel);
		printf("has_userptr = %i\n", rscreen->info.has_userptr);
		printf("has_syncobj = %u\n", rscreen->info.has_syncobj);

		printf("r600_max_quad_pipes = %i\n", rscreen->info.r600_max_quad_pipes);
		printf("max_shader_clock = %i\n", rscreen->info.max_shader_clock);
		printf("num_good_compute_units = %i\n", rscreen->info.num_good_compute_units);
		printf("max_se = %i\n", rscreen->info.max_se);
		printf("max_sh_per_se = %i\n", rscreen->info.max_sa_per_se);

		printf("r600_gb_backend_map = %i\n", rscreen->info.r600_gb_backend_map);
		printf("r600_gb_backend_map_valid = %i\n", rscreen->info.r600_gb_backend_map_valid);
		printf("r600_num_banks = %i\n", rscreen->info.r600_num_banks);
		printf("num_render_backends = %i\n", rscreen->info.max_render_backends);
		printf("num_tile_pipes = %i\n", rscreen->info.num_tile_pipes);
		printf("pipe_interleave_bytes = %i\n", rscreen->info.pipe_interleave_bytes);
		printf("enabled_rb_mask = 0x%x\n", rscreen->info.enabled_rb_mask);
		printf("max_alignment = %u\n", (unsigned)rscreen->info.max_alignment);
	}

	const struct nir_shader_compiler_options nir_options = {
		.fuse_ffma16 = true,
		.fuse_ffma32 = true,
		.fuse_ffma64 = true,
		.lower_flrp32 = true,
		.lower_flrp64 = true,
		.lower_fpow = true,
		.lower_fdiv = true,
		.lower_isign = true,
		.lower_fsign = true,
		.lower_fmod = true,
		.lower_doubles_options = nir_lower_fp64_full_software,
		.lower_int64_options = ~0,
		.lower_extract_byte = true,
		.lower_extract_word = true,
		.lower_insert_byte = true,
		.lower_insert_word = true,
		.lower_rotate = true,
		.max_unroll_iterations = 32,
		.lower_interpolate_at = true,
		.vectorize_io = true,
		.has_umad24 = true,
		.has_umul24 = true,
		.use_interpolated_input_intrinsics = true,
		.has_fsub = true,
		.has_isub = true,
		.lower_iabs = true,
		.lower_bitfield_extract = true,
		.lower_bitfield_insert_to_bitfield_select = true,
		.has_fused_comp_and_csel = true,
		.lower_find_msb_to_reverse = true,
		.lower_to_scalar = true,
		.lower_to_scalar_filter = r600_lower_to_scalar_instr_filter,
		.linker_ignore_precision = true,
	};

	rscreen->nir_options = nir_options;

	return true;
}

void r600_destroy_common_screen(struct r600_common_screen *rscreen)
{
	r600_perfcounters_destroy(rscreen);
	r600_gpu_load_kill_thread(rscreen);

	mtx_destroy(&rscreen->gpu_load_mutex);
	mtx_destroy(&rscreen->aux_context_lock);
	rscreen->aux_context->destroy(rscreen->aux_context);

	slab_destroy_parent(&rscreen->pool_transfers);

	disk_cache_destroy(rscreen->disk_shader_cache);
	rscreen->ws->destroy(rscreen->ws);
	FREE(rscreen);
}

bool r600_can_dump_shader(struct r600_common_screen *rscreen,
                          unsigned processor)
{
	return rscreen->debug_flags & (1 << processor);
}

bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)
{
	return (rscreen->debug_flags & DBG_CHECK_IR) ||
	       r600_can_dump_shader(rscreen, processor);
}

void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
                              uint64_t offset, uint64_t size, unsigned value)
{
	struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;

	mtx_lock(&rscreen->aux_context_lock);
	rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
	rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
	mtx_unlock(&rscreen->aux_context_lock);
}