1 /*
2 * Copyright 2011 Adam Rak <adam.rak@streamnovation.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Adam Rak <adam.rak@streamnovation.com>
25 */
26
27 #ifdef HAVE_OPENCL
28 #include <gelf.h>
29 #include <libelf.h>
30 #endif
31 #include <stdio.h>
32 #include <errno.h>
33 #include "pipe/p_defines.h"
34 #include "pipe/p_state.h"
35 #include "pipe/p_context.h"
36 #include "util/u_blitter.h"
37 #include "util/list.h"
38 #include "util/u_transfer.h"
39 #include "util/u_surface.h"
40 #include "util/u_pack_color.h"
41 #include "util/u_memory.h"
42 #include "util/u_inlines.h"
43 #include "util/u_framebuffer.h"
44 #include "tgsi/tgsi_parse.h"
45 #include "pipebuffer/pb_buffer.h"
46 #include "evergreend.h"
47 #include "r600_shader.h"
48 #include "r600_pipe.h"
49 #include "r600_formats.h"
50 #include "evergreen_compute.h"
51 #include "evergreen_compute_internal.h"
52 #include "compute_memory_pool.h"
53 #include "sb/sb_public.h"
54 #include <inttypes.h>
55
56 /**
57 RAT0 is for global binding write
58 VTX1 is for global binding read
59
60 for writing images RAT1...
61 for reading images TEX2...
62 TEX2-RAT1 is paired
63
64 TEX2... consumes the same fetch resources that VTX2... would consume
65
66 CONST0 and VTX0 are for parameters
67 CONST0 binds the smaller input parameter buffer and is used for constant indexing;
68 it is also constant cached
69 VTX0 is for indirect/non-constant indexing, or if the input is bigger than
70 the constant cache can handle
71
72 RATs are limited to 12, so we can only bind at most 11 textures for writing,
73 because we reserve RAT0 for global bindings. With byte addressing enabled
74 we should reserve another one too => at most 10 image bindings for writing.
75
76 from Nvidia OpenCL:
77 CL_DEVICE_MAX_READ_IMAGE_ARGS: 128
78 CL_DEVICE_MAX_WRITE_IMAGE_ARGS: 8
79
80 so 10 for writing is enough. 176 is the max for reading according to the docs
81
82 writable images should be listed first (< 10), so their id corresponds to RAT(id+1)
83 writable images will consume TEX slots, and VTX slots too because of linear indexing
84
85 */
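/* Summary of the bindings actually set up by the code below (informal
 * overview derived from the comment above and the helper functions):
 *
 *   RAT0      - global memory pool, for writes   (evergreen_set_global_binding)
 *   RAT1..11  - writable images / surfaces, RAT(id+1)  (evergreen_set_compute_resources)
 *   CONST0    - kernel parameter buffer, constant indexing  (evergreen_compute_upload_input)
 *   VTX1      - global memory pool, for reads
 *   VTX2      - constants that LLVM places in the code BO
 *   VTX3      - kernel parameter buffer, dynamic indexing
 *   VTX4...   - compute resources  (evergreen_set_compute_resources)
 *   TEX2...   - readable images, paired with RAT1...
 */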
86
87 #ifdef HAVE_OPENCL
88 static void radeon_shader_binary_init(struct r600_shader_binary *b)
89 {
90 memset(b, 0, sizeof(*b));
91 }
92
93 static void radeon_shader_binary_clean(struct r600_shader_binary *b)
94 {
95 if (!b)
96 return;
97 FREE(b->code);
98 FREE(b->config);
99 FREE(b->rodata);
100 FREE(b->global_symbol_offsets);
101 FREE(b->relocs);
102 FREE(b->disasm_string);
103 }
104 #endif
105
106 struct r600_resource *r600_compute_buffer_alloc_vram(struct r600_screen *screen,
107 unsigned size)
108 {
109 struct pipe_resource *buffer = NULL;
110 assert(size);
111
112 buffer = pipe_buffer_create((struct pipe_screen*) screen,
113 0, PIPE_USAGE_IMMUTABLE, size);
114
115 return (struct r600_resource *)buffer;
116 }
117
118
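/* Bind 'bo' as RAT 'id': create an R32_UINT surface for it and store it in
 * the framebuffer state as color buffer 'id', so the compute shader can use
 * it as a writable buffer. */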
119 static void evergreen_set_rat(struct r600_pipe_compute *pipe,
120 unsigned id,
121 struct r600_resource *bo,
122 int start,
123 int size)
124 {
125 struct pipe_surface rat_templ;
126 struct r600_surface *surf = NULL;
127 struct r600_context *rctx = NULL;
128
129 assert(id < 12);
130 assert((size & 3) == 0);
131 assert((start & 0xFF) == 0);
132
133 rctx = pipe->ctx;
134
135 COMPUTE_DBG(rctx->screen, "bind rat: %i \n", id);
136
137 /* Create the RAT surface */
138 memset(&rat_templ, 0, sizeof(rat_templ));
139 rat_templ.format = PIPE_FORMAT_R32_UINT;
140 rat_templ.u.tex.level = 0;
141 rat_templ.u.tex.first_layer = 0;
142 rat_templ.u.tex.last_layer = 0;
143
144 /* Add the RAT to the list of color buffers. Drop the old buffer first. */
145 pipe_surface_reference(&pipe->ctx->framebuffer.state.cbufs[id], NULL);
146 pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
147 (struct pipe_context *)pipe->ctx,
148 (struct pipe_resource *)bo, &rat_templ);
149
150 /* Update the number of color buffers */
151 pipe->ctx->framebuffer.state.nr_cbufs =
152 MAX2(id + 1, pipe->ctx->framebuffer.state.nr_cbufs);
153
154 /* Update the cb_target_mask
155 * XXX: I think this is a potential spot for bugs once we start doing
156 * GL interop. cb_target_mask may be modified in the 3D sections
157 * of this driver. */
158 pipe->ctx->compute_cb_target_mask |= (0xf << (id * 4));
159
160 surf = (struct r600_surface*)pipe->ctx->framebuffer.state.cbufs[id];
161 evergreen_init_color_surface_rat(rctx, surf);
162 }
163
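/* Bind 'buffer' at 'offset' as compute vertex buffer 'vb_index' with a
 * stride of 1 (byte addressed) and mark the CS vertex buffer atom dirty. */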
164 static void evergreen_cs_set_vertex_buffer(struct r600_context *rctx,
165 unsigned vb_index,
166 unsigned offset,
167 struct pipe_resource *buffer)
168 {
169 struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
170 struct pipe_vertex_buffer *vb = &state->vb[vb_index];
171 vb->stride = 1;
172 vb->buffer_offset = offset;
173 vb->buffer.resource = buffer;
174 vb->is_user_buffer = false;
175
176 /* The vertex instructions in the compute shaders use the texture cache,
177 * so we need to invalidate it. */
178 rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
179 state->enabled_mask |= 1 << vb_index;
180 state->dirty_mask |= 1 << vb_index;
181 r600_mark_atom_dirty(rctx, &state->atom);
182 }
183
184 static void evergreen_cs_set_constant_buffer(struct r600_context *rctx,
185 unsigned cb_index,
186 unsigned offset,
187 unsigned size,
188 struct pipe_resource *buffer)
189 {
190 struct pipe_constant_buffer cb;
191 cb.buffer_size = size;
192 cb.buffer_offset = offset;
193 cb.buffer = buffer;
194 cb.user_buffer = NULL;
195
196 rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, false, &cb);
197 }
198
199 /* We need to define these R600 registers here, because we can't include
200 * both evergreend.h and r600d.h at the same time.
201 */
202 #define R_028868_SQ_PGM_RESOURCES_VS 0x028868
203 #define R_028850_SQ_PGM_RESOURCES_PS 0x028850
204
205 #ifdef HAVE_OPENCL
206 static void parse_symbol_table(Elf_Data *symbol_table_data,
207 const GElf_Shdr *symbol_table_header,
208 struct r600_shader_binary *binary)
209 {
210 GElf_Sym symbol;
211 unsigned i = 0;
212 unsigned symbol_count =
213 symbol_table_header->sh_size / symbol_table_header->sh_entsize;
214
215 /* We are over allocating this list, because symbol_count gives the
216 * total number of symbols, and we will only be filling the list
217 * with offsets of global symbols. The memory savings from
218 * allocating the correct size of this list will be small, and
219 * I don't think it is worth the cost of pre-computing the number
220 * of global symbols.
221 */
222 binary->global_symbol_offsets = CALLOC(symbol_count, sizeof(uint64_t));
223
224 while (gelf_getsym(symbol_table_data, i++, &symbol)) {
225 unsigned i;
226 if (GELF_ST_BIND(symbol.st_info) != STB_GLOBAL ||
227 symbol.st_shndx == 0 /* Undefined symbol */) {
228 continue;
229 }
230
231 binary->global_symbol_offsets[binary->global_symbol_count] =
232 symbol.st_value;
233
234 /* Sort the list using bubble sort. This list will usually
235 * be small. */
236 for (i = binary->global_symbol_count; i > 0; --i) {
237 uint64_t lhs = binary->global_symbol_offsets[i - 1];
238 uint64_t rhs = binary->global_symbol_offsets[i];
239 if (lhs < rhs) {
240 break;
241 }
242 binary->global_symbol_offsets[i] = lhs;
243 binary->global_symbol_offsets[i - 1] = rhs;
244 }
245 ++binary->global_symbol_count;
246 }
247 }
248
249
250 static void parse_relocs(Elf *elf, Elf_Data *relocs, Elf_Data *symbols,
251 unsigned symbol_sh_link,
252 struct r600_shader_binary *binary)
253 {
254 unsigned i;
255
256 if (!relocs || !symbols || !binary->reloc_count) {
257 return;
258 }
259 binary->relocs = CALLOC(binary->reloc_count,
260 sizeof(struct r600_shader_reloc));
261 for (i = 0; i < binary->reloc_count; i++) {
262 GElf_Sym symbol;
263 GElf_Rel rel;
264 char *symbol_name;
265 struct r600_shader_reloc *reloc = &binary->relocs[i];
266
267 gelf_getrel(relocs, i, &rel);
268 gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &symbol);
269 symbol_name = elf_strptr(elf, symbol_sh_link, symbol.st_name);
270
271 reloc->offset = rel.r_offset;
272 strncpy(reloc->name, symbol_name, sizeof(reloc->name)-1);
273 reloc->name[sizeof(reloc->name)-1] = 0;
274 }
275 }
276
277 static void r600_elf_read(const char *elf_data, unsigned elf_size,
278 struct r600_shader_binary *binary)
279 {
280 char *elf_buffer;
281 Elf *elf;
282 Elf_Scn *section = NULL;
283 Elf_Data *symbols = NULL, *relocs = NULL;
284 size_t section_str_index;
285 unsigned symbol_sh_link = 0;
286
287 /* One of the libelf implementations
288 * (http://www.mr511.de/software/english.htm) requires calling
289 * elf_version() before elf_memory().
290 */
291 elf_version(EV_CURRENT);
292 elf_buffer = MALLOC(elf_size);
293 memcpy(elf_buffer, elf_data, elf_size);
294
295 elf = elf_memory(elf_buffer, elf_size);
296
297 elf_getshdrstrndx(elf, &section_str_index);
298
299 while ((section = elf_nextscn(elf, section))) {
300 const char *name;
301 Elf_Data *section_data = NULL;
302 GElf_Shdr section_header;
303 if (gelf_getshdr(section, &section_header) != &section_header) {
304 fprintf(stderr, "Failed to read ELF section header\n");
305 return;
306 }
307 name = elf_strptr(elf, section_str_index, section_header.sh_name);
308 if (!strcmp(name, ".text")) {
309 section_data = elf_getdata(section, section_data);
310 binary->code_size = section_data->d_size;
311 binary->code = MALLOC(binary->code_size * sizeof(unsigned char));
312 memcpy(binary->code, section_data->d_buf, binary->code_size);
313 } else if (!strcmp(name, ".AMDGPU.config")) {
314 section_data = elf_getdata(section, section_data);
315 binary->config_size = section_data->d_size;
316 binary->config = MALLOC(binary->config_size * sizeof(unsigned char));
317 memcpy(binary->config, section_data->d_buf, binary->config_size);
318 } else if (!strcmp(name, ".AMDGPU.disasm")) {
319 /* Always read disassembly if it's available. */
320 section_data = elf_getdata(section, section_data);
321 binary->disasm_string = strndup(section_data->d_buf,
322 section_data->d_size);
323 } else if (!strncmp(name, ".rodata", 7)) {
324 section_data = elf_getdata(section, section_data);
325 binary->rodata_size = section_data->d_size;
326 binary->rodata = MALLOC(binary->rodata_size * sizeof(unsigned char));
327 memcpy(binary->rodata, section_data->d_buf, binary->rodata_size);
328 } else if (!strncmp(name, ".symtab", 7)) {
329 symbols = elf_getdata(section, section_data);
330 symbol_sh_link = section_header.sh_link;
331 parse_symbol_table(symbols, &section_header, binary);
332 } else if (!strcmp(name, ".rel.text")) {
333 relocs = elf_getdata(section, section_data);
334 binary->reloc_count = section_header.sh_size /
335 section_header.sh_entsize;
336 }
337 }
338
339 parse_relocs(elf, relocs, symbols, symbol_sh_link, binary);
340
341 if (elf){
342 elf_end(elf);
343 }
344 FREE(elf_buffer);
345
346 /* Cache the config size per symbol */
347 if (binary->global_symbol_count) {
348 binary->config_size_per_symbol =
349 binary->config_size / binary->global_symbol_count;
350 } else {
351 binary->global_symbol_count = 1;
352 binary->config_size_per_symbol = binary->config_size;
353 }
354 }
355
356 static const unsigned char *r600_shader_binary_config_start(
357 const struct r600_shader_binary *binary,
358 uint64_t symbol_offset)
359 {
360 unsigned i;
361 for (i = 0; i < binary->global_symbol_count; ++i) {
362 if (binary->global_symbol_offsets[i] == symbol_offset) {
363 unsigned offset = i * binary->config_size_per_symbol;
364 return binary->config + offset;
365 }
366 }
367 return binary->config;
368 }
369
370 static void r600_shader_binary_read_config(const struct r600_shader_binary *binary,
371 struct r600_bytecode *bc,
372 uint64_t symbol_offset,
373 boolean *use_kill)
374 {
375 unsigned i;
376 const unsigned char *config =
377 r600_shader_binary_config_start(binary, symbol_offset);
378
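/* The config section is a stream of (register, value) dword pairs, 8 bytes
 * per entry; walk it and pick out the resource-related registers below. */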
379 for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
380 unsigned reg =
381 util_le32_to_cpu(*(uint32_t*)(config + i));
382 unsigned value =
383 util_le32_to_cpu(*(uint32_t*)(config + i + 4));
384 switch (reg) {
385 /* R600 / R700 */
386 case R_028850_SQ_PGM_RESOURCES_PS:
387 case R_028868_SQ_PGM_RESOURCES_VS:
388 /* Evergreen / Northern Islands */
389 case R_028844_SQ_PGM_RESOURCES_PS:
390 case R_028860_SQ_PGM_RESOURCES_VS:
391 case R_0288D4_SQ_PGM_RESOURCES_LS:
392 bc->ngpr = MAX2(bc->ngpr, G_028844_NUM_GPRS(value));
393 bc->nstack = MAX2(bc->nstack, G_028844_STACK_SIZE(value));
394 break;
395 case R_02880C_DB_SHADER_CONTROL:
396 *use_kill = G_02880C_KILL_ENABLE(value);
397 break;
398 case R_0288E8_SQ_LDS_ALLOC:
399 bc->nlds_dw = value;
400 break;
401 }
402 }
403 }
404
405 static unsigned r600_create_shader(struct r600_bytecode *bc,
406 const struct r600_shader_binary *binary,
407 boolean *use_kill)
408
409 {
410 assert(binary->code_size % 4 == 0);
411 bc->bytecode = CALLOC(1, binary->code_size);
412 memcpy(bc->bytecode, binary->code, binary->code_size);
413 bc->ndw = binary->code_size / 4;
414
415 r600_shader_binary_read_config(binary, bc, 0, use_kill);
416 return 0;
417 }
418
419 #endif
420
421 static void r600_destroy_shader(struct r600_bytecode *bc)
422 {
423 FREE(bc->bytecode);
424 }
425
426 static void *evergreen_create_compute_state(struct pipe_context *ctx,
427 const struct pipe_compute_state *cso)
428 {
429 struct r600_context *rctx = (struct r600_context *)ctx;
430 struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
431 #ifdef HAVE_OPENCL
432 const struct pipe_binary_program_header *header;
433 void *p;
434 boolean use_kill;
435 #endif
436
437 shader->ctx = rctx;
438 shader->local_size = cso->req_local_mem;
439 shader->private_size = cso->req_private_mem;
440 shader->input_size = cso->req_input_mem;
441
442 shader->ir_type = cso->ir_type;
443
444 if (shader->ir_type == PIPE_SHADER_IR_TGSI ||
445 shader->ir_type == PIPE_SHADER_IR_NIR) {
446 shader->sel = r600_create_shader_state_tokens(ctx, cso->prog, cso->ir_type, PIPE_SHADER_COMPUTE);
447
448 /* Precompile the shader with the expected shader key, to reduce jank at
449 * draw time. Also produces output for shader-db.
450 */
451 bool dirty;
452 r600_shader_select(ctx, shader->sel, &dirty, true);
453
454 return shader;
455 }
456 #ifdef HAVE_OPENCL
457 COMPUTE_DBG(rctx->screen, "*** evergreen_create_compute_state\n");
458 header = cso->prog;
459 radeon_shader_binary_init(&shader->binary);
460 r600_elf_read(header->blob, header->num_bytes, &shader->binary);
461 r600_create_shader(&shader->bc, &shader->binary, &use_kill);
462
463 /* Upload code + ROdata */
464 shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen,
465 shader->bc.ndw * 4);
466 p = r600_buffer_map_sync_with_rings(
467 &rctx->b, shader->code_bo,
468 PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
469 //TODO: use util_memcpy_cpu_to_le32 ?
470 memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
471 rctx->b.ws->buffer_unmap(rctx->b.ws, shader->code_bo->buf);
472 #endif
473
474 return shader;
475 }
476
477 static void evergreen_delete_compute_state(struct pipe_context *ctx, void *state)
478 {
479 struct r600_context *rctx = (struct r600_context *)ctx;
480 struct r600_pipe_compute *shader = state;
481
482 COMPUTE_DBG(rctx->screen, "*** evergreen_delete_compute_state\n");
483
484 if (!shader)
485 return;
486
487 if (shader->ir_type == PIPE_SHADER_IR_TGSI ||
488 shader->ir_type == PIPE_SHADER_IR_NIR) {
489 r600_delete_shader_selector(ctx, shader->sel);
490 } else {
491 #ifdef HAVE_OPENCL
492 radeon_shader_binary_clean(&shader->binary);
493 pipe_resource_reference((struct pipe_resource**)&shader->code_bo, NULL);
494 pipe_resource_reference((struct pipe_resource**)&shader->kernel_param, NULL);
495 #endif
496 r600_destroy_shader(&shader->bc);
497 }
498 FREE(shader);
499 }
500
501 static void evergreen_bind_compute_state(struct pipe_context *ctx, void *state)
502 {
503 struct r600_context *rctx = (struct r600_context *)ctx;
504 struct r600_pipe_compute *cstate = (struct r600_pipe_compute *)state;
505 COMPUTE_DBG(rctx->screen, "*** evergreen_bind_compute_state\n");
506
507 if (!state) {
508 rctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
509 return;
510 }
511
512 if (cstate->ir_type == PIPE_SHADER_IR_TGSI ||
513 cstate->ir_type == PIPE_SHADER_IR_NIR) {
514 bool compute_dirty;
515 if (r600_shader_select(ctx, cstate->sel, &compute_dirty, false))
516 R600_ERR("Failed to select compute shader\n");
517 }
518
519 rctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
520 }
521
522 /* The kernel parameters are stored in a vtx buffer (ID=0). Besides the explicit
523 * kernel parameters, there are implicit parameters that need to be stored
524 * in the vertex buffer as well. Here is how these parameters are organized in
525 * the buffer:
526 *
527 * DWORDS 0-2: Number of work groups in each dimension (x,y,z)
528 * DWORDS 3-5: Number of global work items in each dimension (x,y,z)
529 * DWORDS 6-8: Number of work items within each work group in each dimension
530 * (x,y,z)
531 * DWORDS 9+ : Kernel parameters
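*
* For example (illustrative values only): a launch with info->grid = {4, 2, 1}
* and info->block = {64, 1, 1} would fill the implicit section with
* DWORDS 0-2: 4, 2, 1 (work groups)
* DWORDS 3-5: 256, 2, 1 (grid * block = global work items)
* DWORDS 6-8: 64, 1, 1 (work group size)
* followed by the kernel arguments copied from info->input.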
532 */
533 static void evergreen_compute_upload_input(struct pipe_context *ctx,
534 const struct pipe_grid_info *info)
535 {
536 struct r600_context *rctx = (struct r600_context *)ctx;
537 struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
538 unsigned i;
539 /* We need to reserve 9 dwords (36 bytes) for implicit kernel
540 * parameters.
541 */
542 unsigned input_size;
543 uint32_t *num_work_groups_start;
544 uint32_t *global_size_start;
545 uint32_t *local_size_start;
546 uint32_t *kernel_parameters_start;
547 struct pipe_box box;
548 struct pipe_transfer *transfer = NULL;
549
550 if (!shader)
551 return;
552 if (shader->input_size == 0) {
553 return;
554 }
555 input_size = shader->input_size + 36;
556 if (!shader->kernel_param) {
557 /* Add space for the grid dimensions */
558 shader->kernel_param = (struct r600_resource *)
559 pipe_buffer_create(ctx->screen, 0,
560 PIPE_USAGE_IMMUTABLE, input_size);
561 }
562
563 u_box_1d(0, input_size, &box);
564 num_work_groups_start = ctx->buffer_map(ctx,
565 (struct pipe_resource*)shader->kernel_param,
566 0, PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
567 &box, &transfer);
568 global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4));
569 local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
570 kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
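/* Each implicit section above is 3 uint32_t entries wide
 * (3 * sizeof(uint) / 4 == 3), i.e. the 9 reserved dwords. */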
571
572 /* Copy the work group size */
573 memcpy(num_work_groups_start, info->grid, 3 * sizeof(uint));
574
575 /* Copy the global size */
576 for (i = 0; i < 3; i++) {
577 global_size_start[i] = info->grid[i] * info->block[i];
578 }
579
580 /* Copy the local dimensions */
581 memcpy(local_size_start, info->block, 3 * sizeof(uint));
582
583 /* Copy the kernel inputs */
584 memcpy(kernel_parameters_start, info->input, shader->input_size);
585
586 for (i = 0; i < (input_size / 4); i++) {
587 COMPUTE_DBG(rctx->screen, "input %i : %u\n", i,
588 ((unsigned*)num_work_groups_start)[i]);
589 }
590
591 ctx->buffer_unmap(ctx, transfer);
592
593 /* ID=0 and ID=3 are reserved for the parameters.
594 * LLVM will preferably use ID=0, but it does not work for dynamic
595 * indices. */
596 evergreen_cs_set_vertex_buffer(rctx, 3, 0,
597 (struct pipe_resource*)shader->kernel_param);
598 evergreen_cs_set_constant_buffer(rctx, 0, 0, input_size,
599 (struct pipe_resource*)shader->kernel_param);
600 }
601
602 static void evergreen_emit_dispatch(struct r600_context *rctx,
603 const struct pipe_grid_info *info,
604 uint32_t indirect_grid[3])
605 {
606 int i;
607 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
608 struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
609 bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
610 unsigned num_waves;
611 unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
612 unsigned wave_divisor = (16 * num_pipes);
613 int group_size = 1;
614 unsigned lds_size = shader->local_size / 4;
615
616 if (shader->ir_type != PIPE_SHADER_IR_TGSI &&
617 shader->ir_type != PIPE_SHADER_IR_NIR)
618 lds_size += shader->bc.nlds_dw;
619
620 /* Calculate group_size */
621 for (i = 0; i < 3; i++) {
622 group_size *= info->block[i];
623 }
624
625 /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
626 num_waves = (info->block[0] * info->block[1] * info->block[2] +
627 wave_divisor - 1) / wave_divisor;
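/* e.g. (illustrative) block = {64, 4, 1} with 8 pipes gives
 * wave_divisor = 128 and num_waves = ceil(256 / 128) = 2. */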
628
629 COMPUTE_DBG(rctx->screen, "Using %u pipes, "
630 "%u wavefronts per thread block, "
631 "allocating %u dwords lds.\n",
632 num_pipes, num_waves, lds_size);
633
634 radeon_set_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
635
636 radeon_set_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
637 radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
638 radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
639 radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
640
641 radeon_set_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
642 group_size);
643
644 radeon_compute_set_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
645 radeon_emit(cs, info->block[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
646 radeon_emit(cs, info->block[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
647 radeon_emit(cs, info->block[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
648
649 if (rctx->b.gfx_level < CAYMAN) {
650 assert(lds_size <= 8192);
651 } else {
652 /* Cayman appears to have a slightly smaller limit, see the
653 * value of CM_R_0286FC_SPI_LDS_MGMT.NUM_LS_LDS */
654 assert(lds_size <= 8160);
655 }
656
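/* Program SQ_LDS_ALLOC: the LDS allocation in dwords in the low bits,
 * with the wave count packed at bit 14. */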
657 radeon_compute_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC,
658 lds_size | (num_waves << 14));
659
660 if (info->indirect) {
661 radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, render_cond_bit));
662 radeon_emit(cs, indirect_grid[0]);
663 radeon_emit(cs, indirect_grid[1]);
664 radeon_emit(cs, indirect_grid[2]);
665 radeon_emit(cs, 1);
666 } else {
667 /* Dispatch packet */
668 radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, render_cond_bit));
669 radeon_emit(cs, info->grid[0]);
670 radeon_emit(cs, info->grid[1]);
671 radeon_emit(cs, info->grid[2]);
672 /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
673 radeon_emit(cs, 1);
674 }
675
676 if (rctx->is_debug)
677 eg_trace_emit(rctx);
678 }
679
680 static void compute_setup_cbs(struct r600_context *rctx)
681 {
682 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
683 unsigned i;
684
685 /* Emit colorbuffers. */
686 /* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
687 for (i = 0; i < 8 && i < rctx->framebuffer.state.nr_cbufs; i++) {
688 struct r600_surface *cb = (struct r600_surface*)rctx->framebuffer.state.cbufs[i];
689 unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
690 (struct r600_resource*)cb->base.texture,
691 RADEON_USAGE_READWRITE |
692 RADEON_PRIO_SHADER_RW_BUFFER);
693
694 radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
695 radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
696 radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
697 radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
698 radeon_emit(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
699 radeon_emit(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
700 radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
701 radeon_emit(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
702
703 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
704 radeon_emit(cs, reloc);
705
706 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
707 radeon_emit(cs, reloc);
708 }
709 for (; i < 8 ; i++)
710 radeon_compute_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
711 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
712 for (; i < 12; i++)
713 radeon_compute_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
714 S_028C70_FORMAT(V_028C70_COLOR_INVALID));
715
716 /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
717 radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
718 rctx->compute_cb_target_mask);
719 }
720
721 static void compute_emit_cs(struct r600_context *rctx,
722 const struct pipe_grid_info *info)
723 {
724 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
725 bool compute_dirty = false;
726 struct r600_pipe_shader *current;
727 struct r600_shader_atomic combined_atomics[8];
728 uint8_t atomic_used_mask;
729 uint32_t indirect_grid[3] = { 0, 0, 0 };
730
731 /* make sure that the gfx ring is the only one active */
732 if (radeon_emitted(&rctx->b.dma.cs, 0)) {
733 rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
734 }
735
736 r600_update_compressed_resource_state(rctx, true);
737
738 if (!rctx->cmd_buf_is_compute) {
739 rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
740 rctx->cmd_buf_is_compute = true;
741 }
742
743 if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI||
744 rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_NIR) {
745 if (r600_shader_select(&rctx->b.b, rctx->cs_shader_state.shader->sel, &compute_dirty, false)) {
746 R600_ERR("Failed to select compute shader\n");
747 return;
748 }
749
750 current = rctx->cs_shader_state.shader->sel->current;
751 if (compute_dirty) {
752 rctx->cs_shader_state.atom.num_dw = current->command_buffer.num_dw;
753 r600_context_add_resource_size(&rctx->b.b, (struct pipe_resource *)current->bo);
754 r600_set_atom_dirty(rctx, &rctx->cs_shader_state.atom, true);
755 }
756
757 bool need_buf_const = current->shader.uses_tex_buffers ||
758 current->shader.has_txq_cube_array_z_comp;
759
760 if (info->indirect) {
761 struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect;
762 unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, PIPE_MAP_READ);
763 unsigned offset = info->indirect_offset / 4;
764 indirect_grid[0] = data[offset];
765 indirect_grid[1] = data[offset + 1];
766 indirect_grid[2] = data[offset + 2];
767 }
768 for (int i = 0; i < 3; i++) {
769 rctx->cs_block_grid_sizes[i] = info->block[i];
770 rctx->cs_block_grid_sizes[i + 4] = info->indirect ? indirect_grid[i] : info->grid[i];
771 }
772 rctx->cs_block_grid_sizes[3] = rctx->cs_block_grid_sizes[7] = 0;
773 rctx->driver_consts[PIPE_SHADER_COMPUTE].cs_block_grid_size_dirty = true;
774
775 evergreen_emit_atomic_buffer_setup_count(rctx, current, combined_atomics, &atomic_used_mask);
776 r600_need_cs_space(rctx, 0, true, util_bitcount(atomic_used_mask));
777
778 if (need_buf_const) {
779 eg_setup_buffer_constants(rctx, PIPE_SHADER_COMPUTE);
780 }
781 r600_update_driver_const_buffers(rctx, true);
782
783 evergreen_emit_atomic_buffer_setup(rctx, true, combined_atomics, atomic_used_mask);
784 if (atomic_used_mask) {
785 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
786 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
787 }
788 } else
789 r600_need_cs_space(rctx, 0, true, 0);
790
791 /* Initialize all the compute-related registers.
792 *
793 * See evergreen_init_atom_start_compute_cs() in this file for the list
794 * of registers initialized by the start_compute_cs_cmd atom.
795 */
796 r600_emit_command_buffer(cs, &rctx->start_compute_cs_cmd);
797
798 /* emit config state */
799 if (rctx->b.gfx_level == EVERGREEN) {
800 if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI||
801 rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_NIR) {
802 radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
803 radeon_emit(cs, S_008C04_NUM_CLAUSE_TEMP_GPRS(rctx->r6xx_num_clause_temp_gprs));
804 radeon_emit(cs, 0);
805 radeon_emit(cs, 0);
806 radeon_set_config_reg(cs, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (1 << 8));
807 } else
808 r600_emit_atom(rctx, &rctx->config_state.atom);
809 }
810
811 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
812 r600_flush_emit(rctx);
813
814 if (rctx->cs_shader_state.shader->ir_type != PIPE_SHADER_IR_TGSI &&
815 rctx->cs_shader_state.shader->ir_type != PIPE_SHADER_IR_NIR) {
816
817 compute_setup_cbs(rctx);
818
819 /* Emit vertex buffer state */
820 rctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(rctx->cs_vertex_buffer_state.dirty_mask);
821 r600_emit_atom(rctx, &rctx->cs_vertex_buffer_state.atom);
822 } else {
823 uint32_t rat_mask;
824
825 rat_mask = evergreen_construct_rat_mask(rctx, &rctx->cb_misc_state, 0);
826 radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
827 rat_mask);
828 }
829
830 r600_emit_atom(rctx, &rctx->b.render_cond_atom);
831
832 /* Emit constant buffer state */
833 r600_emit_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
834
835 /* Emit sampler state */
836 r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].states.atom);
837
838 /* Emit sampler view (texture resource) state */
839 r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views.atom);
840
841 /* Emit images state */
842 r600_emit_atom(rctx, &rctx->compute_images.atom);
843
844 /* Emit buffers state */
845 r600_emit_atom(rctx, &rctx->compute_buffers.atom);
846
847 /* Emit shader state */
848 r600_emit_atom(rctx, &rctx->cs_shader_state.atom);
849
850 /* Emit dispatch state and dispatch packet */
851 evergreen_emit_dispatch(rctx, info, indirect_grid);
852
853 /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
854 */
855 rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
856 R600_CONTEXT_INV_VERTEX_CACHE |
857 R600_CONTEXT_INV_TEX_CACHE;
858 r600_flush_emit(rctx);
859 rctx->b.flags = 0;
860
861 if (rctx->b.gfx_level >= CAYMAN) {
862 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
863 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
864 /* DEALLOC_STATE prevents the GPU from hanging when a
865 * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT
866 * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set.
867 */
868 radeon_emit(cs, PKT3C(PKT3_DEALLOC_STATE, 0, 0));
869 radeon_emit(cs, 0);
870 }
871 if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI ||
872 rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_NIR)
873 evergreen_emit_atomic_buffer_save(rctx, true, combined_atomics, &atomic_used_mask);
874
875 #if 0
876 COMPUTE_DBG(rctx->screen, "cdw: %i\n", cs->cdw);
877 for (i = 0; i < cs->cdw; i++) {
878 COMPUTE_DBG(rctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
879 }
880 #endif
881
882 }
883
884
885 /**
886 * Emit function for r600_cs_shader_state atom
887 */
888 void evergreen_emit_cs_shader(struct r600_context *rctx,
889 struct r600_atom *atom)
890 {
891 struct r600_cs_shader_state *state =
892 (struct r600_cs_shader_state*)atom;
893 struct r600_pipe_compute *shader = state->shader;
894 struct radeon_cmdbuf *cs = &rctx->b.gfx.cs;
895 uint64_t va;
896 struct r600_resource *code_bo;
897 unsigned ngpr, nstack;
898
899 if (shader->ir_type == PIPE_SHADER_IR_TGSI ||
900 shader->ir_type == PIPE_SHADER_IR_NIR) {
901 code_bo = shader->sel->current->bo;
902 va = shader->sel->current->bo->gpu_address;
903 ngpr = shader->sel->current->shader.bc.ngpr;
904 nstack = shader->sel->current->shader.bc.nstack;
905 } else {
906 code_bo = shader->code_bo;
907 va = shader->code_bo->gpu_address + state->pc;
908 ngpr = shader->bc.ngpr;
909 nstack = shader->bc.nstack;
910 }
911
912 radeon_compute_set_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
913 radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
914 radeon_emit(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
915 S_0288D4_NUM_GPRS(ngpr) |
916 S_0288D4_DX10_CLAMP(1) |
917 S_0288D4_STACK_SIZE(nstack));
918 radeon_emit(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
919
920 radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
921 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
922 code_bo, RADEON_USAGE_READ |
923 RADEON_PRIO_SHADER_BINARY));
924 }
925
926 static void evergreen_launch_grid(struct pipe_context *ctx,
927 const struct pipe_grid_info *info)
928 {
929 struct r600_context *rctx = (struct r600_context *)ctx;
930 #ifdef HAVE_OPENCL
931 struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
932 boolean use_kill;
933
934 if (shader->ir_type != PIPE_SHADER_IR_TGSI &&
935 shader->ir_type != PIPE_SHADER_IR_NIR) {
936 rctx->cs_shader_state.pc = info->pc;
937 /* Get the config information for this kernel. */
938 r600_shader_binary_read_config(&shader->binary, &shader->bc,
939 info->pc, &use_kill);
940 } else {
941 use_kill = false;
942 rctx->cs_shader_state.pc = 0;
943 }
944 #endif
945
946 COMPUTE_DBG(rctx->screen, "*** evergreen_launch_grid: pc = %u\n", info->pc);
947
948
949 evergreen_compute_upload_input(ctx, info);
950 compute_emit_cs(rctx, info);
951 }
952
953 static void evergreen_set_compute_resources(struct pipe_context *ctx,
954 unsigned start, unsigned count,
955 struct pipe_surface **surfaces)
956 {
957 struct r600_context *rctx = (struct r600_context *)ctx;
958 struct r600_surface **resources = (struct r600_surface **)surfaces;
959
960 COMPUTE_DBG(rctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
961 start, count);
962
963 for (unsigned i = 0; i < count; i++) {
964 /* The first four vertex buffers are reserved for parameters and
965 * global buffers. */
966 unsigned vtx_id = 4 + i;
967 if (resources[i]) {
968 struct r600_resource_global *buffer =
969 (struct r600_resource_global*)
970 resources[i]->base.texture;
971 if (resources[i]->base.writable) {
972 assert(i+1 < 12);
973
974 evergreen_set_rat(rctx->cs_shader_state.shader, i+1,
975 (struct r600_resource *)resources[i]->base.texture,
976 buffer->chunk->start_in_dw*4,
977 resources[i]->base.texture->width0);
978 }
979
980 evergreen_cs_set_vertex_buffer(rctx, vtx_id,
981 buffer->chunk->start_in_dw * 4,
982 resources[i]->base.texture);
983 }
984 }
985 }
986
987 static void evergreen_set_global_binding(struct pipe_context *ctx,
988 unsigned first, unsigned n,
989 struct pipe_resource **resources,
990 uint32_t **handles)
991 {
992 struct r600_context *rctx = (struct r600_context *)ctx;
993 struct compute_memory_pool *pool = rctx->screen->global_pool;
994 struct r600_resource_global **buffers =
995 (struct r600_resource_global **)resources;
996 unsigned i;
997
998 COMPUTE_DBG(rctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
999 first, n);
1000
1001 if (!resources) {
1002 /* XXX: Unset */
1003 return;
1004 }
1005
1006 /* We mark these items for promotion to the pool if they
1007 * aren't already there */
1008 for (i = first; i < first + n; i++) {
1009 struct compute_memory_item *item = buffers[i]->chunk;
1010
1011 if (!is_item_in_pool(item))
1012 buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
1013 }
1014
1015 if (compute_memory_finalize_pending(pool, ctx) == -1) {
1016 /* XXX: Unset */
1017 return;
1018 }
1019
1020 for (i = first; i < first + n; i++)
1021 {
1022 uint32_t buffer_offset;
1023 uint32_t handle;
1024 assert(resources[i]->target == PIPE_BUFFER);
1025 assert(resources[i]->bind & PIPE_BIND_GLOBAL);
1026
1027 buffer_offset = util_le32_to_cpu(*(handles[i]));
1028 handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;
1029
1030 *(handles[i]) = util_cpu_to_le32(handle);
1031 }
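/* Illustrative example: if a kernel argument initially holds byte offset 16
 * and the item's chunk starts at dword 100 in the pool, the handle written
 * back is 16 + 100 * 4 = 416, i.e. a byte offset from the pool base. */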
1032
1033 /* globals for writing */
1034 evergreen_set_rat(rctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
1035 /* globals for reading */
1036 evergreen_cs_set_vertex_buffer(rctx, 1, 0,
1037 (struct pipe_resource*)pool->bo);
1038
1039 /* constants for reading, LLVM puts them in text segment */
1040 evergreen_cs_set_vertex_buffer(rctx, 2, 0,
1041 (struct pipe_resource*)rctx->cs_shader_state.shader->code_bo);
1042 }
1043
1044 /**
1045 * This function initializes all the compute specific registers that need to
1046 * be initialized for each compute command stream. Registers that are common
1047 * to both compute and 3D will be initialized at the beginning of each compute
1048 * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
1049 * packet requires that the shader type bit be set, we must initialize all
1050 * context registers needed for compute in this function. The registers
1051 * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
1052 * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
1053 * on the GPU family.
1054 */
1055 void evergreen_init_atom_start_compute_cs(struct r600_context *rctx)
1056 {
1057 struct r600_command_buffer *cb = &rctx->start_compute_cs_cmd;
1058 int num_threads;
1059 int num_stack_entries;
1060
1061 /* since all required registers are initialized in the
1062 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
1063 */
1064 r600_init_command_buffer(cb, 256);
1065 cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
1066
1067 /* We're setting config registers here. */
1068 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
1069 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
1070
1071 switch (rctx->b.family) {
1072 case CHIP_CEDAR:
1073 default:
1074 num_threads = 128;
1075 num_stack_entries = 256;
1076 break;
1077 case CHIP_REDWOOD:
1078 num_threads = 128;
1079 num_stack_entries = 256;
1080 break;
1081 case CHIP_JUNIPER:
1082 num_threads = 128;
1083 num_stack_entries = 512;
1084 break;
1085 case CHIP_CYPRESS:
1086 case CHIP_HEMLOCK:
1087 num_threads = 128;
1088 num_stack_entries = 512;
1089 break;
1090 case CHIP_PALM:
1091 num_threads = 128;
1092 num_stack_entries = 256;
1093 break;
1094 case CHIP_SUMO:
1095 num_threads = 128;
1096 num_stack_entries = 256;
1097 break;
1098 case CHIP_SUMO2:
1099 num_threads = 128;
1100 num_stack_entries = 512;
1101 break;
1102 case CHIP_BARTS:
1103 num_threads = 128;
1104 num_stack_entries = 512;
1105 break;
1106 case CHIP_TURKS:
1107 num_threads = 128;
1108 num_stack_entries = 256;
1109 break;
1110 case CHIP_CAICOS:
1111 num_threads = 128;
1112 num_stack_entries = 256;
1113 break;
1114 }
1115
1116 /* The primitive type always needs to be POINTLIST for compute. */
1117 r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
1118 V_008958_DI_PT_POINTLIST);
1119
1120 if (rctx->b.gfx_level < CAYMAN) {
1121
1122 /* These registers control which simds can be used by each stage.
1123 * The default for these registers is 0xffffffff, which means
1124 * all simds are available for each stage. It's possible we may
1125 * want to play around with these in the future, but for now
1126 * the default value is fine.
1127 *
1128 * R_008E20_SQ_STATIC_THREAD_MGMT1
1129 * R_008E24_SQ_STATIC_THREAD_MGMT2
1130 * R_008E28_SQ_STATIC_THREAD_MGMT3
1131 */
1132
1133 /* XXX: We may need to adjust the thread and stack resource
1134 * values for 3D/compute interop */
1135
1136 r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
1137
1138 /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1
1139 * Set the number of threads used by the PS/VS/GS/ES stage to
1140 * 0.
1141 */
1142 r600_store_value(cb, 0);
1143
1144 /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2
1145 * Set the number of threads used by the CS (aka LS) stage to
1146 * the maximum number of threads and set the number of threads
1147 * for the HS stage to 0. */
1148 r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads));
1149
1150 /* R_008C20_SQ_STACK_RESOURCE_MGMT_1
1151 * Set the Control Flow stack entries to 0 for PS/VS stages */
1152 r600_store_value(cb, 0);
1153
1154 /* R_008C24_SQ_STACK_RESOURCE_MGMT_2
1155 * Set the Control Flow stack entries to 0 for GS/ES stages */
1156 r600_store_value(cb, 0);
1157
1158 /* R_008C28_SQ_STACK_RESOURCE_MGMT_3
1159 * Set the Control Flow stack entries to 0 for the HS stage, and
1160 * set it to the maximum value for the CS (aka LS) stage. */
1161 r600_store_value(cb,
1162 S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
1163 }
1164 /* Give the compute shader all the available LDS space.
1165 * NOTE: This only sets the maximum number of dwords that a compute
1166 * shader can allocate. When a shader is executed, we still need to
1167 * allocate the appropriate amount of LDS dwords using the
1168 * CM_R_0288E8_SQ_LDS_ALLOC register.
1169 */
1170 if (rctx->b.gfx_level < CAYMAN) {
1171 r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
1172 S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192));
1173 } else {
1174 r600_store_context_reg(cb, CM_R_0286FC_SPI_LDS_MGMT,
1175 S_0286FC_NUM_PS_LDS(0) |
1176 S_0286FC_NUM_LS_LDS(255)); /* 255 * 32 = 8160 dwords */
1177 }
1178
1179 /* Context Registers */
1180
1181 if (rctx->b.gfx_level < CAYMAN) {
1182 /* workaround for hw issues with dyn gpr - must set all limits
1183 * to 240 instead of 0, 0x1e == 240 / 8
1184 */
1185 r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1,
1186 S_028838_PS_GPRS(0x1e) |
1187 S_028838_VS_GPRS(0x1e) |
1188 S_028838_GS_GPRS(0x1e) |
1189 S_028838_ES_GPRS(0x1e) |
1190 S_028838_HS_GPRS(0x1e) |
1191 S_028838_LS_GPRS(0x1e));
1192 }
1193
1194 /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */
1195 r600_store_context_reg(cb, R_028A40_VGT_GS_MODE,
1196 S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1));
1197
1198 r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
1199
1200 r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
1201 S_0286E8_TID_IN_GROUP_ENA(1) |
1202 S_0286E8_TGID_ENA(1) |
1203 S_0286E8_DISABLE_INDEX_PACK(1));
1204
1205 /* The LOOP_CONST registers are an optimization for loops that allows
1206 * you to store the initial counter, increment value, and maximum
1207 * counter value in a register so that hardware can calculate the
1208 * correct number of iterations for the loop, so that you don't need
1209 * to have the loop counter in your shader code. We don't currently use
1210 * this optimization, so we must keep track of the counter in the
1211 * shader and use a break instruction to exit loops. However, the
1212 * hardware will still use this register to determine when to exit a
1213 * loop, so we need to initialize the counter to 0, set the increment
1214 * value to 1 and the maximum counter value to 4095 (0xfff) which
1215 * is the maximum value allowed. This gives us a maximum of 4096
1216 * iterations for our loops, but hopefully our break instruction will
1217 * execute some time before the 4096th iteration.
1218 */
1219 eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
1220 }
1221
1222 void evergreen_init_compute_state_functions(struct r600_context *rctx)
1223 {
1224 rctx->b.b.create_compute_state = evergreen_create_compute_state;
1225 rctx->b.b.delete_compute_state = evergreen_delete_compute_state;
1226 rctx->b.b.bind_compute_state = evergreen_bind_compute_state;
1227 // rctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
1228 rctx->b.b.set_compute_resources = evergreen_set_compute_resources;
1229 rctx->b.b.set_global_binding = evergreen_set_global_binding;
1230 rctx->b.b.launch_grid = evergreen_launch_grid;
1231
1232 }
1233
1234 void *r600_compute_global_transfer_map(struct pipe_context *ctx,
1235 struct pipe_resource *resource,
1236 unsigned level,
1237 unsigned usage,
1238 const struct pipe_box *box,
1239 struct pipe_transfer **ptransfer)
1240 {
1241 struct r600_context *rctx = (struct r600_context*)ctx;
1242 struct compute_memory_pool *pool = rctx->screen->global_pool;
1243 struct r600_resource_global* buffer =
1244 (struct r600_resource_global*)resource;
1245
1246 struct compute_memory_item *item = buffer->chunk;
1247 struct pipe_resource *dst = NULL;
1248 unsigned offset = box->x;
1249
1250 if (usage & PIPE_MAP_READ)
1251 buffer->chunk->status |= ITEM_MAPPED_FOR_READING;
1252
1253 if (usage & PIPE_MAP_WRITE)
1254 buffer->chunk->status |= ITEM_MAPPED_FOR_WRITING;
1255
1256 if (is_item_in_pool(item)) {
1257 compute_memory_demote_item(pool, item, ctx);
1258 }
1259 else {
1260 if (item->real_buffer == NULL) {
1261 item->real_buffer =
1262 r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
1263 }
1264 }
1265
1266 dst = (struct pipe_resource*)item->real_buffer;
1267
1268 COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
1269 "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
1270 "width = %u, height = %u, depth = %u)\n", level, usage,
1271 box->x, box->y, box->z, box->width, box->height,
1272 box->depth);
1273 COMPUTE_DBG(rctx->screen, "Buffer id = %"PRIi64" offset = "
1274 "%u (box.x)\n", item->id, box->x);
1275
1276
1277 assert(resource->target == PIPE_BUFFER);
1278 assert(resource->bind & PIPE_BIND_GLOBAL);
1279 assert(box->x >= 0);
1280 assert(box->y == 0);
1281 assert(box->z == 0);
1282
1283 if (buffer->base.b.is_user_ptr)
1284 return NULL;
1285
1286 ///TODO: do it better, mapping is not possible if the pool is too big
1287 return pipe_buffer_map_range(ctx, dst,
1288 offset, box->width, usage & ~PIPE_MAP_READ, ptransfer);
1289 }
1290
1291 void r600_compute_global_transfer_unmap(struct pipe_context *ctx,
1292 struct pipe_transfer *transfer)
1293 {
1294 /* struct r600_resource_global are not real resources, they just map
1295 * to an offset within the compute memory pool. The function
1296 * r600_compute_global_transfer_map() maps the memory pool
1297 * resource rather than the struct r600_resource_global passed to
1298 * it as an argument and then initializes ptransfer->resource with
1299 * the memory pool resource (via pipe_buffer_map_range).
1300 * When transfer_unmap is called it uses the memory pool's
1301 * vtable which calls r600_buffer_transfer_unmap() rather than
1302 * this function.
1303 */
1304 assert (!"This function should not be called");
1305 }
1306
1307 void r600_compute_global_buffer_destroy(struct pipe_screen *screen,
1308 struct pipe_resource *res)
1309 {
1310 struct r600_resource_global* buffer = NULL;
1311 struct r600_screen* rscreen = NULL;
1312
1313 assert(res->target == PIPE_BUFFER);
1314 assert(res->bind & PIPE_BIND_GLOBAL);
1315
1316 buffer = (struct r600_resource_global*)res;
1317 rscreen = (struct r600_screen*)screen;
1318
1319 compute_memory_free(rscreen->global_pool, buffer->chunk->id);
1320 buffer->chunk = NULL;
1321
1322 if (buffer->base.b.is_user_ptr)
1323 r600_buffer_destroy(screen, res);
1324 else
1325 free(res);
1326 }
1327
1328 struct pipe_resource *r600_compute_global_buffer_create(struct pipe_screen *screen,
1329 const struct pipe_resource *templ)
1330 {
1331 struct r600_resource_global* result = NULL;
1332 struct r600_screen* rscreen = NULL;
1333 int size_in_dw = 0;
1334
1335 assert(templ->target == PIPE_BUFFER);
1336 assert(templ->bind & PIPE_BIND_GLOBAL);
1337 assert(templ->array_size == 1 || templ->array_size == 0);
1338 assert(templ->depth0 == 1 || templ->depth0 == 0);
1339 assert(templ->height0 == 1 || templ->height0 == 0);
1340
1341 result = (struct r600_resource_global*)
1342 CALLOC(sizeof(struct r600_resource_global), 1);
1343 rscreen = (struct r600_screen*)screen;
1344
1345 COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
1346 COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
1347 templ->array_size);
1348
1349 result->base.b.b = *templ;
1350 result->base.b.b.screen = screen;
1351 result->base.compute_global_bo = true;
1352 pipe_reference_init(&result->base.b.b.reference, 1);
1353
1354 size_in_dw = (templ->width0+3) / 4;
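/* Round the size in bytes up to a whole number of dwords. */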
1355
1356 result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
1357
1358 if (result->chunk == NULL)
1359 {
1360 free(result);
1361 return NULL;
1362 }
1363
1364 return &result->base.b.b;
1365 }
1366