/*
 * Copyright (C) 2017 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"

#include "freedreno_resource.h"

#include "fd5_compute.h"
#include "fd5_context.h"
#include "fd5_emit.h"

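/* Compute CSO: just wraps the ir3 shader.  The actual variant doesn't
 * get resolved until launch_grid() time.
 */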
struct fd5_compute_stateobj {
	struct ir3_shader *shader;
};

static void *
fd5_create_compute_state(struct pipe_context *pctx,
		const struct pipe_compute_state *cso)
{
	struct fd_context *ctx = fd_context(pctx);

	/* req_input_mem will only be non-zero for cl kernels (ie. clover).
	 * This isn't a perfect test because I guess it is possible (but
	 * uncommon) for none of the kernel parameters to be a global,
	 * but ctx->set_global_bindings() can't fail, so this is the next
	 * best place to fail if we need a newer version of the kernel driver:
	 */
	if ((cso->req_input_mem > 0) &&
			fd_device_version(ctx->dev) < FD_VERSION_BO_IOVA) {
		return NULL;
	}

	struct ir3_compiler *compiler = ctx->screen->compiler;
	struct fd5_compute_stateobj *so = CALLOC_STRUCT(fd5_compute_stateobj);
	so->shader = ir3_shader_create_compute(compiler, cso, &ctx->debug, pctx->screen);
	return so;
}

static void
fd5_delete_compute_state(struct pipe_context *pctx, void *hwcso)
{
	struct fd5_compute_stateobj *so = hwcso;
	ir3_shader_state_delete(pctx, so->shader);
	free(so);
}

/* maybe move to fd5_program? */
static void
cs_program_emit(struct fd_ringbuffer *ring, struct ir3_shader_variant *v,
		const struct pipe_grid_info *info)
{
	const unsigned *local_size = info->block;
	const struct ir3_info *i = &v->info;
	enum a3xx_threadsize thrsz;
	unsigned instrlen = v->instrlen;

	/* If the shader is more than 32*16 instructions, don't preload it.
	 * Similar to the combined restriction of 64*16 for VS+FS:
	 */
	if (instrlen > 32)
		instrlen = 0;

	/* maybe the limit should be 1024.. basically if we can't have full
	 * occupancy, use TWO_QUAD mode to reduce divergence penalty.
	 */
	if ((local_size[0] * local_size[1] * local_size[2]) < 512) {
		thrsz = TWO_QUADS;
	} else {
		thrsz = FOUR_QUADS;
	}

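	/* Emit the per-shader compute state: threadsize, register footprint,
	 * shader config and iova.. roughly mirroring the 3d program state
	 * setup:
	 */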
	OUT_PKT4(ring, REG_A5XX_SP_SP_CNTL, 1);
	OUT_RING(ring, 0x00000000);        /* SP_SP_CNTL */

	OUT_PKT4(ring, REG_A5XX_HLSQ_CONTROL_0_REG, 1);
	OUT_RING(ring, A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(TWO_QUADS) |
		A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(thrsz) |
		0x00000880 /* XXX */);

	OUT_PKT4(ring, REG_A5XX_SP_CS_CTRL_REG0, 1);
	OUT_RING(ring, A5XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
		A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
		A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
		A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(0x3) |  // XXX need to figure this out somehow..
		0x6 /* XXX */);

	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CONFIG, 1);
	OUT_RING(ring, A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(0) |
		A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(0) |
		A5XX_HLSQ_CS_CONFIG_ENABLED);

	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CNTL, 1);
	OUT_RING(ring, A5XX_HLSQ_CS_CNTL_INSTRLEN(instrlen) |
		COND(v->has_ssbo, A5XX_HLSQ_CS_CNTL_SSBO_ENABLE));

	OUT_PKT4(ring, REG_A5XX_SP_CS_CONFIG, 1);
	OUT_RING(ring, A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(0) |
		A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(0) |
		A5XX_SP_CS_CONFIG_ENABLED);

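	/* v->constlen is in units of vec4.. HLSQ_CS_CONSTLEN appears to be in
	 * units of 4 vec4s, hence the divide by four:
	 */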
	assert(v->constlen % 4 == 0);
	unsigned constlen = v->constlen / 4;
	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CONSTLEN, 2);
	OUT_RING(ring, constlen);          /* HLSQ_CS_CONSTLEN */
	OUT_RING(ring, instrlen);          /* HLSQ_CS_INSTRLEN */

	OUT_PKT4(ring, REG_A5XX_SP_CS_OBJ_START_LO, 2);
	OUT_RELOC(ring, v->bo, 0, 0, 0);   /* SP_CS_OBJ_START_LO/HI */

	OUT_PKT4(ring, REG_A5XX_HLSQ_UPDATE_CNTL, 1);
	OUT_RING(ring, 0x1f00000);

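	/* regids that the driver-supplied sysvals get loaded into..
	 * ir3_find_sysval_regid() returns regid(63, 0) (ie. unused) when the
	 * variant doesn't reference the sysval:
	 */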
	uint32_t local_invocation_id, work_group_id;
	local_invocation_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
	work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);

	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CNTL_0, 2);
	OUT_RING(ring, A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
		A5XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
		A5XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
		A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
	OUT_RING(ring, 0x1);               /* HLSQ_CS_CNTL_1 */

	if (instrlen > 0)
		fd5_emit_shader(ring, v);
}

static void
fd5_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
{
	struct fd5_compute_stateobj *so = ctx->compute;
	struct ir3_shader_key key = {};
	struct ir3_shader_variant *v;
	struct fd_ringbuffer *ring = ctx->batch->draw;
	unsigned nglobal = 0;

	v = ir3_shader_variant(so->shader, key, false, &ctx->debug);
	if (!v)
		return;

	if (ctx->dirty_shader[PIPE_SHADER_COMPUTE] & FD_DIRTY_SHADER_PROG)
		cs_program_emit(ring, v, info);

	fd5_emit_cs_state(ctx, ring, v);
	fd5_emit_cs_consts(v, ring, ctx, info);

	foreach_bit(i, ctx->global_bindings.enabled_mask)
		nglobal++;

	if (nglobal > 0) {
		/* global resources don't otherwise get an OUT_RELOC(), since
		 * the raw ptr address is emitted in ir3_emit_cs_consts().
		 * So to make the kernel aware that these buffers are referenced
		 * by the batch, emit dummy relocs as part of a no-op packet
		 * payload:
		 */
		OUT_PKT7(ring, CP_NOP, 2 * nglobal);
		foreach_bit(i, ctx->global_bindings.enabled_mask) {
			struct pipe_resource *prsc = ctx->global_bindings.buf[i];
			OUT_RELOC(ring, fd_resource(prsc)->bo, 0, 0, 0);
		}
	}

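	/* The NDRANGE registers are specified in invocations, ie. global size
	 * is local size times the number of groups, with zero global offset:
	 */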
	const unsigned *local_size = info->block; // v->shader->nir->info->cs.local_size;
	const unsigned *num_groups = info->grid;
	/* for some reason, mesa/st doesn't set info->work_dim, so just assume 3: */
	const unsigned work_dim = info->work_dim ? info->work_dim : 3;
	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_NDRANGE_0, 7);
	OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
		A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
		A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
		A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
	OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
	OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
	OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_KERNEL_GROUP_X, 3);
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_X */
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_Y */
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_Z */

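	/* For an indirect dispatch the CP reads the workgroup counts from a
	 * buffer; the flush below is presumably so that any prior writes to
	 * that buffer are visible first:
	 */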
	if (info->indirect) {
		struct fd_resource *rsc = fd_resource(info->indirect);

		fd5_emit_flush(ctx, ring);

		OUT_PKT7(ring, CP_EXEC_CS_INDIRECT, 4);
		OUT_RING(ring, 0x00000000);
		OUT_RELOC(ring, rsc->bo, info->indirect_offset, 0, 0);  /* ADDR_LO/HI */
		OUT_RING(ring, A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
				A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
				A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
	} else {
		OUT_PKT7(ring, CP_EXEC_CS, 4);
		OUT_RING(ring, 0x00000000);
		OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(info->grid[0]));
		OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(info->grid[1]));
		OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(info->grid[2]));
	}
}

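/* Hook up the compute state and launch_grid entrypoints at context
 * creation:
 */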
void
fd5_compute_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->launch_grid = fd5_launch_grid;
	pctx->create_compute_state = fd5_create_compute_state;
	pctx->delete_compute_state = fd5_delete_compute_state;
}