/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_build_pm4.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"

#define GFX6_MAX_VIEWPORT_SIZE 16384
#define GFX12_MAX_VIEWPORT_SIZE 32768 /* TODO: this should be 64K, but maxx/maxy don't have enough bits */

static void si_get_small_prim_cull_info(struct si_context *sctx, struct si_small_prim_cull_info *out)
{
   /* This is needed by small primitive culling because it's done
    * in screen space.
    */
   struct si_small_prim_cull_info info;
   unsigned num_samples = si_get_num_coverage_samples(sctx);
   assert(num_samples >= 1);

   info.scale[0] = sctx->viewports.states[0].scale[0];
   info.scale[1] = sctx->viewports.states[0].scale[1];
   info.translate[0] = sctx->viewports.states[0].translate[0];
   info.translate[1] = sctx->viewports.states[0].translate[1];

   /* The viewport shouldn't flip the X axis for the small prim culling to work. */
   assert(-info.scale[0] + info.translate[0] <= info.scale[0] + info.translate[0]);

   /* Compute the line width used by the rasterizer. */
   float line_width = sctx->queued.named.rasterizer->line_width;
   if (num_samples == 1)
      line_width = roundf(line_width);
   line_width = MAX2(line_width, 1);

   float half_line_width = line_width * 0.5;
   if (info.scale[0] == 0 || info.scale[1] == 0) {
      info.clip_half_line_width[0] = 0;
      info.clip_half_line_width[1] = 0;
   } else {
      info.clip_half_line_width[0] = half_line_width / fabs(info.scale[0]);
      info.clip_half_line_width[1] = half_line_width / fabs(info.scale[1]);
   }
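
   /* Worked example (illustrative numbers, not from the source): with a
    * 2-pixel-wide line and a 1920x1080 viewport, scale = (960, 540), so
    * half_line_width = 1 and clip_half_line_width = (1/960, 1/540), i.e.
    * half the line width expressed in clip-space units.
    */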

   /* If the Y axis is inverted (OpenGL default framebuffer), reverse it.
    * This is because the viewport transformation inverts the clip space
    * bounding box, so min becomes max, which breaks small primitive
    * culling.
    */
   if (sctx->viewport0_y_inverted) {
      info.scale[1] = -info.scale[1];
      info.translate[1] = -info.translate[1];
   }

   /* This is what the hardware does. */
   if (!sctx->queued.named.rasterizer->half_pixel_center) {
      info.translate[0] += 0.5;
      info.translate[1] += 0.5;
   }

   memcpy(info.scale_no_aa, info.scale, sizeof(info.scale));
   memcpy(info.translate_no_aa, info.translate, sizeof(info.translate));

   /* Scale the framebuffer up, so that samples become pixels and small
    * primitive culling is the same for all sample counts.
    * This only works with the standard DX sample positions, because
    * the samples are evenly spaced on both X and Y axes.
    */
   for (unsigned i = 0; i < 2; i++) {
      info.scale[i] *= num_samples;
      info.translate[i] *= num_samples;
   }
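
   /* E.g. with 4x MSAA, the standard sample positions are spaced 1/4 pixel
    * apart per axis, so scaling by num_samples = 4 turns that spacing into
    * one unit and the culling test sees the same geometry as at 1 sample
    * (illustrative reading of the comment above).
    */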

   *out = info;
}

static void si_emit_cull_state(struct si_context *sctx, unsigned index)
{
   assert(sctx->screen->use_ngg_culling);

   struct si_small_prim_cull_info info;
   si_get_small_prim_cull_info(sctx, &info);

   if (!sctx->small_prim_cull_info_buf ||
       memcmp(&info, &sctx->last_small_prim_cull_info, sizeof(info))) {
      unsigned offset = 0;

      u_upload_data(sctx->b.const_uploader, 0, sizeof(info),
                    si_optimal_tcc_alignment(sctx, sizeof(info)), &info, &offset,
                    (struct pipe_resource **)&sctx->small_prim_cull_info_buf);

      sctx->small_prim_cull_info_address = sctx->small_prim_cull_info_buf->gpu_address + offset;
      sctx->last_small_prim_cull_info = info;
   }

   radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->small_prim_cull_info_buf,
                             RADEON_USAGE_READ | RADEON_PRIO_CONST_BUFFER);

   if (sctx->gfx_level >= GFX12) {
      gfx12_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
                            GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                            sctx->small_prim_cull_info_address);
   } else if (sctx->screen->info.has_set_sh_pairs_packed) {
      gfx11_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
                            GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                            sctx->small_prim_cull_info_address);
   } else {
      radeon_begin(&sctx->gfx_cs);
      radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                        sctx->small_prim_cull_info_address);
      radeon_end();
   }

   /* Better subpixel precision increases the efficiency of small
    * primitive culling. (more precision means a tighter bounding box
    * around primitives and more accurate elimination)
    */
   unsigned log_samples = util_logbase2(si_get_num_coverage_samples(sctx));
   unsigned precision = 7 - sctx->viewports.as_scissor[0].quant_mode * 2 - log_samples;
   assert((precision & ~0x7) == 0);
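
   /* E.g. 12.12 fixed point (quant_mode == 2) at 4x MSAA (log_samples == 2)
    * yields precision = 7 - 4 - 2 = 1, while 16.8 (quant_mode == 0) at
    * 1 sample yields the maximum of 7; both fit the 3-bit field checked
    * above (illustrative arithmetic).
    */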

   SET_FIELD(sctx->current_gs_state, GS_STATE_SMALL_PRIM_PRECISION, precision);
   SET_FIELD(sctx->current_gs_state, GS_STATE_SMALL_PRIM_PRECISION_LOG_SAMPLES, log_samples);
}

static void si_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                                  unsigned num_scissors, const struct pipe_scissor_state *state)
{
   struct si_context *ctx = (struct si_context *)pctx;
   int i;

   for (i = 0; i < num_scissors; i++)
      ctx->scissors[start_slot + i] = state[i];

   if (!ctx->queued.named.rasterizer->scissor_enable)
      return;

   si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}

/* Since the guard band disables clipping, we have to clip per-pixel
 * using a scissor.
 */
static void si_get_scissor_from_viewport(struct si_context *ctx,
                                         const struct pipe_viewport_state *vp,
                                         struct si_signed_scissor *scissor)
{
   float tmp, minx, miny, maxx, maxy;

   /* Convert (-1, -1) and (1, 1) from clip space into window space. */
   minx = -vp->scale[0] + vp->translate[0];
   miny = -vp->scale[1] + vp->translate[1];
   maxx = vp->scale[0] + vp->translate[0];
   maxy = vp->scale[1] + vp->translate[1];

   /* Handle inverted viewports. */
   if (minx > maxx) {
      tmp = minx;
      minx = maxx;
      maxx = tmp;
   }
   if (miny > maxy) {
      tmp = miny;
      miny = maxy;
      maxy = tmp;
   }

   /* Convert to integer and round up the max bounds. */
   scissor->minx = minx;
   scissor->miny = miny;
   scissor->maxx = ceilf(maxx);
   scissor->maxy = ceilf(maxy);
}
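
/* Worked example (illustrative numbers): a standard 1920x1080 viewport has
 * scale = (960, 540) and translate = (960, 540), so the conversion above
 * yields minx = -960 + 960 = 0 and maxx = 960 + 960 = 1920, and likewise
 * 0..1080 in Y.
 */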

static void si_clamp_scissor(struct si_context *ctx, struct pipe_scissor_state *out,
                             struct si_signed_scissor *scissor)
{
   unsigned max_scissor = ctx->gfx_level >= GFX12 ? GFX12_MAX_VIEWPORT_SIZE : GFX6_MAX_VIEWPORT_SIZE;

   out->minx = CLAMP(scissor->minx, 0, max_scissor);
   out->miny = CLAMP(scissor->miny, 0, max_scissor);
   out->maxx = CLAMP(scissor->maxx, 0, max_scissor);
   out->maxy = CLAMP(scissor->maxy, 0, max_scissor);
}

static void si_clip_scissor(struct pipe_scissor_state *out, struct pipe_scissor_state *clip)
{
   out->minx = MAX2(out->minx, clip->minx);
   out->miny = MAX2(out->miny, clip->miny);
   out->maxx = MIN2(out->maxx, clip->maxx);
   out->maxy = MIN2(out->maxy, clip->maxy);
}

static void si_scissor_make_union(struct si_signed_scissor *out, struct si_signed_scissor *in)
{
   out->minx = MIN2(out->minx, in->minx);
   out->miny = MIN2(out->miny, in->miny);
   out->maxx = MAX2(out->maxx, in->maxx);
   out->maxy = MAX2(out->maxy, in->maxy);
   out->quant_mode = MIN2(out->quant_mode, in->quant_mode);
}

static void si_emit_one_scissor(struct si_context *ctx, struct radeon_cmdbuf *cs,
                                struct si_signed_scissor *vp_scissor,
                                struct pipe_scissor_state *scissor)
{
   struct pipe_scissor_state final;

   if (ctx->vs_disables_clipping_viewport) {
      final.minx = final.miny = 0;
      final.maxx = final.maxy = ctx->gfx_level >= GFX12 ? GFX12_MAX_VIEWPORT_SIZE : GFX6_MAX_VIEWPORT_SIZE;
   } else {
      si_clamp_scissor(ctx, &final, vp_scissor);
   }

   if (scissor)
      si_clip_scissor(&final, scissor);

   radeon_begin(cs);
   if (ctx->gfx_level >= GFX12) {
      if (final.maxx == 0 || final.maxy == 0) {
         /* An empty scissor must be done like this because the bottom-right bounds are inclusive. */
         radeon_emit(S_028250_TL_X(1) | S_028250_TL_Y_GFX12(1));
         radeon_emit(S_028254_BR_X(0) | S_028254_BR_Y(0));
      } else {
         radeon_emit(S_028250_TL_X(final.minx) | S_028250_TL_Y_GFX12(final.miny));
         radeon_emit(S_028254_BR_X(final.maxx - 1) | S_028254_BR_Y(final.maxy - 1));
      }
   } else {
      /* Workaround for a hw bug on GFX6 that occurs when PA_SU_HARDWARE_SCREEN_OFFSET != 0 and
       * any_scissor.BR_X/Y <= 0.
       */
      if (ctx->gfx_level == GFX6 && (final.maxx == 0 || final.maxy == 0)) {
         radeon_emit(S_028250_TL_X(1) | S_028250_TL_Y_GFX6(1) | S_028250_WINDOW_OFFSET_DISABLE(1));
         radeon_emit(S_028254_BR_X(1) | S_028254_BR_Y(1));
      } else {
         radeon_emit(S_028250_TL_X(final.minx) | S_028250_TL_Y_GFX6(final.miny) |
                     S_028250_WINDOW_OFFSET_DISABLE(1));
         radeon_emit(S_028254_BR_X(final.maxx) | S_028254_BR_Y(final.maxy));
      }
   }
   radeon_end();
}

static void si_emit_guardband(struct si_context *sctx, unsigned index)
{
   const struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
   struct si_signed_scissor vp_as_scissor;
   struct pipe_viewport_state vp;
   float left, top, right, bottom, max_range, guardband_x, guardband_y;

   if (sctx->vs_writes_viewport_index) {
      /* Shaders can draw to any viewport. Make a union of all
       * viewports. */
      vp_as_scissor = sctx->viewports.as_scissor[0];
      for (unsigned i = 1; i < SI_MAX_VIEWPORTS; i++) {
         si_scissor_make_union(&vp_as_scissor, &sctx->viewports.as_scissor[i]);
      }
   } else {
      vp_as_scissor = sctx->viewports.as_scissor[0];
   }

   /* Blits don't set the viewport state. The vertex shader determines
    * the viewport size by scaling the coordinates, so we don't know
    * how large the viewport is. Assume the worst case.
    */
   if (sctx->vs_disables_clipping_viewport)
      vp_as_scissor.quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;

   /* Determine the optimal hardware screen offset to center the viewport
    * within the viewport range in order to maximize the guardband size.
    */
   int hw_screen_offset_x = (vp_as_scissor.maxx + vp_as_scissor.minx) / 2;
   int hw_screen_offset_y = (vp_as_scissor.maxy + vp_as_scissor.miny) / 2;

   /* GFX6-GFX7 need to align the offset to an ubertile consisting of all SEs. */
   const unsigned hw_screen_offset_alignment =
      sctx->gfx_level >= GFX11 ? 32 :
      sctx->gfx_level >= GFX8 ? 16 : MAX2(sctx->screen->se_tile_repeat, 16);
   const unsigned max_hw_screen_offset = sctx->gfx_level >= GFX12 ? 32752 : 8176;

   /* Indexed by quantization modes */
   static int max_viewport_size[] = {65536, 16384, 4096};

   /* Ensure that the whole viewport stays representable in
    * absolute coordinates.
    * See comment in si_set_viewport_states.
    */
   assert(vp_as_scissor.maxx <= max_viewport_size[vp_as_scissor.quant_mode] &&
          vp_as_scissor.maxy <= max_viewport_size[vp_as_scissor.quant_mode]);

   hw_screen_offset_x = CLAMP(hw_screen_offset_x, 0, max_hw_screen_offset);
   hw_screen_offset_y = CLAMP(hw_screen_offset_y, 0, max_hw_screen_offset);

   /* Align the screen offset by dropping the low bits. */
   hw_screen_offset_x &= ~(hw_screen_offset_alignment - 1);
   hw_screen_offset_y &= ~(hw_screen_offset_alignment - 1);
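
   /* E.g. with an alignment of 32 the mask is ~31, so an offset of 1000
    * would be rounded down to 992 (illustrative numbers).
    */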

   /* Apply the offset to center the viewport and maximize the guardband. */
   vp_as_scissor.minx -= hw_screen_offset_x;
   vp_as_scissor.maxx -= hw_screen_offset_x;
   vp_as_scissor.miny -= hw_screen_offset_y;
   vp_as_scissor.maxy -= hw_screen_offset_y;

   /* Reconstruct the viewport transformation from the scissor. */
   vp.translate[0] = (vp_as_scissor.minx + vp_as_scissor.maxx) / 2.0;
   vp.translate[1] = (vp_as_scissor.miny + vp_as_scissor.maxy) / 2.0;
   vp.scale[0] = vp_as_scissor.maxx - vp.translate[0];
   vp.scale[1] = vp_as_scissor.maxy - vp.translate[1];

   /* Treat a 0x0 viewport as 1x1 to prevent division by zero. */
   if (vp_as_scissor.minx == vp_as_scissor.maxx)
      vp.scale[0] = 0.5;
   if (vp_as_scissor.miny == vp_as_scissor.maxy)
      vp.scale[1] = 0.5;

   /* Find the biggest guard band that is inside the supported viewport
    * range. The guard band is specified as a horizontal and vertical
    * distance from (0,0) in clip space.
    *
    * This is done by applying the inverse viewport transformation
    * on the viewport limits to get those limits in clip space.
    *
    * The viewport range is [-max_viewport_size/2 - 1, max_viewport_size/2].
    * (-1 to the min coord because max_viewport_size is odd and ViewportBounds
    * Min/Max are -32768, 32767).
    */
   assert(vp_as_scissor.quant_mode < ARRAY_SIZE(max_viewport_size));
   max_range = max_viewport_size[vp_as_scissor.quant_mode] / 2;
   left = (-max_range - 1 - vp.translate[0]) / vp.scale[0];
   right = (max_range - vp.translate[0]) / vp.scale[0];
   top = (-max_range - 1 - vp.translate[1]) / vp.scale[1];
   bottom = (max_range - vp.translate[1]) / vp.scale[1];

   assert(left <= -1 && top <= -1 && right >= 1 && bottom >= 1);

   guardband_x = MIN2(-left, right);
   guardband_y = MIN2(-top, bottom);
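
   /* Worked example (illustrative numbers): a 1920-wide viewport centered by
    * the screen offset gives vp.scale[0] = 960 and vp.translate[0] = 0. With
    * 16.8 quantization, max_range = 65536 / 2 = 32768, so
    * right = 32768 / 960 ~= 34.1, i.e. guardband_x is about 34 clip-space
    * units on each side.
    */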

   float discard_x = 1.0;
   float discard_y = 1.0;
   float distance = sctx->current_clip_discard_distance;

   /* Add half the point size / line width */
   discard_x += distance / (2.0 * vp.scale[0]);
   discard_y += distance / (2.0 * vp.scale[1]);

   /* Discard primitives that would lie entirely outside the viewport area. */
   discard_x = MIN2(discard_x, guardband_x);
   discard_y = MIN2(discard_y, guardband_y);

   unsigned pa_su_vtx_cntl = S_028BE4_PIX_CENTER(rs->half_pixel_center) |
                             S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
                             S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH +
                                                 vp_as_scissor.quant_mode);
   unsigned pa_su_hardware_screen_offset = S_028234_HW_SCREEN_OFFSET_X(hw_screen_offset_x >> 4) |
                                           S_028234_HW_SCREEN_OFFSET_Y(hw_screen_offset_y >> 4);
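
   /* The HW_SCREEN_OFFSET fields are in units of 16 pixels, hence the >> 4
    * above; e.g. the offset of 960 from the example above would be programmed
    * as 60 (illustrative numbers).
    */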

   /* If any of the GB registers is updated, all of them must be updated.
    * R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, R_028BEC_PA_CL_GB_VERT_DISC_ADJ
    * R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ, R_028BF4_PA_CL_GB_HORZ_DISC_ADJ
    */
   if (sctx->gfx_level >= GFX12) {
      radeon_begin(&sctx->gfx_cs);
      gfx12_begin_context_regs();
      gfx12_opt_set_context_reg(R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
                                pa_su_vtx_cntl);
      gfx12_opt_set_context_reg4(R_02842C_PA_CL_GB_VERT_CLIP_ADJ,
                                 SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ,
                                 fui(guardband_y), fui(discard_y),
                                 fui(guardband_x), fui(discard_x));
      gfx12_opt_set_context_reg(R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                                SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                                pa_su_hardware_screen_offset);
      gfx12_end_context_regs();
      radeon_end(); /* don't track context rolls on GFX12 */
   } else if (sctx->screen->info.has_set_context_pairs_packed) {
      radeon_begin(&sctx->gfx_cs);
      gfx11_begin_packed_context_regs();
      gfx11_opt_set_context_reg(R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
                                pa_su_vtx_cntl);
      gfx11_opt_set_context_reg4(R_028BE8_PA_CL_GB_VERT_CLIP_ADJ,
                                 SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ,
                                 fui(guardband_y), fui(discard_y),
                                 fui(guardband_x), fui(discard_x));
      gfx11_opt_set_context_reg(R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                                SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                                pa_su_hardware_screen_offset);
      gfx11_end_packed_context_regs();
      radeon_end(); /* don't track context rolls on GFX11 */
   } else {
      radeon_begin(&sctx->gfx_cs);
      radeon_opt_set_context_reg5(R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
                                  pa_su_vtx_cntl,
                                  fui(guardband_y), fui(discard_y),
                                  fui(guardband_x), fui(discard_x));
      radeon_opt_set_context_reg(R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                                 SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                                 pa_su_hardware_screen_offset);
      radeon_end_update_context_roll();
   }
}

static void si_emit_scissors(struct si_context *ctx, unsigned index)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_scissor_state *states = ctx->scissors;
   bool scissor_enabled = ctx->queued.named.rasterizer->scissor_enable;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      struct si_signed_scissor *vp = &ctx->viewports.as_scissor[0];

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
      radeon_end();

      si_emit_one_scissor(ctx, cs, vp, scissor_enabled ? &states[0] : NULL);
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_028250_PA_SC_VPORT_SCISSOR_0_TL, SI_MAX_VIEWPORTS * 2);
   radeon_end();

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_emit_one_scissor(ctx, cs, &ctx->viewports.as_scissor[i],
                          scissor_enabled ? &states[i] : NULL);
   }
}

static void si_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                                   unsigned num_viewports, const struct pipe_viewport_state *state)
{
   struct si_context *ctx = (struct si_context *)pctx;
   int i;

   for (i = 0; i < num_viewports; i++) {
      unsigned index = start_slot + i;
      struct si_signed_scissor *scissor = &ctx->viewports.as_scissor[index];

      ctx->viewports.states[index] = state[i];

      si_get_scissor_from_viewport(ctx, &state[i], scissor);

      int max_corner = MAX2(
         MAX2(abs(scissor->maxx), abs(scissor->maxy)),
         MAX2(abs(scissor->minx), abs(scissor->miny)));

      /* Determine the best quantization mode (subpixel precision),
       * but also leave enough space for the guardband.
       *
       * Note that primitive binning requires QUANT_MODE == 16_8 on Vega10
       * and Raven1 for line and rectangle primitive types to work correctly.
       * Always use 16_8 whenever primitive binning may occur.
       */
      if ((ctx->family == CHIP_VEGA10 || ctx->family == CHIP_RAVEN) && ctx->screen->dpbb_allowed)
         max_corner = 16384; /* Use QUANT_MODE == 16_8. */

      /* Another constraint is that all coordinates in the viewport
       * are representable in fixed point with respect to the
       * surface origin.
       *
       * It means that PA_SU_HARDWARE_SCREEN_OFFSET can't be given
       * an offset that would make the upper corner of the viewport
       * greater than the maximum representable number post
       * quantization, ie 2^quant_bits.
       *
       * This does not matter for 14.10 and 16.8 formats since the
       * offset is already limited at 8k, but it means we can't use
       * 12.12 if we are drawing to some pixels outside the lower
       * 4k x 4k of the render target.
       */

      if (max_corner <= 1024) /* 4K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH;
      else if (max_corner <= 4096) /* 16K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH;
      else /* 64K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;
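
      /* E.g. a 1920x1080 viewport gives max_corner = 1920, selecting
       * 14.10 fixed point; only targets within 1024x1024 get 12.12, and
       * anything beyond 4096 falls back to 16.8 (illustrative numbers).
       */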
   }

   if (start_slot == 0) {
      ctx->viewport0_y_inverted = state->scale[1] < 0;
      si_update_ngg_cull_face_state(ctx);

      /* NGG cull state uses the viewport and quant mode. */
      if (ctx->screen->use_ngg_culling)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);
   }

   si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}

static void gfx6_emit_one_viewport(struct si_context *ctx, struct pipe_viewport_state *state)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;

   radeon_begin(cs);
   radeon_emit(fui(state->scale[0]));
   radeon_emit(fui(state->translate[0]));
   radeon_emit(fui(state->scale[1]));
   radeon_emit(fui(state->translate[1]));
   radeon_emit(fui(state->scale[2]));
   radeon_emit(fui(state->translate[2]));
   radeon_end();
}

static void gfx6_emit_viewports(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      radeon_begin(cs);
      radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, 6);
      radeon_end();

      gfx6_emit_one_viewport(ctx, &states[0]);
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, SI_MAX_VIEWPORTS * 6);
   radeon_end();

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++)
      gfx6_emit_one_viewport(ctx, &states[i]);
}

static inline void si_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
                                         bool window_space_position, float *zmin, float *zmax)
{
   if (window_space_position) {
      *zmin = 0;
      *zmax = 1;
      return;
   }
   util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
}

static void gfx6_emit_depth_ranges(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;
   bool clip_halfz = ctx->queued.named.rasterizer->clip_halfz;
   bool window_space = ctx->vs_disables_clipping_viewport;
   float zmin, zmax;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      si_viewport_zmin_zmax(&states[0], clip_halfz, window_space, &zmin, &zmax);

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_0282D0_PA_SC_VPORT_ZMIN_0, 2);
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
      radeon_end();
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_0282D0_PA_SC_VPORT_ZMIN_0, SI_MAX_VIEWPORTS * 2);
   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_viewport_zmin_zmax(&states[i], clip_halfz, window_space, &zmin, &zmax);
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
   }
   radeon_end();
}

static void gfx6_emit_viewport_states(struct si_context *ctx, unsigned index)
{
   gfx6_emit_viewports(ctx);
   gfx6_emit_depth_ranges(ctx);
}

static void gfx12_emit_viewport_states(struct si_context *ctx, unsigned index)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;
   bool clip_halfz = ctx->queued.named.rasterizer->clip_halfz;
   bool window_space = ctx->vs_disables_clipping_viewport;
   float zmin, zmax;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      si_viewport_zmin_zmax(&states[0], clip_halfz, window_space, &zmin, &zmax);

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, 8);
      radeon_emit(fui(states[0].scale[0]));
      radeon_emit(fui(states[0].translate[0]));
      radeon_emit(fui(states[0].scale[1]));
      radeon_emit(fui(states[0].translate[1]));
      radeon_emit(fui(states[0].scale[2]));
      radeon_emit(fui(states[0].translate[2]));
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
      radeon_end();
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is (or was) a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, SI_MAX_VIEWPORTS * 8);

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_viewport_zmin_zmax(&states[i], clip_halfz, window_space, &zmin, &zmax);

      radeon_emit(fui(states[i].scale[0]));
      radeon_emit(fui(states[i].translate[0]));
      radeon_emit(fui(states[i].scale[1]));
      radeon_emit(fui(states[i].translate[1]));
      radeon_emit(fui(states[i].scale[2]));
      radeon_emit(fui(states[i].translate[2]));
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
   }
   radeon_end();
}

/**
 * This reacts to 2 state changes:
 * - VS.writes_viewport_index
 * - VS output position in window space (enable/disable)
 *
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
void si_update_vs_viewport_state(struct si_context *ctx)
{
   struct si_shader_ctx_state *vs = si_get_vs(ctx);
   struct si_shader_info *info = vs->cso ? &vs->cso->info : NULL;
   bool vs_window_space;

   if (!info)
      return;

   /* When the VS disables clipping and viewport transformation. */
   vs_window_space = vs->cso->stage == MESA_SHADER_VERTEX && info->base.vs.window_space_position;

   if (ctx->vs_disables_clipping_viewport != vs_window_space) {
      ctx->vs_disables_clipping_viewport = vs_window_space;
      si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   }

   /* Viewport index handling. */
   if (ctx->vs_writes_viewport_index == info->writes_viewport_index)
      return;

   /* This changes how the guardband is computed. */
   ctx->vs_writes_viewport_index = info->writes_viewport_index;
   si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);

   /* Emit scissors and viewports that were enabled by having
    * the ViewportIndex output.
    */
   if (info->writes_viewport_index) {
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   }
}

static void si_emit_window_rectangles(struct si_context *sctx, unsigned index)
{
   /* There are four clipping rectangles. Their corner coordinates are inclusive.
    * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
    * on whether the pixel is inside cliprects 0-3, respectively. For example,
    * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
    * the number 3 (binary 0011).
    *
    * If CLIPRECT_RULE & (1 << number), the pixel is rasterized.
    */
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   static const unsigned outside[4] = {
      /* outside rectangle 0 */
      V_02820C_OUT | V_02820C_IN_1 | V_02820C_IN_2 | V_02820C_IN_21 | V_02820C_IN_3 |
         V_02820C_IN_31 | V_02820C_IN_32 | V_02820C_IN_321,
      /* outside rectangles 0, 1 */
      V_02820C_OUT | V_02820C_IN_2 | V_02820C_IN_3 | V_02820C_IN_32,
      /* outside rectangles 0, 1, 2 */
      V_02820C_OUT | V_02820C_IN_3,
      /* outside rectangles 0, 1, 2, 3 */
      V_02820C_OUT,
   };
   const unsigned disabled = 0xffff; /* all inside and outside cases */
   unsigned num_rectangles = sctx->num_window_rectangles;
   struct pipe_scissor_state *rects = sctx->window_rectangles;
   unsigned rule;

   assert(num_rectangles <= 4);

   if (num_rectangles == 0)
      rule = disabled;
   else if (sctx->window_rectangles_include)
      rule = ~outside[num_rectangles - 1];
   else
      rule = outside[num_rectangles - 1];
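
   /* E.g. with two "exclusive" rectangles, rule = outside[1] rasterizes only
    * pixels outside both rects 0 and 1; in "include" mode the mask is
    * inverted, so pixels inside either rectangle pass instead.
    */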

   if (sctx->gfx_level >= GFX12) {
      radeon_begin(cs);
      gfx12_begin_context_regs();
      gfx12_opt_set_context_reg(R_02820C_PA_SC_CLIPRECT_RULE, SI_TRACKED_PA_SC_CLIPRECT_RULE, rule);

      if (num_rectangles) {
         for (unsigned i = 0; i < num_rectangles; i++) {
            gfx12_set_context_reg(R_028210_PA_SC_CLIPRECT_0_TL + i * 8,
                                  S_028210_TL_X(rects[i].minx) | S_028210_TL_Y(rects[i].miny));
            gfx12_set_context_reg(R_028214_PA_SC_CLIPRECT_0_BR + i * 8,
                                  S_028214_BR_X(rects[i].maxx) | S_028214_BR_Y(rects[i].maxy));
         }

         for (unsigned i = 0; i < num_rectangles; i++) {
            gfx12_set_context_reg(R_028374_PA_SC_CLIPRECT_0_EXT + i * 4,
                                  S_028374_TL_X_EXT(rects[i].minx >> 15) |
                                  S_028374_TL_Y_EXT(rects[i].miny >> 15) |
                                  S_028374_BR_X_EXT(rects[i].maxx >> 15) |
                                  S_028374_BR_Y_EXT(rects[i].maxy >> 15));
         }
      }
      gfx12_end_context_regs();
      radeon_end();
   } else {
      radeon_begin(cs);
      radeon_opt_set_context_reg(R_02820C_PA_SC_CLIPRECT_RULE, SI_TRACKED_PA_SC_CLIPRECT_RULE,
                                 rule);
      if (num_rectangles) {
         radeon_set_context_reg_seq(R_028210_PA_SC_CLIPRECT_0_TL, num_rectangles * 2);
         for (unsigned i = 0; i < num_rectangles; i++) {
            radeon_emit(S_028210_TL_X(rects[i].minx) | S_028210_TL_Y(rects[i].miny));
            radeon_emit(S_028214_BR_X(rects[i].maxx) | S_028214_BR_Y(rects[i].maxy));
         }
      }
      radeon_end();
   }
}

static void si_set_window_rectangles(struct pipe_context *ctx, bool include,
                                     unsigned num_rectangles,
                                     const struct pipe_scissor_state *rects)
{
   struct si_context *sctx = (struct si_context *)ctx;

   sctx->num_window_rectangles = num_rectangles;
   sctx->window_rectangles_include = include;
   if (num_rectangles) {
      memcpy(sctx->window_rectangles, rects, sizeof(*rects) * num_rectangles);
   }

   si_mark_atom_dirty(sctx, &sctx->atoms.s.window_rectangles);
}

void si_init_viewport_functions(struct si_context *ctx)
{
   ctx->atoms.s.guardband.emit = si_emit_guardband;
   ctx->atoms.s.scissors.emit = si_emit_scissors;
   if (ctx->gfx_level >= GFX12)
      ctx->atoms.s.viewports.emit = gfx12_emit_viewport_states;
   else
      ctx->atoms.s.viewports.emit = gfx6_emit_viewport_states;
   ctx->atoms.s.window_rectangles.emit = si_emit_window_rectangles;
   ctx->atoms.s.ngg_cull_state.emit = si_emit_cull_state;

   ctx->b.set_scissor_states = si_set_scissor_states;
   ctx->b.set_viewport_states = si_set_viewport_states;
   ctx->b.set_window_rectangles = si_set_window_rectangles;

   for (unsigned i = 0; i < 16; i++)
      ctx->viewports.as_scissor[i].quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;
}