/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_build_pm4.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"

#define SI_MAX_SCISSOR 16384

static void si_get_small_prim_cull_info(struct si_context *sctx, struct si_small_prim_cull_info *out)
{
   /* This is needed by the small primitive culling, because it's done
    * in screen space.
    */
   struct si_small_prim_cull_info info;
   unsigned num_samples = si_get_num_coverage_samples(sctx);
   assert(num_samples >= 1);

   info.scale[0] = sctx->viewports.states[0].scale[0];
   info.scale[1] = sctx->viewports.states[0].scale[1];
   info.translate[0] = sctx->viewports.states[0].translate[0];
   info.translate[1] = sctx->viewports.states[0].translate[1];

   /* The viewport shouldn't flip the X axis for the small prim culling to work. */
   assert(-info.scale[0] + info.translate[0] <= info.scale[0] + info.translate[0]);

   /* Compute the line width used by the rasterizer. */
   float line_width = sctx->queued.named.rasterizer->line_width;
   if (num_samples == 1)
      line_width = roundf(line_width);
   line_width = MAX2(line_width, 1);

   float half_line_width = line_width * 0.5;
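   /* Note: the viewport scale is half the viewport size in pixels, so
    * dividing a pixel distance by |scale| below converts it to clip-space
    * units. */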
   if (info.scale[0] == 0 || info.scale[1] == 0) {
      info.clip_half_line_width[0] = 0;
      info.clip_half_line_width[1] = 0;
   } else {
      info.clip_half_line_width[0] = half_line_width / fabs(info.scale[0]);
      info.clip_half_line_width[1] = half_line_width / fabs(info.scale[1]);
   }

   /* If the Y axis is inverted (OpenGL default framebuffer), reverse it.
    * This is because the viewport transformation inverts the clip space
    * bounding box, so min becomes max, which breaks small primitive
    * culling.
    */
   if (sctx->viewport0_y_inverted) {
      info.scale[1] = -info.scale[1];
      info.translate[1] = -info.translate[1];
   }

   /* This is what the hardware does. */
   if (!sctx->queued.named.rasterizer->half_pixel_center) {
      info.translate[0] += 0.5;
      info.translate[1] += 0.5;
   }

   memcpy(info.scale_no_aa, info.scale, sizeof(info.scale));
   memcpy(info.translate_no_aa, info.translate, sizeof(info.translate));

   /* Scale the framebuffer up, so that samples become pixels and small
    * primitive culling is the same for all sample counts.
    * This only works with the standard DX sample positions, because
    * the samples are evenly spaced on both X and Y axes.
    */
   for (unsigned i = 0; i < 2; i++) {
      info.scale[i] *= num_samples;
      info.translate[i] *= num_samples;
   }

   *out = info;
}

static void si_emit_cull_state(struct si_context *sctx, unsigned index)
{
   assert(sctx->screen->use_ngg_culling);

   struct si_small_prim_cull_info info;
   si_get_small_prim_cull_info(sctx, &info);

   if (!sctx->small_prim_cull_info_buf ||
       memcmp(&info, &sctx->last_small_prim_cull_info, sizeof(info))) {
      unsigned offset = 0;

      u_upload_data(sctx->b.const_uploader, 0, sizeof(info),
                    si_optimal_tcc_alignment(sctx, sizeof(info)), &info, &offset,
                    (struct pipe_resource **)&sctx->small_prim_cull_info_buf);

      sctx->small_prim_cull_info_address = sctx->small_prim_cull_info_buf->gpu_address + offset;
      sctx->last_small_prim_cull_info = info;
   }

   /* This will end up in SGPR6 as (value << 8), shifted by the hw. */
   radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->small_prim_cull_info_buf,
                             RADEON_USAGE_READ | RADEON_PRIO_CONST_BUFFER);

   if (sctx->screen->info.has_set_sh_pairs_packed) {
      gfx11_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
                            GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                            sctx->small_prim_cull_info_address);
   } else {
      radeon_begin(&sctx->gfx_cs);
      radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                        sctx->small_prim_cull_info_address);
      radeon_end();
   }

   /* Better subpixel precision increases the efficiency of small
    * primitive culling. (more precision means a tighter bounding box
    * around primitives and more accurate elimination)
    */
   unsigned quant_mode = sctx->viewports.as_scissor[0].quant_mode;
   float small_prim_precision_no_aa = 0;
   unsigned num_samples = si_get_num_coverage_samples(sctx);

   if (quant_mode == SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH)
      small_prim_precision_no_aa = 1.0 / 4096.0;
   else if (quant_mode == SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH)
      small_prim_precision_no_aa = 1.0 / 1024.0;
   else
      small_prim_precision_no_aa = 1.0 / 256.0;

   float small_prim_precision = num_samples * small_prim_precision_no_aa;
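   /* E.g. 4 coverage samples with a 1/1024 base precision give
    * 4 / 1024 = 1/256, matching the sample-scaled framebuffer above. */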

   /* Set VS_STATE.SMALL_PRIM_PRECISION for NGG culling.
    *
    * small_prim_precision is 1 / 2^n. We only need n between 5 (1/32) and 12 (1/4096).
    * Such a floating point value can be packed into 4 bits as follows:
    * If we pass the first 4 bits of the exponent to the shader and set the next 3 bits
    * to 1, we'll get the number exactly because all other bits are always 0. See:
    *                                                               1
    * value  =  (0x70 | value.exponent[0:3]) << 23  =  ------------------------------
    *                                                  2 ^ (15 - value.exponent[0:3])
    *
    * So pass only the first 4 bits of the float exponent to the shader.
    */
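   /* Worked example (illustrative): 1/1024 = 2^-10 has the biased float
    * exponent 127 - 10 = 117 = 0x75, so exponent[0:3] = 0x5. The shader
    * reconstructs (0x70 | 0x5) << 23 = 0x75 << 23, which is exactly 2^-10. */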
   SET_FIELD(sctx->current_gs_state, GS_STATE_SMALL_PRIM_PRECISION_NO_AA,
             (fui(small_prim_precision_no_aa) >> 23) & 0xf);
   SET_FIELD(sctx->current_gs_state, GS_STATE_SMALL_PRIM_PRECISION,
             (fui(small_prim_precision) >> 23) & 0xf);
}

static void si_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                                  unsigned num_scissors, const struct pipe_scissor_state *state)
{
   struct si_context *ctx = (struct si_context *)pctx;
   int i;

   for (i = 0; i < num_scissors; i++)
      ctx->scissors[start_slot + i] = state[i];

   if (!ctx->queued.named.rasterizer->scissor_enable)
      return;

   si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}

/* Since the guard band disables clipping, we have to clip per-pixel
 * using a scissor.
 */
static void si_get_scissor_from_viewport(struct si_context *ctx,
                                         const struct pipe_viewport_state *vp,
                                         struct si_signed_scissor *scissor)
{
   float tmp, minx, miny, maxx, maxy;

   /* Convert (-1, -1) and (1, 1) from clip space into window space. */
   minx = -vp->scale[0] + vp->translate[0];
   miny = -vp->scale[1] + vp->translate[1];
   maxx = vp->scale[0] + vp->translate[0];
   maxy = vp->scale[1] + vp->translate[1];
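   /* E.g. a hypothetical 1920x1080 viewport at the origin has
    * scale = (960, 540) and translate = (960, 540), which yields the
    * window rect (0, 0)..(1920, 1080). */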

   /* Handle inverted viewports. */
   if (minx > maxx) {
      tmp = minx;
      minx = maxx;
      maxx = tmp;
   }
   if (miny > maxy) {
      tmp = miny;
      miny = maxy;
      maxy = tmp;
   }

   /* Convert to integer and round up the max bounds. */
   scissor->minx = minx;
   scissor->miny = miny;
   scissor->maxx = ceilf(maxx);
   scissor->maxy = ceilf(maxy);
}

static void si_clamp_scissor(struct si_context *ctx, struct pipe_scissor_state *out,
                             struct si_signed_scissor *scissor)
{
   out->minx = CLAMP(scissor->minx, 0, SI_MAX_SCISSOR);
   out->miny = CLAMP(scissor->miny, 0, SI_MAX_SCISSOR);
   out->maxx = CLAMP(scissor->maxx, 0, SI_MAX_SCISSOR);
   out->maxy = CLAMP(scissor->maxy, 0, SI_MAX_SCISSOR);
}

static void si_clip_scissor(struct pipe_scissor_state *out, struct pipe_scissor_state *clip)
{
   out->minx = MAX2(out->minx, clip->minx);
   out->miny = MAX2(out->miny, clip->miny);
   out->maxx = MIN2(out->maxx, clip->maxx);
   out->maxy = MIN2(out->maxy, clip->maxy);
}

static void si_scissor_make_union(struct si_signed_scissor *out, struct si_signed_scissor *in)
{
   out->minx = MIN2(out->minx, in->minx);
   out->miny = MIN2(out->miny, in->miny);
   out->maxx = MAX2(out->maxx, in->maxx);
   out->maxy = MAX2(out->maxy, in->maxy);
   out->quant_mode = MIN2(out->quant_mode, in->quant_mode);
}

static void si_emit_one_scissor(struct si_context *ctx, struct radeon_cmdbuf *cs,
                                struct si_signed_scissor *vp_scissor,
                                struct pipe_scissor_state *scissor)
{
   struct pipe_scissor_state final;

   if (ctx->vs_disables_clipping_viewport) {
      final.minx = final.miny = 0;
      final.maxx = final.maxy = SI_MAX_SCISSOR;
   } else {
      si_clamp_scissor(ctx, &final, vp_scissor);
   }

   if (scissor)
      si_clip_scissor(&final, scissor);

   radeon_begin(cs);
   /* Workaround for a hw bug on GFX6 that occurs when PA_SU_HARDWARE_SCREEN_OFFSET != 0 and
    * any_scissor.BR_X/Y <= 0.
    */
   if (ctx->gfx_level == GFX6 && (final.maxx == 0 || final.maxy == 0)) {
      radeon_emit(S_028250_TL_X(1) | S_028250_TL_Y(1) | S_028250_WINDOW_OFFSET_DISABLE(1));
      radeon_emit(S_028254_BR_X(1) | S_028254_BR_Y(1));
   } else {
      radeon_emit(S_028250_TL_X(final.minx) | S_028250_TL_Y(final.miny) |
                  S_028250_WINDOW_OFFSET_DISABLE(1));
      radeon_emit(S_028254_BR_X(final.maxx) | S_028254_BR_Y(final.maxy));
   }
   radeon_end();
}

static void si_emit_guardband(struct si_context *sctx, unsigned index)
{
   const struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
   struct si_signed_scissor vp_as_scissor;
   struct pipe_viewport_state vp;
   float left, top, right, bottom, max_range, guardband_x, guardband_y;

   if (sctx->vs_writes_viewport_index) {
      /* Shaders can draw to any viewport. Make a union of all
       * viewports. */
      vp_as_scissor = sctx->viewports.as_scissor[0];
      for (unsigned i = 1; i < SI_MAX_VIEWPORTS; i++) {
         si_scissor_make_union(&vp_as_scissor, &sctx->viewports.as_scissor[i]);
      }
   } else {
      vp_as_scissor = sctx->viewports.as_scissor[0];
   }

   /* Blits don't set the viewport state. The vertex shader determines
    * the viewport size by scaling the coordinates, so we don't know
    * how large the viewport is. Assume the worst case.
    */
   if (sctx->vs_disables_clipping_viewport)
      vp_as_scissor.quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;

   /* Determine the optimal hardware screen offset to center the viewport
    * within the viewport range in order to maximize the guardband size.
    */
   int hw_screen_offset_x = (vp_as_scissor.maxx + vp_as_scissor.minx) / 2;
   int hw_screen_offset_y = (vp_as_scissor.maxy + vp_as_scissor.miny) / 2;

   /* GFX6-GFX7 need to align the offset to an ubertile consisting of all SEs. */
   const unsigned hw_screen_offset_alignment =
      sctx->gfx_level >= GFX11 ? 32 :
      sctx->gfx_level >= GFX8 ? 16 : MAX2(sctx->screen->se_tile_repeat, 16);
   const unsigned max_hw_screen_offset = 8176;

   /* Indexed by quantization modes */
   static int max_viewport_size[] = {65536, 16384, 4096};

   /* Ensure that the whole viewport stays representable in
    * absolute coordinates.
    * See comment in si_set_viewport_states.
    */
   assert(vp_as_scissor.maxx <= max_viewport_size[vp_as_scissor.quant_mode] &&
          vp_as_scissor.maxy <= max_viewport_size[vp_as_scissor.quant_mode]);

   hw_screen_offset_x = CLAMP(hw_screen_offset_x, 0, max_hw_screen_offset);
   hw_screen_offset_y = CLAMP(hw_screen_offset_y, 0, max_hw_screen_offset);

   /* Align the screen offset by dropping the low bits. */
   hw_screen_offset_x &= ~(hw_screen_offset_alignment - 1);
   hw_screen_offset_y &= ~(hw_screen_offset_alignment - 1);

   /* Apply the offset to center the viewport and maximize the guardband. */
   vp_as_scissor.minx -= hw_screen_offset_x;
   vp_as_scissor.maxx -= hw_screen_offset_x;
   vp_as_scissor.miny -= hw_screen_offset_y;
   vp_as_scissor.maxy -= hw_screen_offset_y;

   /* Reconstruct the viewport transformation from the scissor. */
   vp.translate[0] = (vp_as_scissor.minx + vp_as_scissor.maxx) / 2.0;
   vp.translate[1] = (vp_as_scissor.miny + vp_as_scissor.maxy) / 2.0;
   vp.scale[0] = vp_as_scissor.maxx - vp.translate[0];
   vp.scale[1] = vp_as_scissor.maxy - vp.translate[1];
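   /* After centering, translate is the pixel-space center of the viewport
    * and scale its half-extent. */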

   /* Treat a 0x0 viewport as 1x1 to prevent division by zero. */
   if (vp_as_scissor.minx == vp_as_scissor.maxx)
      vp.scale[0] = 0.5;
   if (vp_as_scissor.miny == vp_as_scissor.maxy)
      vp.scale[1] = 0.5;
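   /* 0.5 because a 1x1 viewport has a half-extent of half a pixel. */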

   /* Find the biggest guard band that is inside the supported viewport
    * range. The guard band is specified as a horizontal and vertical
    * distance from (0,0) in clip space.
    *
    * This is done by applying the inverse viewport transformation
    * on the viewport limits to get those limits in clip space.
    *
    * The viewport range is [-max_viewport_size/2 - 1, max_viewport_size/2].
    * (-1 to the min coord because max_viewport_size is odd and ViewportBounds
    * Min/Max are -32768, 32767).
    */
   assert(vp_as_scissor.quant_mode < ARRAY_SIZE(max_viewport_size));
   max_range = max_viewport_size[vp_as_scissor.quant_mode] / 2;
   left = (-max_range - 1 - vp.translate[0]) / vp.scale[0];
   right = (max_range - vp.translate[0]) / vp.scale[0];
   top = (-max_range - 1 - vp.translate[1]) / vp.scale[1];
   bottom = (max_range - vp.translate[1]) / vp.scale[1];

   assert(left <= -1 && top <= -1 && right >= 1 && bottom >= 1);

   guardband_x = MIN2(-left, right);
   guardband_y = MIN2(-top, bottom);
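   /* Illustrative example: a 0..1920 viewport is centered to -960..960 by
    * the screen offset, giving translate[0] = 0 and scale[0] = 960. With
    * 14.10 quantization, max_range = 8192, so guardband_x ~= 8192 / 960
    * ~= 8.5 clip-space units. */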

   float discard_x = 1.0;
   float discard_y = 1.0;
   float distance = sctx->current_clip_discard_distance;

   /* Add half the point size / line width */
   discard_x += distance / (2.0 * vp.scale[0]);
   discard_y += distance / (2.0 * vp.scale[1]);
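   /* distance is in pixels; dividing the half distance by the viewport
    * scale converts it to clip-space units. */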

   /* Discard primitives that would lie entirely outside the viewport area. */
   discard_x = MIN2(discard_x, guardband_x);
   discard_y = MIN2(discard_y, guardband_y);

   unsigned pa_su_vtx_cntl = S_028BE4_PIX_CENTER(rs->half_pixel_center) |
                             S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
                             S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH +
                                                 vp_as_scissor.quant_mode);
   unsigned pa_su_hardware_screen_offset = S_028234_HW_SCREEN_OFFSET_X(hw_screen_offset_x >> 4) |
                                           S_028234_HW_SCREEN_OFFSET_Y(hw_screen_offset_y >> 4);

   /* If any of the GB registers is updated, all of them must be updated.
    * R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, R_028BEC_PA_CL_GB_VERT_DISC_ADJ
    * R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ, R_028BF4_PA_CL_GB_HORZ_DISC_ADJ
    */
   if (sctx->screen->info.has_set_context_pairs_packed) {
      radeon_begin(&sctx->gfx_cs);
      gfx11_begin_packed_context_regs();
      gfx11_opt_set_context_reg(R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
                                pa_su_vtx_cntl);
      gfx11_opt_set_context_reg4(R_028BE8_PA_CL_GB_VERT_CLIP_ADJ,
                                 SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ,
                                 fui(guardband_y), fui(discard_y),
                                 fui(guardband_x), fui(discard_x));
      gfx11_opt_set_context_reg(R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                                SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                                pa_su_hardware_screen_offset);
      gfx11_end_packed_context_regs();
      radeon_end(); /* don't track context rolls on GFX11 */
   } else {
      radeon_begin(&sctx->gfx_cs);
      radeon_opt_set_context_reg5(sctx, R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
                                  pa_su_vtx_cntl,
                                  fui(guardband_y), fui(discard_y),
                                  fui(guardband_x), fui(discard_x));
      radeon_opt_set_context_reg(sctx, R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                                 SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                                 pa_su_hardware_screen_offset);
      radeon_end_update_context_roll(sctx);
   }
}

static void si_emit_scissors(struct si_context *ctx, unsigned index)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_scissor_state *states = ctx->scissors;
   bool scissor_enabled = ctx->queued.named.rasterizer->scissor_enable;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      struct si_signed_scissor *vp = &ctx->viewports.as_scissor[0];

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
      radeon_end();

      si_emit_one_scissor(ctx, cs, vp, scissor_enabled ? &states[0] : NULL);
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_028250_PA_SC_VPORT_SCISSOR_0_TL, SI_MAX_VIEWPORTS * 2);
   radeon_end();

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_emit_one_scissor(ctx, cs, &ctx->viewports.as_scissor[i],
                          scissor_enabled ? &states[i] : NULL);
   }
}

static void si_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                                   unsigned num_viewports, const struct pipe_viewport_state *state)
{
   struct si_context *ctx = (struct si_context *)pctx;
   int i;

   for (i = 0; i < num_viewports; i++) {
      unsigned index = start_slot + i;
      struct si_signed_scissor *scissor = &ctx->viewports.as_scissor[index];

      ctx->viewports.states[index] = state[i];

      si_get_scissor_from_viewport(ctx, &state[i], scissor);

      int max_corner = MAX2(
         MAX2(abs(scissor->maxx), abs(scissor->maxy)),
         MAX2(abs(scissor->minx), abs(scissor->miny)));

      /* Determine the best quantization mode (subpixel precision),
       * but also leave enough space for the guardband.
       *
       * Note that primitive binning requires QUANT_MODE == 16_8 on Vega10
       * and Raven1 for line and rectangle primitive types to work correctly.
       * Always use 16_8 if primitive binning may occur.
       */
      if ((ctx->family == CHIP_VEGA10 || ctx->family == CHIP_RAVEN) && ctx->screen->dpbb_allowed)
         max_corner = 16384; /* Use QUANT_MODE == 16_8. */

      /* Another constraint is that all coordinates in the viewport
       * are representable in fixed point with respect to the
       * surface origin.
       *
       * It means that PA_SU_HARDWARE_SCREEN_OFFSET can't be given
       * an offset that would make the upper corner of the viewport
       * greater than the maximum representable number post
       * quantization, i.e. 2^quant_bits.
       *
       * This does not matter for 14.10 and 16.8 formats since the
       * offset is already limited at 8k, but it means we can't use
       * 12.12 if we are drawing to some pixels outside the lower
       * 4k x 4k of the render target.
       */

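      /* Each threshold leaves 4x the viewport size for the guardband,
       * e.g. 12.12 can represent coordinates up to 4096, so it's only
       * used when the viewport fits within 1024. */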
      if (max_corner <= 1024) /* 4K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH;
      else if (max_corner <= 4096) /* 16K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH;
      else /* 64K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;
   }

   if (start_slot == 0) {
      ctx->viewport0_y_inverted = state->scale[1] < 0;

      /* NGG cull state uses the viewport and quant mode. */
      if (ctx->screen->use_ngg_culling)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);
   }

   si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}

static void si_emit_one_viewport(struct si_context *ctx, struct pipe_viewport_state *state)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;

   radeon_begin(cs);
   radeon_emit(fui(state->scale[0]));
   radeon_emit(fui(state->translate[0]));
   radeon_emit(fui(state->scale[1]));
   radeon_emit(fui(state->translate[1]));
   radeon_emit(fui(state->scale[2]));
   radeon_emit(fui(state->translate[2]));
   radeon_end();
}

static void si_emit_viewports(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      radeon_begin(cs);
      radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, 6);
      radeon_end();

      si_emit_one_viewport(ctx, &states[0]);
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE + 0, SI_MAX_VIEWPORTS * 6);
   radeon_end();

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++)
      si_emit_one_viewport(ctx, &states[i]);
}

static inline void si_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
                                         bool window_space_position, float *zmin, float *zmax)
{
   if (window_space_position) {
      *zmin = 0;
      *zmax = 1;
      return;
   }
   util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
}

static void si_emit_depth_ranges(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;
   bool clip_halfz = ctx->queued.named.rasterizer->clip_halfz;
   bool window_space = ctx->vs_disables_clipping_viewport;
   float zmin, zmax;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      si_viewport_zmin_zmax(&states[0], clip_halfz, window_space, &zmin, &zmax);

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_0282D0_PA_SC_VPORT_ZMIN_0, 2);
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
      radeon_end();
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_0282D0_PA_SC_VPORT_ZMIN_0, SI_MAX_VIEWPORTS * 2);
   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_viewport_zmin_zmax(&states[i], clip_halfz, window_space, &zmin, &zmax);
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
   }
   radeon_end();
}

static void si_emit_viewport_states(struct si_context *ctx, unsigned index)
{
   si_emit_viewports(ctx);
   si_emit_depth_ranges(ctx);
}

/**
 * This reacts to 2 state changes:
 * - VS.writes_viewport_index
 * - VS output position in window space (enable/disable)
 *
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
void si_update_vs_viewport_state(struct si_context *ctx)
{
   struct si_shader_ctx_state *vs = si_get_vs(ctx);
   struct si_shader_info *info = vs->cso ? &vs->cso->info : NULL;
   bool vs_window_space;

   if (!info)
      return;

   /* When the VS disables clipping and viewport transformation. */
   vs_window_space = vs->cso->stage == MESA_SHADER_VERTEX && info->base.vs.window_space_position;

   if (ctx->vs_disables_clipping_viewport != vs_window_space) {
      ctx->vs_disables_clipping_viewport = vs_window_space;
      si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   }

   /* Viewport index handling. */
   if (ctx->vs_writes_viewport_index == info->writes_viewport_index)
      return;

   /* This changes how the guardband is computed. */
   ctx->vs_writes_viewport_index = info->writes_viewport_index;
   si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);

   /* Emit scissors and viewports that were enabled by having
    * the ViewportIndex output.
    */
   if (info->writes_viewport_index) {
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   }
}

static void si_emit_window_rectangles(struct si_context *sctx, unsigned index)
{
   /* There are four clipping rectangles. Their corner coordinates are inclusive.
    * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
    * on whether the pixel is inside cliprects 0-3, respectively. For example,
    * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
    * the number 3 (binary 0011).
    *
    * If CLIPRECT_RULE & (1 << number), the pixel is rasterized.
    */
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   static const unsigned outside[4] = {
      /* outside rectangle 0 */
      V_02820C_OUT | V_02820C_IN_1 | V_02820C_IN_2 | V_02820C_IN_21 | V_02820C_IN_3 |
      V_02820C_IN_31 | V_02820C_IN_32 | V_02820C_IN_321,
      /* outside rectangles 0, 1 */
      V_02820C_OUT | V_02820C_IN_2 | V_02820C_IN_3 | V_02820C_IN_32,
      /* outside rectangles 0, 1, 2 */
      V_02820C_OUT | V_02820C_IN_3,
      /* outside rectangles 0, 1, 2, 3 */
      V_02820C_OUT,
   };
   const unsigned disabled = 0xffff; /* all inside and outside cases */
   unsigned num_rectangles = sctx->num_window_rectangles;
   struct pipe_scissor_state *rects = sctx->window_rectangles;
   unsigned rule;

   assert(num_rectangles <= 4);

   if (num_rectangles == 0)
      rule = disabled;
   else if (sctx->window_rectangles_include)
      rule = ~outside[num_rectangles - 1];
   else
      rule = outside[num_rectangles - 1];
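   /* E.g. with one "exclude" rectangle, rule = outside[0], which sets
    * every combination bit except those where bit 0 (inside rectangle 0)
    * is set, so pixels inside rectangle 0 are not rasterized. */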

   radeon_begin(cs);
   radeon_opt_set_context_reg(sctx, R_02820C_PA_SC_CLIPRECT_RULE, SI_TRACKED_PA_SC_CLIPRECT_RULE,
                              rule);
   if (num_rectangles == 0) {
      radeon_end();
      return;
   }

   radeon_set_context_reg_seq(R_028210_PA_SC_CLIPRECT_0_TL, num_rectangles * 2);
   for (unsigned i = 0; i < num_rectangles; i++) {
      radeon_emit(S_028210_TL_X(rects[i].minx) | S_028210_TL_Y(rects[i].miny));
      radeon_emit(S_028214_BR_X(rects[i].maxx) | S_028214_BR_Y(rects[i].maxy));
   }
   radeon_end();
}

static void si_set_window_rectangles(struct pipe_context *ctx, bool include,
                                     unsigned num_rectangles,
                                     const struct pipe_scissor_state *rects)
{
   struct si_context *sctx = (struct si_context *)ctx;

   sctx->num_window_rectangles = num_rectangles;
   sctx->window_rectangles_include = include;
   if (num_rectangles) {
      memcpy(sctx->window_rectangles, rects, sizeof(*rects) * num_rectangles);
   }

   si_mark_atom_dirty(sctx, &sctx->atoms.s.window_rectangles);
}

void si_init_viewport_functions(struct si_context *ctx)
{
   ctx->atoms.s.guardband.emit = si_emit_guardband;
   ctx->atoms.s.scissors.emit = si_emit_scissors;
   ctx->atoms.s.viewports.emit = si_emit_viewport_states;
   ctx->atoms.s.window_rectangles.emit = si_emit_window_rectangles;
   ctx->atoms.s.ngg_cull_state.emit = si_emit_cull_state;

   ctx->b.set_scissor_states = si_set_scissor_states;
   ctx->b.set_viewport_states = si_set_viewport_states;
   ctx->b.set_window_rectangles = si_set_window_rectangles;

   for (unsigned i = 0; i < 16; i++)
      ctx->viewports.as_scissor[i].quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;
}