/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "common/intel_aux_map.h"
#include "common/intel_sample_positions.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "vk_util.h"
/**
 * Compute an \p n x \p m pixel hashing table usable as slice, subslice or
 * pixel pipe hashing table.  The resulting table is the cyclic repetition of
 * a fixed pattern with periodicity equal to \p period.
 *
 * If \p index is specified to be equal to \p period, a 2-way hashing table
 * will be generated such that indices 0 and 1 are returned for the following
 * fractions of entries respectively:
 *
 *   p_0 = ceil(period / 2) / period
 *   p_1 = floor(period / 2) / period
 *
 * If \p index is even and less than \p period, a 3-way hashing table will be
 * generated such that indices 0, 1 and 2 are returned for the following
 * fractions of entries:
 *
 *   p_0 = (ceil(period / 2) - 1) / period
 *   p_1 = floor(period / 2) / period
 *   p_2 = 1 / period
 *
 * The equations above apply if \p flip is equal to 0; if it is equal to 1,
 * p_0 and p_1 will be swapped for the result.  Note that in the context of
 * pixel pipe hashing this can always be 0 on Gfx12 platforms, since the
 * hardware transparently remaps logical indices found on the table to
 * physical pixel pipe indices from the highest to lowest EU count.
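 *
 * As an illustrative example (derived from the code below): with
 * period = 3 and index = period = 3 (2-way mode, flip = 0), the entry at
 * (i, j) is ((i + j) % 3) & 1, so the top-left 4x4 corner of the table is
 *
 *   0 1 0 0
 *   1 0 0 1
 *   0 0 1 0
 *   0 1 0 0
 *
 * Within each period of 3 entries index 0 appears twice and index 1 once,
 * matching p_0 = 2/3 and p_1 = 1/3 from the equations above.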
 */
UNUSED static void
calculate_pixel_hashing_table(unsigned n, unsigned m,
                              unsigned period, unsigned index, bool flip,
                              uint32_t *p)
{
   for (unsigned i = 0; i < n; i++) {
      for (unsigned j = 0; j < m; j++) {
         const unsigned k = (i + j) % period;
         p[j + m * i] = (k == index ? 2 : (k & 1) ^ flip);
      }
   }
}

static void
genX(emit_slice_hashing_state)(struct anv_device *device,
                               struct anv_batch *batch)
{
#if GFX_VER == 11
   assert(device->info.ppipe_subslices[2] == 0);

   if (device->info.ppipe_subslices[0] == device->info.ppipe_subslices[1])
      return;

   if (!device->slice_hash.alloc_size) {
      unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
      device->slice_hash =
         anv_state_pool_alloc(&device->dynamic_state_pool, size, 64);

      const bool flip = device->info.ppipe_subslices[0] <
                        device->info.ppipe_subslices[1];
      struct GENX(SLICE_HASH_TABLE) table;
      calculate_pixel_hashing_table(16, 16, 3, 3, flip, table.Entry[0]);

      GENX(SLICE_HASH_TABLE_pack)(NULL, device->slice_hash.map, &table);
   }

   anv_batch_emit(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
      ptr.SliceHashStatePointerValid = true;
      ptr.SliceHashTableStatePointer = device->slice_hash.offset;
   }

   anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), mode) {
      mode.SliceHashingTableEnable = true;
   }
#elif GFX_VERx10 == 120
   /* For each n calculate ppipes_of[n], equal to the number of pixel pipes
    * present with n active dual subslices.
    */
   unsigned ppipes_of[3] = {};

   for (unsigned n = 0; n < ARRAY_SIZE(ppipes_of); n++) {
      for (unsigned p = 0; p < ARRAY_SIZE(device->info.ppipe_subslices); p++)
         ppipes_of[n] += (device->info.ppipe_subslices[p] == n);
   }

   /* Gfx12 has three pixel pipes. */
   assert(ppipes_of[0] + ppipes_of[1] + ppipes_of[2] == 3);

   if (ppipes_of[2] == 3 || ppipes_of[0] == 2) {
      /* All three pixel pipes have the maximum number of active dual
       * subslices, or there is only one active pixel pipe: Nothing to do.
       */
      return;
   }

   anv_batch_emit(batch, GENX(3DSTATE_SUBSLICE_HASH_TABLE), p) {
      p.SliceHashControl[0] = TABLE_0;

      if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.TwoWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.TwoWayTableEntry[0]);

      if (ppipes_of[2] == 2 && ppipes_of[1] == 1)
         calculate_pixel_hashing_table(8, 16, 5, 4, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 2 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 2, 2, 0, p.ThreeWayTableEntry[0]);
      else if (ppipes_of[2] == 1 && ppipes_of[1] == 1 && ppipes_of[0] == 1)
         calculate_pixel_hashing_table(8, 16, 3, 3, 0, p.ThreeWayTableEntry[0]);
      else
         unreachable("Illegal fusing.");
   }

   anv_batch_emit(batch, GENX(3DSTATE_3D_MODE), p) {
      p.SubsliceHashingTableEnable = true;
      p.SubsliceHashingTableEnableMask = true;
   }
#endif
}

static VkResult
init_render_queue_state(struct anv_queue *queue)
{
   struct anv_device *device = queue->device;
   struct anv_batch batch;
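   /* Build the one-time initialization commands in a small on-stack buffer;
    * it is submitted once at the end of this function.
    */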
   uint32_t cmds[64];
   batch.start = batch.next = cmds;
   batch.end = (void *) cmds + sizeof(cmds);

   anv_batch_emit(&batch, GENX(PIPELINE_SELECT), ps) {
#if GFX_VER >= 9
      ps.MaskBits = GFX_VER >= 12 ? 0x13 : 3;
      ps.MediaSamplerDOPClockGateEnable = GFX_VER >= 12;
#endif
      ps.PipelineSelection = _3D;
   }

#if GFX_VER == 9
   anv_batch_write_reg(&batch, GENX(CACHE_MODE_1), cm1) {
      cm1.FloatBlendOptimizationEnable = true;
      cm1.FloatBlendOptimizationEnableMask = true;
      cm1.MSCRAWHazardAvoidanceBit = true;
      cm1.MSCRAWHazardAvoidanceBitMask = true;
      cm1.PartialResolveDisableInVC = true;
      cm1.PartialResolveDisableInVCMask = true;
   }
#endif

   anv_batch_emit(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS), aa);

   anv_batch_emit(&batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleYMin = 0;
      rect.ClippedDrawingRectangleXMin = 0;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.DrawingRectangleOriginY = 0;
      rect.DrawingRectangleOriginX = 0;
   }

#if GFX_VER >= 8
   anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);

   genX(emit_sample_pattern)(&batch, 0, NULL);

   /* The BDW+ docs describe how to use the 3DSTATE_WM_HZ_OP instruction in the
    * section titled, "Optimized Depth Buffer Clear and/or Stencil Buffer
    * Clear." It mentions that the packet overrides GPU state for the clear
    * operation and needs to be reset to 0s to clear the overrides. Depending
    * on the kernel, we may not get a context with the state for this packet
    * zeroed. Do it ourselves just in case. We've observed this to prevent a
    * number of GPU hangs on ICL.
    */
   anv_batch_emit(&batch, GENX(3DSTATE_WM_HZ_OP), hzp);
#endif

#if GFX_VER == 11
   /* Bit 5 "Headerless Message for Pre-emptable Contexts" of the
    * SAMPLER_MODE register defaults to 0, which disallows headerless
    * sampler messages for pre-emptable contexts. Set it to 1 to allow
    * them.
    */
   anv_batch_write_reg(&batch, GENX(SAMPLER_MODE), sm) {
      sm.HeaderlessMessageforPreemptableContexts = true;
      sm.HeaderlessMessageforPreemptableContextsMask = true;
   }

   /* Bit 1 "Enabled Texel Offset Precision Fix" must be set in the
    * HALF_SLICE_CHICKEN7 register.
    */
   anv_batch_write_reg(&batch, GENX(HALF_SLICE_CHICKEN7), hsc7) {
      hsc7.EnabledTexelOffsetPrecisionFix = true;
      hsc7.EnabledTexelOffsetPrecisionFixMask = true;
   }

   anv_batch_write_reg(&batch, GENX(TCCNTLREG), tcc) {
      tcc.L3DataPartialWriteMergingEnable = true;
      tcc.ColorZPartialWriteMergingEnable = true;
      tcc.URBPartialWriteMergingEnable = true;
      tcc.TCDisable = true;
   }
#endif
   genX(emit_slice_hashing_state)(device, &batch);

#if GFX_VER >= 11
   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
   if (device->info.disable_ccs_repack) {
      anv_batch_write_reg(&batch, GENX(CACHE_MODE_0), cm0) {
         cm0.DisableRepackingforCompression = true;
         cm0.DisableRepackingforCompressionMask = true;
      }
   }

   /* An unknown issue is causing VS push constants to become corrupted
    * during object-level preemption. For now, restrict to command-buffer
    * level preemption to avoid rendering corruption.
    */
   anv_batch_write_reg(&batch, GENX(CS_CHICKEN1), cc1) {
      cc1.ReplayMode = MidcmdbufferPreemption;
      cc1.ReplayModeMask = true;
   }

#if GFX_VERx10 < 125
#define AA_LINE_QUALITY_REG GENX(3D_CHICKEN3)
#else
#define AA_LINE_QUALITY_REG GENX(CHICKEN_RASTER_1)
#endif

   /* Enable the new line drawing algorithm that produces higher quality
    * lines.
    */
   anv_batch_write_reg(&batch, AA_LINE_QUALITY_REG, c3) {
      c3.AALineQualityFix = true;
      c3.AALineQualityFixMask = true;
   }
#endif

#if GFX_VER == 12
   if (device->info.has_aux_map) {
      uint64_t aux_base_addr = intel_aux_map_get_base(device->aux_map_ctx);
      assert(aux_base_addr % (32 * 1024) == 0);
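      /* The 64-bit aux-table base address is programmed with two 32-bit
       * MI_LOAD_REGISTER_IMMs: low dword first, then high dword.
       */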
      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num);
         lri.DataDWord = aux_base_addr & 0xffffffff;
      }
      anv_batch_emit(&batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
         lri.RegisterOffset = GENX(GFX_AUX_TABLE_BASE_ADDR_num) + 4;
         lri.DataDWord = aux_base_addr >> 32;
      }
   }
#endif

   /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
    * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
    *
    * This is only safe on kernels with context isolation support.
    */
   if (GFX_VER >= 8 && device->physical->has_context_isolation) {
#if GFX_VER >= 9
      anv_batch_write_reg(&batch, GENX(CS_DEBUG_MODE2), csdm2) {
         csdm2.CONSTANT_BUFFERAddressOffsetDisable = true;
         csdm2.CONSTANT_BUFFERAddressOffsetDisableMask = true;
      }
#elif GFX_VER == 8
      anv_batch_write_reg(&batch, GENX(INSTPM), instpm) {
         instpm.CONSTANT_BUFFERAddressOffsetDisable = true;
         instpm.CONSTANT_BUFFERAddressOffsetDisableMask = true;
      }
#endif
   }

#if GFX_VER >= 11
   /* Starting with GFX version 11, SLM is no longer part of the L3$ config,
    * so it never changes throughout the lifetime of the VkDevice.
    */
   const struct intel_l3_config *cfg = intel_get_default_l3_config(&device->info);
   genX(emit_l3_config)(&batch, device, cfg);
   device->l3_config = cfg;
#endif

   anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);

   assert(batch.next <= batch.end);

   return anv_queue_submit_simple_batch(queue, &batch);
}

void
genX(init_physical_device_state)(ASSERTED struct anv_physical_device *device)
{
   assert(device->info.verx10 == GFX_VERx10);
}

VkResult
genX(init_device_state)(struct anv_device *device)
{
   /* Initialize to VK_SUCCESS so we don't return an uninitialized value if
    * the device has no queues.
    */
   VkResult res = VK_SUCCESS;

   device->slice_hash = (struct anv_state) { 0 };
   for (uint32_t i = 0; i < device->queue_count; i++) {
      struct anv_queue *queue = &device->queues[i];
      switch (queue->family->engine_class) {
      case I915_ENGINE_CLASS_RENDER:
         res = init_render_queue_state(queue);
         break;
      default:
         res = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
         break;
      }
      if (res != VK_SUCCESS)
         return res;
   }

   return res;
}

void
genX(emit_l3_config)(struct anv_batch *batch,
                     const struct anv_device *device,
                     const struct intel_l3_config *cfg)
{
   UNUSED const struct intel_device_info *devinfo = &device->info;

#if GFX_VER >= 8

#if GFX_VER >= 12
#define L3_ALLOCATION_REG GENX(L3ALLOC)
#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
#else
#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
#endif

   anv_batch_write_reg(batch, L3_ALLOCATION_REG, l3cr) {
      if (cfg == NULL) {
#if GFX_VER >= 12
         l3cr.L3FullWayAllocationEnable = true;
#else
         unreachable("Invalid L3$ config");
#endif
      } else {
#if GFX_VER < 11
         l3cr.SLMEnable = cfg->n[INTEL_L3P_SLM];
#endif
#if GFX_VER == 11
         /* Wa_1406697149: Bit 9 "Error Detection Behavior Control" must be
          * set in L3CNTLREG register. The default setting of the bit is not
          * the desirable behavior.
          */
         l3cr.ErrorDetectionBehaviorControl = true;
         l3cr.UseFullWays = true;
#endif /* GFX_VER == 11 */
         assert(cfg->n[INTEL_L3P_IS] == 0);
         assert(cfg->n[INTEL_L3P_C] == 0);
         assert(cfg->n[INTEL_L3P_T] == 0);
         l3cr.URBAllocation = cfg->n[INTEL_L3P_URB];
         l3cr.ROAllocation = cfg->n[INTEL_L3P_RO];
         l3cr.DCAllocation = cfg->n[INTEL_L3P_DC];
         l3cr.AllAllocation = cfg->n[INTEL_L3P_ALL];
      }
   }

#else /* GFX_VER < 8 */

   const bool has_dc = cfg->n[INTEL_L3P_DC] || cfg->n[INTEL_L3P_ALL];
   const bool has_is = cfg->n[INTEL_L3P_IS] || cfg->n[INTEL_L3P_RO] ||
                       cfg->n[INTEL_L3P_ALL];
   const bool has_c = cfg->n[INTEL_L3P_C] || cfg->n[INTEL_L3P_RO] ||
                      cfg->n[INTEL_L3P_ALL];
   const bool has_t = cfg->n[INTEL_L3P_T] || cfg->n[INTEL_L3P_RO] ||
                      cfg->n[INTEL_L3P_ALL];

   assert(!cfg->n[INTEL_L3P_ALL]);

   /* When SLM is enabled it only uses a portion of the L3 on half of the
    * banks; the matching space on the remaining banks has to be allocated
    * to a client (the URB, for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const bool urb_low_bw = cfg->n[INTEL_L3P_SLM] && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[INTEL_L3P_URB] == cfg->n[INTEL_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
   assert(cfg->n[INTEL_L3P_URB] >= n0_urb);

   anv_batch_write_reg(batch, GENX(L3SQCREG1), l3sqc) {
      l3sqc.ConvertDC_UC = !has_dc;
      l3sqc.ConvertIS_UC = !has_is;
      l3sqc.ConvertC_UC = !has_c;
      l3sqc.ConvertT_UC = !has_t;
#if GFX_VERx10 == 75
      l3sqc.L3SQGeneralPriorityCreditInitialization = SQGPCI_DEFAULT;
#else
      l3sqc.L3SQGeneralPriorityCreditInitialization =
         devinfo->is_baytrail ? BYT_SQGPCI_DEFAULT : SQGPCI_DEFAULT;
#endif
      l3sqc.L3SQHighPriorityCreditInitialization = SQHPCI_DEFAULT;
   }

   anv_batch_write_reg(batch, GENX(L3CNTLREG2), l3cr2) {
      l3cr2.SLMEnable = cfg->n[INTEL_L3P_SLM];
      l3cr2.URBLowBandwidth = urb_low_bw;
      l3cr2.URBAllocation = cfg->n[INTEL_L3P_URB] - n0_urb;
#if GFX_VERx10 != 75
      l3cr2.ALLAllocation = cfg->n[INTEL_L3P_ALL];
#endif
      l3cr2.ROAllocation = cfg->n[INTEL_L3P_RO];
      l3cr2.DCAllocation = cfg->n[INTEL_L3P_DC];
   }

   anv_batch_write_reg(batch, GENX(L3CNTLREG3), l3cr3) {
      l3cr3.ISAllocation = cfg->n[INTEL_L3P_IS];
      l3cr3.ISLowBandwidth = 0;
      l3cr3.CAllocation = cfg->n[INTEL_L3P_C];
      l3cr3.CLowBandwidth = 0;
      l3cr3.TAllocation = cfg->n[INTEL_L3P_T];
      l3cr3.TLowBandwidth = 0;
   }

#if GFX_VERx10 == 75
   if (device->physical->cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      anv_batch_write_reg(batch, GENX(SCRATCH1), s1) {
         s1.L3AtomicDisable = !has_dc;
      }
      anv_batch_write_reg(batch, GENX(CHICKEN3), c3) {
         c3.L3AtomicDisableMask = true;
         c3.L3AtomicDisable = !has_dc;
      }
   }
#endif /* GFX_VERx10 == 75 */

#endif /* GFX_VER < 8 */
}

void
genX(emit_multisample)(struct anv_batch *batch, uint32_t samples,
                       const VkSampleLocationEXT *locations)
{
   anv_batch_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
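      /* NumberofMultisamples is the log2 of the sample count;
       * __builtin_ffs(samples) - 1 computes that for the power-of-two
       * sample counts Vulkan allows.
       */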
      ms.NumberofMultisamples       = __builtin_ffs(samples) - 1;

      ms.PixelLocation              = CENTER;
#if GFX_VER >= 8
      /* The PRM says that this bit is valid only for DX9:
       *
       *    SW can choose to set this bit only for DX9 API. DX10/OGL API's
       *    should not have any effect by setting or not setting this bit.
       */
      ms.PixelPositionOffsetEnable  = false;
#else

      if (locations) {
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X_ARRAY(ms.Sample, locations);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X_ARRAY(ms.Sample, locations);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X_ARRAY(ms.Sample, locations);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X_ARRAY(ms.Sample, locations);
            break;
         default:
            break;
         }
      } else {
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X(ms.Sample);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X(ms.Sample);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X(ms.Sample);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X(ms.Sample);
            break;
         default:
            break;
         }
      }
#endif
   }
}

#if GFX_VER >= 8
void
genX(emit_sample_pattern)(struct anv_batch *batch, uint32_t samples,
                          const VkSampleLocationEXT *locations)
{
   /* See the Vulkan 1.0 spec Table 24.1 "Standard sample locations" and
    * VkPhysicalDeviceFeatures::standardSampleLocations.
    */
   anv_batch_emit(batch, GENX(3DSTATE_SAMPLE_PATTERN), sp) {
      if (locations) {
         /* The Skylake PRM Vol. 2a "3DSTATE_SAMPLE_PATTERN" says:
          *
          *    "When programming the sample offsets (for NUMSAMPLES_4 or _8
          *    and MSRASTMODE_xxx_PATTERN), the order of the samples 0 to 3
          *    (or 7 for 8X, or 15 for 16X) must have monotonically increasing
          *    distance from the pixel center. This is required to get the
          *    correct centroid computation in the device."
          *
          * However, the Vulkan spec seems to require that the samples occur
          * in the order provided through the API. The standard sample
          * patterns have the above property that they have monotonically
          * increasing distances from the center, but client-provided ones do
          * not. As long as this only affects centroid calculations as the
          * docs say, we should be ok because OpenGL and Vulkan only require
          * that the centroid be some lit sample and that it's the same for
          * all samples in a pixel; they have no requirement that it be the
          * one closest to center.
          */
         switch (samples) {
         case 1:
            INTEL_SAMPLE_POS_1X_ARRAY(sp._1xSample, locations);
            break;
         case 2:
            INTEL_SAMPLE_POS_2X_ARRAY(sp._2xSample, locations);
            break;
         case 4:
            INTEL_SAMPLE_POS_4X_ARRAY(sp._4xSample, locations);
            break;
         case 8:
            INTEL_SAMPLE_POS_8X_ARRAY(sp._8xSample, locations);
            break;
#if GFX_VER >= 9
         case 16:
            INTEL_SAMPLE_POS_16X_ARRAY(sp._16xSample, locations);
            break;
#endif
         default:
            break;
         }
      } else {
         INTEL_SAMPLE_POS_1X(sp._1xSample);
         INTEL_SAMPLE_POS_2X(sp._2xSample);
         INTEL_SAMPLE_POS_4X(sp._4xSample);
         INTEL_SAMPLE_POS_8X(sp._8xSample);
#if GFX_VER >= 9
         INTEL_SAMPLE_POS_16X(sp._16xSample);
#endif
      }
   }
}
#endif

#if GFX_VER >= 11
void
genX(emit_shading_rate)(struct anv_batch *batch,
                        const struct anv_graphics_pipeline *pipeline,
                        struct anv_state cps_states,
                        struct anv_dynamic_state *dynamic_state)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
   const bool cps_enable = wm_prog_data && wm_prog_data->per_coarse_pixel_dispatch;

#if GFX_VER == 11
   anv_batch_emit(batch, GENX(3DSTATE_CPS), cps) {
      cps.CoarsePixelShadingMode = cps_enable ? CPS_MODE_CONSTANT : CPS_MODE_NONE;
      if (cps_enable) {
         cps.MinCPSizeX = dynamic_state->fragment_shading_rate.width;
         cps.MinCPSizeY = dynamic_state->fragment_shading_rate.height;
      }
   }
#elif GFX_VER == 12
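   /* On Gfx12 the coarse pixel state is indirect: pack one CPS_STATE per
    * viewport into the cps_states buffer (the *_length macros count dwords,
    * hence the multiply by 4 to get a byte offset).
    */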
   for (uint32_t i = 0; i < dynamic_state->viewport.count; i++) {
      uint32_t *cps_state_dwords =
         cps_states.map + GENX(CPS_STATE_length) * 4 * i;
      struct GENX(CPS_STATE) cps_state = {
         .CoarsePixelShadingMode = cps_enable ? CPS_MODE_CONSTANT : CPS_MODE_NONE,
      };

      if (cps_enable) {
         cps_state.MinCPSizeX = dynamic_state->fragment_shading_rate.width;
         cps_state.MinCPSizeY = dynamic_state->fragment_shading_rate.height;
      }

      GENX(CPS_STATE_pack)(NULL, cps_state_dwords, &cps_state);
   }

   anv_batch_emit(batch, GENX(3DSTATE_CPS_POINTERS), cps) {
      cps.CoarsePixelShadingStateArrayPointer = cps_states.offset;
   }
#endif
}
#endif /* GFX_VER >= 11 */

static uint32_t
vk_to_intel_tex_filter(VkFilter filter, bool anisotropyEnable)
{
   switch (filter) {
   default:
      assert(!"Invalid filter");
   case VK_FILTER_NEAREST:
      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_NEAREST;
   case VK_FILTER_LINEAR:
      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_LINEAR;
   }
}

static uint32_t
vk_to_intel_max_anisotropy(float ratio)
{
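   /* Map the API ratio to the hardware encoding: 2:1 -> 0, 4:1 -> 1,
    * ..., 16:1 -> 7 (values outside [2, 16] are clamped first).
    */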
   return (anv_clamp_f(ratio, 2, 16) - 2) / 2;
}

static const uint32_t vk_to_intel_mipmap_mode[] = {
   [VK_SAMPLER_MIPMAP_MODE_NEAREST]          = MIPFILTER_NEAREST,
   [VK_SAMPLER_MIPMAP_MODE_LINEAR]           = MIPFILTER_LINEAR
};

static const uint32_t vk_to_intel_tex_address[] = {
   [VK_SAMPLER_ADDRESS_MODE_REPEAT]          = TCM_WRAP,
   [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT] = TCM_MIRROR,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE]   = TCM_CLAMP,
   [VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
};

/* Vulkan specifies the result of shadow comparisons as:
 *     1     if   ref <op> texel,
 *     0     otherwise.
 *
 * The hardware does:
 *     0     if texel <op> ref,
 *     1     otherwise.
 *
 * So, these look a bit strange because there's both a negation
 * and swapping of the arguments involved.
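 *
 * For example, VK_COMPARE_OP_LESS should return 1 iff ref < texel, which
 * is equivalent to !(texel <= ref), i.e. the hardware's PREFILTEROP_LEQUAL.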
 */
static const uint32_t vk_to_intel_shadow_compare_op[] = {
   [VK_COMPARE_OP_NEVER]                        = PREFILTEROP_ALWAYS,
   [VK_COMPARE_OP_LESS]                         = PREFILTEROP_LEQUAL,
   [VK_COMPARE_OP_EQUAL]                        = PREFILTEROP_NOTEQUAL,
   [VK_COMPARE_OP_LESS_OR_EQUAL]                = PREFILTEROP_LESS,
   [VK_COMPARE_OP_GREATER]                      = PREFILTEROP_GEQUAL,
   [VK_COMPARE_OP_NOT_EQUAL]                    = PREFILTEROP_EQUAL,
   [VK_COMPARE_OP_GREATER_OR_EQUAL]             = PREFILTEROP_GREATER,
   [VK_COMPARE_OP_ALWAYS]                       = PREFILTEROP_NEVER,
};

#if GFX_VER >= 9
static const uint32_t vk_to_intel_sampler_reduction_mode[] = {
   [VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT] = STD_FILTER,
   [VK_SAMPLER_REDUCTION_MODE_MIN_EXT]              = MINIMUM,
   [VK_SAMPLER_REDUCTION_MODE_MAX_EXT]              = MAXIMUM,
};
#endif

VkResult genX(CreateSampler)(
    VkDevice                                    _device,
    const VkSamplerCreateInfo*                  pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSampler*                                  pSampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_object_zalloc(&device->vk, pAllocator, sizeof(*sampler),
                              VK_OBJECT_TYPE_SAMPLER);
   if (!sampler)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   sampler->n_planes = 1;

   uint32_t border_color_stride = GFX_VERx10 == 75 ? 512 : 64;
   uint32_t border_color_offset;
   ASSERTED bool has_custom_color = false;
   if (pCreateInfo->borderColor <= VK_BORDER_COLOR_INT_OPAQUE_WHITE) {
      border_color_offset = device->border_colors.offset +
                            pCreateInfo->borderColor *
                            border_color_stride;
   } else {
      assert(GFX_VER >= 8);
      sampler->custom_border_color =
         anv_state_reserved_pool_alloc(&device->custom_border_colors);
      border_color_offset = sampler->custom_border_color.offset;
   }

#if GFX_VER >= 9
   unsigned sampler_reduction_mode = STD_FILTER;
   bool enable_sampler_reduction = false;
#endif

   vk_foreach_struct(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO: {
         VkSamplerYcbcrConversionInfo *pSamplerConversion =
            (VkSamplerYcbcrConversionInfo *) ext;
         ANV_FROM_HANDLE(anv_ycbcr_conversion, conversion,
                         pSamplerConversion->conversion);

         /* Ignore conversion for non-YUV formats. This fulfills a requirement
          * for clients that want to utilize the same code path for images
          * with external formats (VK_FORMAT_UNDEFINED) and "regular" RGBA
          * images where the format is known.
          */
         if (conversion == NULL || !conversion->format->can_ycbcr)
            break;

         sampler->n_planes = conversion->format->n_planes;
         sampler->conversion = conversion;
         break;
      }
#if GFX_VER >= 9
      case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO: {
         VkSamplerReductionModeCreateInfo *sampler_reduction =
            (VkSamplerReductionModeCreateInfo *) ext;
         sampler_reduction_mode =
            vk_to_intel_sampler_reduction_mode[sampler_reduction->reductionMode];
         enable_sampler_reduction = true;
         break;
      }
#endif
      case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT: {
         VkSamplerCustomBorderColorCreateInfoEXT *custom_border_color =
            (VkSamplerCustomBorderColorCreateInfoEXT *) ext;
         if (sampler->custom_border_color.map == NULL)
            break;
         struct gfx8_border_color *cbc = sampler->custom_border_color.map;
         if (custom_border_color->format == VK_FORMAT_B4G4R4A4_UNORM_PACK16) {
            /* B4G4R4A4_UNORM_PACK16 is treated as R4G4B4A4_UNORM_PACK16 with
             * a swizzle, but this does not carry over to the sampler for
             * border colors, so we need to do the swizzle ourselves here.
             */
            cbc->uint32[0] = custom_border_color->customBorderColor.uint32[2];
            cbc->uint32[1] = custom_border_color->customBorderColor.uint32[1];
            cbc->uint32[2] = custom_border_color->customBorderColor.uint32[0];
            cbc->uint32[3] = custom_border_color->customBorderColor.uint32[3];
         } else {
            /* Both structs share the same layout, so just copy them over. */
            memcpy(cbc, &custom_border_color->customBorderColor,
                   sizeof(VkClearColorValue));
         }
         has_custom_color = true;
         break;
      }
      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

   assert((sampler->custom_border_color.map == NULL) || has_custom_color);

   if (device->physical->has_bindless_samplers) {
      /* If we have bindless, allocate enough samplers.  We allocate 32 bytes
       * for each sampler instead of 16 bytes because we want all bindless
       * samplers to be 32-byte aligned so we don't have to use indirect
       * sampler messages on them.
       */
      sampler->bindless_state =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              sampler->n_planes * 32, 32);
   }

   for (unsigned p = 0; p < sampler->n_planes; p++) {
      const bool plane_has_chroma =
         sampler->conversion && sampler->conversion->format->planes[p].has_chroma;
      const VkFilter min_filter =
         plane_has_chroma ? sampler->conversion->chroma_filter : pCreateInfo->minFilter;
      const VkFilter mag_filter =
         plane_has_chroma ? sampler->conversion->chroma_filter : pCreateInfo->magFilter;
      const bool enable_min_filter_addr_rounding = min_filter != VK_FILTER_NEAREST;
      const bool enable_mag_filter_addr_rounding = mag_filter != VK_FILTER_NEAREST;
      /* From Broadwell PRM, SAMPLER_STATE:
       *   "Mip Mode Filter must be set to MIPFILTER_NONE for Planar YUV surfaces."
       */
      const bool isl_format_is_planar_yuv = sampler->conversion &&
         isl_format_is_yuv(sampler->conversion->format->planes[0].isl_format) &&
         isl_format_is_planar(sampler->conversion->format->planes[0].isl_format);

      const uint32_t mip_filter_mode =
         isl_format_is_planar_yuv ?
         MIPFILTER_NONE : vk_to_intel_mipmap_mode[pCreateInfo->mipmapMode];

      struct GENX(SAMPLER_STATE) sampler_state = {
         .SamplerDisable = false,
         .TextureBorderColorMode = DX10OGL,

#if GFX_VER >= 11
         .CPSLODCompensationEnable = true,
#endif

#if GFX_VER >= 8
         .LODPreClampMode = CLAMP_MODE_OGL,
#else
         .LODPreClampEnable = CLAMP_ENABLE_OGL,
#endif

#if GFX_VER == 8
         .BaseMipLevel = 0.0,
#endif
         .MipModeFilter = mip_filter_mode,
         .MagModeFilter = vk_to_intel_tex_filter(mag_filter, pCreateInfo->anisotropyEnable),
         .MinModeFilter = vk_to_intel_tex_filter(min_filter, pCreateInfo->anisotropyEnable),
         .TextureLODBias = anv_clamp_f(pCreateInfo->mipLodBias, -16, 15.996),
         .AnisotropicAlgorithm =
            pCreateInfo->anisotropyEnable ? EWAApproximation : LEGACY,
         .MinLOD = anv_clamp_f(pCreateInfo->minLod, 0, 14),
         .MaxLOD = anv_clamp_f(pCreateInfo->maxLod, 0, 14),
         .ChromaKeyEnable = 0,
         .ChromaKeyIndex = 0,
         .ChromaKeyMode = 0,
         .ShadowFunction =
            vk_to_intel_shadow_compare_op[pCreateInfo->compareEnable ?
                                          pCreateInfo->compareOp : VK_COMPARE_OP_NEVER],
         .CubeSurfaceControlMode = OVERRIDE,

         .BorderColorPointer = border_color_offset,

#if GFX_VER >= 8
         .LODClampMagnificationMode = MIPNONE,
#endif

         .MaximumAnisotropy = vk_to_intel_max_anisotropy(pCreateInfo->maxAnisotropy),
         .RAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .RAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .VAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .VAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .UAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .UAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .TrilinearFilterQuality = 0,
         .NonnormalizedCoordinateEnable = pCreateInfo->unnormalizedCoordinates,
         .TCXAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeU],
         .TCYAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeV],
         .TCZAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeW],

#if GFX_VER >= 9
         .ReductionType = sampler_reduction_mode,
         .ReductionTypeEnable = enable_sampler_reduction,
#endif
      };

      GENX(SAMPLER_STATE_pack)(NULL, sampler->state[p], &sampler_state);

      if (sampler->bindless_state.map) {
         memcpy(sampler->bindless_state.map + p * 32,
                sampler->state[p], GENX(SAMPLER_STATE_length) * 4);
      }
   }

   *pSampler = anv_sampler_to_handle(sampler);

   return VK_SUCCESS;
}