/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "common/intel_aux_map.h"
#include "common/intel_sample_positions.h"
#include "common/intel_pixel_hash.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "vk_standard_sample_locations.h"
#include "vk_util.h"
#include "vk_format.h"

static VkResult
init_render_queue_state(struct anv_queue *queue)
{
   struct anv_device *device = queue->device;
   uint32_t cmds[128];
   struct anv_batch batch = {
      .start = cmds,
      .next = cmds,
      .end = (void *) cmds + sizeof(cmds),
   };

   anv_batch_emit(&batch, GENX(PIPELINE_SELECT), ps) {
      ps.PipelineSelection = _3D;
   }

   anv_batch_emit(&batch, GENX(3DSTATE_AA_LINE_PARAMETERS), aa);

   anv_batch_emit(&batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
      rect.ClippedDrawingRectangleYMin = 0;
      rect.ClippedDrawingRectangleXMin = 0;
      rect.ClippedDrawingRectangleYMax = UINT16_MAX;
      rect.ClippedDrawingRectangleXMax = UINT16_MAX;
      rect.DrawingRectangleOriginY = 0;
      rect.DrawingRectangleOriginX = 0;
   }

#if GFX_VER >= 8
   anv_batch_emit(&batch, GENX(3DSTATE_WM_CHROMAKEY), ck);

   genX(emit_sample_pattern)(&batch, NULL);

   /* The BDW+ docs describe how to use the 3DSTATE_WM_HZ_OP instruction in the
    * section titled, "Optimized Depth Buffer Clear and/or Stencil Buffer
    * Clear." It mentions that the packet overrides GPU state for the clear
    * operation and needs to be reset to 0s to clear the overrides. Depending
    * on the kernel, we may not get a context with the state for this packet
    * zeroed. Do it ourselves just in case. We've observed this to prevent a
    * number of GPU hangs on ICL.
    */
   anv_batch_emit(&batch, GENX(3DSTATE_WM_HZ_OP), hzp);
#endif

   /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
    * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
    *
    * This is only safe on kernels with context isolation support.
    */
   if (GFX_VER >= 8 && device->physical->info.has_context_isolation) {
#if GFX_VER == 8
      anv_batch_write_reg(&batch, GENX(INSTPM), instpm) {
         instpm.CONSTANT_BUFFERAddressOffsetDisable = true;
         instpm.CONSTANT_BUFFERAddressOffsetDisableMask = true;
      }
#endif
   }

   anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);

   assert(batch.next <= batch.end);

   return anv_queue_submit_simple_batch(queue, &batch);
}
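
/* For readers unfamiliar with the anv_batch_emit block macro used above: it
 * is defined in anv_private.h and, roughly speaking (this is a simplified
 * sketch, not its exact definition), it reserves dwords in the batch,
 * initializes a stack copy of the command with its header defaults, runs the
 * attached block so the caller can set fields, and then packs the struct
 * into the reserved space:
 *
 *    struct GENX(PIPELINE_SELECT) ps = { GENX(PIPELINE_SELECT_header) };
 *    void *dst = anv_batch_emit_dwords(&batch, GENX(PIPELINE_SELECT_length));
 *    ps.PipelineSelection = _3D;                    // the block body
 *    GENX(PIPELINE_SELECT_pack)(&batch, dst, &ps);
 *
 * This is also why a bare call with a trailing semicolon and no block, as
 * with 3DSTATE_AA_LINE_PARAMETERS above, emits the packet with all fields
 * at their default values.
 */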

void
genX(init_physical_device_state)(ASSERTED struct anv_physical_device *pdevice)
{
   assert(pdevice->info.verx10 == GFX_VERx10);
}

VkResult
genX(init_device_state)(struct anv_device *device)
{
   VkResult res = VK_SUCCESS;

   device->slice_hash = (struct anv_state) { 0 };
   for (uint32_t i = 0; i < device->queue_count; i++) {
      struct anv_queue *queue = &device->queues[i];
      switch (queue->family->engine_class) {
      case INTEL_ENGINE_CLASS_RENDER:
         res = init_render_queue_state(queue);
         break;
      default:
         res = vk_error(device, VK_ERROR_INITIALIZATION_FAILED);
         break;
      }
      if (res != VK_SUCCESS)
         return res;
   }

   return res;
}

void
genX(emit_l3_config)(struct anv_batch *batch,
                     const struct anv_device *device,
                     const struct intel_l3_config *cfg)
{
   UNUSED const struct intel_device_info *devinfo = device->info;

#if GFX_VER >= 8

#define L3_ALLOCATION_REG GENX(L3CNTLREG)
#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)

   anv_batch_write_reg(batch, L3_ALLOCATION_REG, l3cr) {
      if (cfg == NULL) {
         unreachable("Invalid L3$ config");
      } else {
         l3cr.SLMEnable = cfg->n[INTEL_L3P_SLM];
         assert(cfg->n[INTEL_L3P_IS] == 0);
         assert(cfg->n[INTEL_L3P_C] == 0);
         assert(cfg->n[INTEL_L3P_T] == 0);
         l3cr.URBAllocation = cfg->n[INTEL_L3P_URB];
         l3cr.ROAllocation = cfg->n[INTEL_L3P_RO];
         l3cr.DCAllocation = cfg->n[INTEL_L3P_DC];
         l3cr.AllAllocation = cfg->n[INTEL_L3P_ALL];
      }
   }

#else /* GFX_VER < 8 */

   const bool has_dc = cfg->n[INTEL_L3P_DC] || cfg->n[INTEL_L3P_ALL];
   const bool has_is = cfg->n[INTEL_L3P_IS] || cfg->n[INTEL_L3P_RO] ||
                       cfg->n[INTEL_L3P_ALL];
   const bool has_c = cfg->n[INTEL_L3P_C] || cfg->n[INTEL_L3P_RO] ||
                      cfg->n[INTEL_L3P_ALL];
   const bool has_t = cfg->n[INTEL_L3P_T] || cfg->n[INTEL_L3P_RO] ||
                      cfg->n[INTEL_L3P_ALL];

   assert(!cfg->n[INTEL_L3P_ALL]);

   /* When enabled, SLM only uses a portion of the L3 on half of the banks;
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const bool urb_low_bw = cfg->n[INTEL_L3P_SLM] &&
                           devinfo->platform != INTEL_PLATFORM_BYT;
   assert(!urb_low_bw || cfg->n[INTEL_L3P_URB] == cfg->n[INTEL_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   const unsigned n0_urb = devinfo->platform == INTEL_PLATFORM_BYT ? 32 : 0;
   assert(cfg->n[INTEL_L3P_URB] >= n0_urb);

   anv_batch_write_reg(batch, GENX(L3SQCREG1), l3sqc) {
      l3sqc.ConvertDC_UC = !has_dc;
      l3sqc.ConvertIS_UC = !has_is;
      l3sqc.ConvertC_UC = !has_c;
      l3sqc.ConvertT_UC = !has_t;
#if GFX_VERx10 == 75
      l3sqc.L3SQGeneralPriorityCreditInitialization = SQGPCI_DEFAULT;
#else
      l3sqc.L3SQGeneralPriorityCreditInitialization =
         devinfo->platform == INTEL_PLATFORM_BYT ? BYT_SQGPCI_DEFAULT
                                                 : SQGPCI_DEFAULT;
#endif
      l3sqc.L3SQHighPriorityCreditInitialization = SQHPCI_DEFAULT;
   }

   anv_batch_write_reg(batch, GENX(L3CNTLREG2), l3cr2) {
      l3cr2.SLMEnable = cfg->n[INTEL_L3P_SLM];
      l3cr2.URBLowBandwidth = urb_low_bw;
      l3cr2.URBAllocation = cfg->n[INTEL_L3P_URB] - n0_urb;
#if GFX_VERx10 != 75
      l3cr2.ALLAllocation = cfg->n[INTEL_L3P_ALL];
#endif
      l3cr2.ROAllocation = cfg->n[INTEL_L3P_RO];
      l3cr2.DCAllocation = cfg->n[INTEL_L3P_DC];
   }

   anv_batch_write_reg(batch, GENX(L3CNTLREG3), l3cr3) {
      l3cr3.ISAllocation = cfg->n[INTEL_L3P_IS];
      l3cr3.ISLowBandwidth = 0;
      l3cr3.CAllocation = cfg->n[INTEL_L3P_C];
      l3cr3.CLowBandwidth = 0;
      l3cr3.TAllocation = cfg->n[INTEL_L3P_T];
      l3cr3.TLowBandwidth = 0;
   }

#if GFX_VERx10 == 75
   if (device->physical->cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      anv_batch_write_reg(batch, GENX(SCRATCH1), s1) {
         s1.L3AtomicDisable = !has_dc;
      }
      anv_batch_write_reg(batch, GENX(CHICKEN3), c3) {
         c3.L3AtomicDisableMask = true;
         c3.L3AtomicDisable = !has_dc;
      }
   }
#endif /* GFX_VERx10 == 75 */

#endif /* GFX_VER < 8 */
}
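
/* As a purely hypothetical illustration (not a validated configuration): an
 * intel_l3_config with n[INTEL_L3P_SLM] = 16, n[INTEL_L3P_URB] = 32 and all
 * other partitions 0 would, on GFX_VER >= 8, program L3CNTLREG with
 * SLMEnable = 1 (any nonzero SLM partition), URBAllocation = 32 and the
 * remaining allocation fields 0. On GFX_VER < 8 the same request would also
 * demote the unused IS/C/T/DC clients to uncacheable via the Convert*_UC
 * bits in L3SQCREG1, since has_is, has_c, has_t and has_dc are all false
 * for it.
 */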

void
genX(emit_multisample)(struct anv_batch *batch, uint32_t samples,
                       const struct vk_sample_locations_state *sl)
{
   if (sl != NULL) {
      assert(sl->per_pixel == samples);
      assert(sl->grid_size.width == 1);
      assert(sl->grid_size.height == 1);
   } else {
      sl = vk_standard_sample_locations_state(samples);
   }

   anv_batch_emit(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
      ms.NumberofMultisamples       = __builtin_ffs(samples) - 1;

      ms.PixelLocation              = CENTER;
#if GFX_VER < 8
      switch (samples) {
      case 1:
         INTEL_SAMPLE_POS_1X_ARRAY(ms.Sample, sl->locations);
         break;
      case 2:
         INTEL_SAMPLE_POS_2X_ARRAY(ms.Sample, sl->locations);
         break;
      case 4:
         INTEL_SAMPLE_POS_4X_ARRAY(ms.Sample, sl->locations);
         break;
      case 8:
         INTEL_SAMPLE_POS_8X_ARRAY(ms.Sample, sl->locations);
         break;
      default:
         break;
      }
#endif
   }
}
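
/* Note that __builtin_ffs(samples) - 1 is simply log2 for the power-of-two
 * sample counts Vulkan allows: samples = 1, 2, 4, 8, 16 yield
 * NumberofMultisamples = 0, 1, 2, 3, 4 respectively, matching the
 * log2-encoded field in 3DSTATE_MULTISAMPLE.
 */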

#if GFX_VER >= 8
void
genX(emit_sample_pattern)(struct anv_batch *batch,
                          const struct vk_sample_locations_state *sl)
{
   assert(sl == NULL || sl->grid_size.width == 1);
   assert(sl == NULL || sl->grid_size.height == 1);

   /* See the Vulkan 1.0 spec Table 24.1 "Standard sample locations" and
    * VkPhysicalDeviceFeatures::standardSampleLocations.
    */
   anv_batch_emit(batch, GENX(3DSTATE_SAMPLE_PATTERN), sp) {
      /* The Skylake PRM Vol. 2a "3DSTATE_SAMPLE_PATTERN" says:
       *
       *    "When programming the sample offsets (for NUMSAMPLES_4 or _8
       *    and MSRASTMODE_xxx_PATTERN), the order of the samples 0 to 3
       *    (or 7 for 8X, or 15 for 16X) must have monotonically increasing
       *    distance from the pixel center. This is required to get the
       *    correct centroid computation in the device."
       *
       * However, the Vulkan spec seems to require that the samples occur
       * in the order provided through the API. The standard sample patterns
       * have the above property that they have monotonically increasing
       * distances from the center but client-provided ones do not. As long
       * as this only affects centroid calculations as the docs say, we
       * should be ok because OpenGL and Vulkan only require that the
       * centroid be some lit sample and that it's the same for all samples
       * in a pixel; they have no requirement that it be the one closest to
       * center.
       */
      for (uint32_t i = 1; i <= 8; i *= 2) {
         switch (i) {
         case VK_SAMPLE_COUNT_1_BIT:
            if (sl && sl->per_pixel == i) {
               INTEL_SAMPLE_POS_1X_ARRAY(sp._1xSample, sl->locations);
            } else {
               INTEL_SAMPLE_POS_1X(sp._1xSample);
            }
            break;
         case VK_SAMPLE_COUNT_2_BIT:
            if (sl && sl->per_pixel == i) {
               INTEL_SAMPLE_POS_2X_ARRAY(sp._2xSample, sl->locations);
            } else {
               INTEL_SAMPLE_POS_2X(sp._2xSample);
            }
            break;
         case VK_SAMPLE_COUNT_4_BIT:
            if (sl && sl->per_pixel == i) {
               INTEL_SAMPLE_POS_4X_ARRAY(sp._4xSample, sl->locations);
            } else {
               INTEL_SAMPLE_POS_4X(sp._4xSample);
            }
            break;
         case VK_SAMPLE_COUNT_8_BIT:
            if (sl && sl->per_pixel == i) {
               INTEL_SAMPLE_POS_8X_ARRAY(sp._8xSample, sl->locations);
            } else {
               INTEL_SAMPLE_POS_8X(sp._8xSample);
            }
            break;
         default:
            unreachable("Invalid sample count");
         }
      }
   }
}
#endif
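
/* For reference, the Vulkan standard sample locations for 4x MSAA (from the
 * "Standard sample locations" table in the Vulkan spec) are:
 *
 *    sample 0: (0.375, 0.125)
 *    sample 1: (0.875, 0.375)
 *    sample 2: (0.125, 0.625)
 *    sample 3: (0.625, 0.875)
 *
 * All four are equidistant from the pixel center (0.5, 0.5), which is
 * consistent with the PRM's ordering requirement quoted above.
 */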

static uint32_t
vk_to_intel_tex_filter(VkFilter filter, bool anisotropyEnable)
{
   switch (filter) {
   default:
      unreachable("Invalid filter");
   case VK_FILTER_NEAREST:
      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_NEAREST;
   case VK_FILTER_LINEAR:
      return anisotropyEnable ? MAPFILTER_ANISOTROPIC : MAPFILTER_LINEAR;
   }
}
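
/* Note that when anisotropyEnable is set, both VK_FILTER_NEAREST and
 * VK_FILTER_LINEAR collapse to MAPFILTER_ANISOTROPIC, so the requested
 * filter mode is effectively ignored for anisotropic sampling.
 */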

static uint32_t
vk_to_intel_max_anisotropy(float ratio)
{
   return (CLAMP(ratio, 2, 16) - 2) / 2;
}
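
/* The hardware encodes the maximum anisotropy ratio in steps of two, with
 * encoding 0 meaning a 2:1 ratio. Working the formula through: ratio 1.0
 * clamps to 2 and yields 0 (2:1), ratio 8.0 yields 3 (8:1), and ratio 16.0
 * yields the maximum encoding of 7 (16:1).
 */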

static const uint32_t vk_to_intel_mipmap_mode[] = {
   [VK_SAMPLER_MIPMAP_MODE_NEAREST]          = MIPFILTER_NEAREST,
   [VK_SAMPLER_MIPMAP_MODE_LINEAR]           = MIPFILTER_LINEAR
};

static const uint32_t vk_to_intel_tex_address[] = {
   [VK_SAMPLER_ADDRESS_MODE_REPEAT]          = TCM_WRAP,
   [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT] = TCM_MIRROR,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE]   = TCM_CLAMP,
   [VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE] = TCM_MIRROR_ONCE,
   [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER] = TCM_CLAMP_BORDER,
};

/* Vulkan specifies the result of shadow comparisons as:
 *     1     if   ref <op> texel,
 *     0     otherwise.
 *
 * The hardware does:
 *     0     if texel <op> ref,
 *     1     otherwise.
 *
 * So, these look a bit strange because there's both a negation
 * and swapping of the arguments involved.
 */
static const uint32_t vk_to_intel_shadow_compare_op[] = {
   [VK_COMPARE_OP_NEVER]                        = PREFILTEROP_ALWAYS,
   [VK_COMPARE_OP_LESS]                         = PREFILTEROP_LEQUAL,
   [VK_COMPARE_OP_EQUAL]                        = PREFILTEROP_NOTEQUAL,
   [VK_COMPARE_OP_LESS_OR_EQUAL]                = PREFILTEROP_LESS,
   [VK_COMPARE_OP_GREATER]                      = PREFILTEROP_GEQUAL,
   [VK_COMPARE_OP_NOT_EQUAL]                    = PREFILTEROP_EQUAL,
   [VK_COMPARE_OP_GREATER_OR_EQUAL]             = PREFILTEROP_GREATER,
   [VK_COMPARE_OP_ALWAYS]                       = PREFILTEROP_NEVER,
};
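
/* Working one entry through as an example: Vulkan's VK_COMPARE_OP_LESS asks
 * for result 1 when (ref < texel). The hardware produces 0 when
 * (texel <op> ref) holds, i.e. result 1 when !(texel <op> ref). Choosing
 * <op> = LEQUAL gives result 1 when !(texel <= ref), which is exactly
 * (ref < texel); hence VK_COMPARE_OP_LESS maps to PREFILTEROP_LEQUAL.
 */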

VkResult genX(CreateSampler)(
    VkDevice                                    _device,
    const VkSamplerCreateInfo*                  pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSampler*                                  pSampler)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_object_zalloc(&device->vk, pAllocator, sizeof(*sampler),
                              VK_OBJECT_TYPE_SAMPLER);
   if (!sampler)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   sampler->n_planes = 1;

   uint32_t border_color_stride = GFX_VERx10 == 75 ? 512 : 64;
   uint32_t border_color_offset;
   ASSERTED bool has_custom_color = false;
   if (pCreateInfo->borderColor <= VK_BORDER_COLOR_INT_OPAQUE_WHITE) {
      border_color_offset = device->border_colors.offset +
                            pCreateInfo->borderColor *
                            border_color_stride;
   } else {
      assert(GFX_VER >= 8);
      sampler->custom_border_color =
         anv_state_reserved_pool_alloc(&device->custom_border_colors);
      border_color_offset = sampler->custom_border_color.offset;
   }

   const struct vk_format_ycbcr_info *ycbcr_info = NULL;
   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO: {
         VkSamplerYcbcrConversionInfo *pSamplerConversion =
            (VkSamplerYcbcrConversionInfo *) ext;
         VK_FROM_HANDLE(vk_ycbcr_conversion, conversion,
                        pSamplerConversion->conversion);

         /* Ignore conversion for non-YUV formats. This fulfills a
          * requirement for clients that want to utilize the same code path
          * for images with external formats (VK_FORMAT_UNDEFINED) and
          * "regular" RGBA images where the format is known.
          */
         if (conversion == NULL)
            break;

         ycbcr_info = vk_format_get_ycbcr_info(conversion->state.format);
         if (ycbcr_info == NULL)
            break;

         sampler->n_planes = ycbcr_info->n_planes;
         sampler->conversion = conversion;
         break;
      }
      case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT: {
         VkSamplerCustomBorderColorCreateInfoEXT *custom_border_color =
            (VkSamplerCustomBorderColorCreateInfoEXT *) ext;
         if (sampler->custom_border_color.map == NULL)
            break;

         union isl_color_value color = { .u32 = {
            custom_border_color->customBorderColor.uint32[0],
            custom_border_color->customBorderColor.uint32[1],
            custom_border_color->customBorderColor.uint32[2],
            custom_border_color->customBorderColor.uint32[3],
         } };

         const struct anv_format *format_desc =
            custom_border_color->format != VK_FORMAT_UNDEFINED ?
            anv_get_format(custom_border_color->format) : NULL;

         /* For formats with a swizzle, the swizzle does not carry over to
          * the sampler for border colors, so we need to apply it ourselves
          * here.
          */
         if (format_desc && format_desc->n_planes == 1 &&
             !isl_swizzle_is_identity(format_desc->planes[0].swizzle)) {
            const struct anv_format_plane *fmt_plane = &format_desc->planes[0];

            assert(!isl_format_has_int_channel(fmt_plane->isl_format));
            color = isl_color_value_swizzle(color, fmt_plane->swizzle, true);
         }

         memcpy(sampler->custom_border_color.map, color.u32, sizeof(color));
         has_custom_color = true;
         break;
      }
      case VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT:
         break;
      default:
         anv_debug_ignored_stype(ext->sType);
         break;
      }
   }

   assert((sampler->custom_border_color.map == NULL) || has_custom_color);

   if (device->physical->has_bindless_samplers) {
      /* If we have bindless, allocate enough samplers.  We allocate 32 bytes
       * for each sampler instead of 16 bytes because we want all bindless
       * samplers to be 32-byte aligned so we don't have to use indirect
       * sampler messages on them.
       */
      sampler->bindless_state =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              sampler->n_planes * 32, 32);
   }

   const bool seamless_cube =
      !(pCreateInfo->flags & VK_SAMPLER_CREATE_NON_SEAMLESS_CUBE_MAP_BIT_EXT);

   for (unsigned p = 0; p < sampler->n_planes; p++) {
      const bool plane_has_chroma =
         ycbcr_info && ycbcr_info->planes[p].has_chroma;
      const VkFilter min_filter = plane_has_chroma ?
         sampler->conversion->state.chroma_filter : pCreateInfo->minFilter;
      const VkFilter mag_filter = plane_has_chroma ?
         sampler->conversion->state.chroma_filter : pCreateInfo->magFilter;
      const bool enable_min_filter_addr_rounding =
         min_filter != VK_FILTER_NEAREST;
      const bool enable_mag_filter_addr_rounding =
         mag_filter != VK_FILTER_NEAREST;
      /* From the Broadwell PRM, SAMPLER_STATE:
       *
       *    "Mip Mode Filter must be set to MIPFILTER_NONE for Planar YUV
       *    surfaces."
       */
      enum isl_format plane0_isl_format = sampler->conversion ?
         anv_get_format(sampler->conversion->state.format)->planes[0].isl_format :
         ISL_FORMAT_UNSUPPORTED;
      const bool isl_format_is_planar_yuv =
         plane0_isl_format != ISL_FORMAT_UNSUPPORTED &&
         isl_format_is_yuv(plane0_isl_format) &&
         isl_format_is_planar(plane0_isl_format);

      const uint32_t mip_filter_mode =
         isl_format_is_planar_yuv ?
         MIPFILTER_NONE : vk_to_intel_mipmap_mode[pCreateInfo->mipmapMode];

      struct GENX(SAMPLER_STATE) sampler_state = {
         .SamplerDisable = false,
         .TextureBorderColorMode = DX10OGL,

#if GFX_VER >= 8
         .LODPreClampMode = CLAMP_MODE_OGL,
#else
         .LODPreClampEnable = CLAMP_ENABLE_OGL,
#endif

#if GFX_VER == 8
         .BaseMipLevel = 0.0,
#endif
         .MipModeFilter = mip_filter_mode,
         .MagModeFilter = vk_to_intel_tex_filter(mag_filter, pCreateInfo->anisotropyEnable),
         .MinModeFilter = vk_to_intel_tex_filter(min_filter, pCreateInfo->anisotropyEnable),
         .TextureLODBias = CLAMP(pCreateInfo->mipLodBias, -16, 15.996),
         .AnisotropicAlgorithm =
            pCreateInfo->anisotropyEnable ? EWAApproximation : LEGACY,
         .MinLOD = CLAMP(pCreateInfo->minLod, 0, 14),
         .MaxLOD = CLAMP(pCreateInfo->maxLod, 0, 14),
         .ChromaKeyEnable = 0,
         .ChromaKeyIndex = 0,
         .ChromaKeyMode = 0,
         .ShadowFunction =
            vk_to_intel_shadow_compare_op[pCreateInfo->compareEnable ?
                                          pCreateInfo->compareOp :
                                          VK_COMPARE_OP_NEVER],
         .CubeSurfaceControlMode = seamless_cube ? OVERRIDE : PROGRAMMED,

         .BorderColorPointer = border_color_offset,

#if GFX_VER >= 8
         .LODClampMagnificationMode = MIPNONE,
#endif

         .MaximumAnisotropy = vk_to_intel_max_anisotropy(pCreateInfo->maxAnisotropy),
         .RAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .RAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .VAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .VAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .UAddressMinFilterRoundingEnable = enable_min_filter_addr_rounding,
         .UAddressMagFilterRoundingEnable = enable_mag_filter_addr_rounding,
         .TrilinearFilterQuality = 0,
         .NonnormalizedCoordinateEnable = pCreateInfo->unnormalizedCoordinates,
         .TCXAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeU],
         .TCYAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeV],
         .TCZAddressControlMode = vk_to_intel_tex_address[pCreateInfo->addressModeW],
      };

      GENX(SAMPLER_STATE_pack)(NULL, sampler->state[p], &sampler_state);

      if (sampler->bindless_state.map) {
         memcpy(sampler->bindless_state.map + p * 32,
                sampler->state[p], GENX(SAMPLER_STATE_length) * 4);
      }
   }

   *pSampler = anv_sampler_to_handle(sampler);

   return VK_SUCCESS;
}
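
/* For context, a minimal application-side sketch (hypothetical values, not
 * part of this driver) of the VkSamplerCreateInfo that ends up in this
 * entrypoint:
 *
 *    VkSamplerCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
 *       .magFilter = VK_FILTER_LINEAR,
 *       .minFilter = VK_FILTER_LINEAR,
 *       .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
 *       .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
 *       .anisotropyEnable = VK_TRUE,
 *       .maxAnisotropy = 16.0f,
 *       .minLod = 0.0f,
 *       .maxLod = VK_LOD_CLAMP_NONE,
 *       .borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
 *    };
 *    VkSampler sampler;
 *    VkResult result = vkCreateSampler(device, &info, NULL, &sampler);
 *
 * With anisotropyEnable set, both mode filters become MAPFILTER_ANISOTROPIC
 * and MaximumAnisotropy is encoded as 7 (16:1) by the helpers above.
 */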
593