• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2016 The SwiftShader Authors. All Rights Reserved.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //    http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include "SamplerCore.hpp"
16 
17 #include "Constants.hpp"
18 #include "PixelRoutine.hpp"
19 #include "System/Debug.hpp"
20 #include "Vulkan/VkSampler.hpp"
21 
22 namespace sw {
23 
SamplerCore(Pointer<Byte> & constants,const Sampler & state,SamplerFunction function)24 SamplerCore::SamplerCore(Pointer<Byte> &constants, const Sampler &state, SamplerFunction function)
25     : constants(constants)
26     , state(state)
27     , function(function)
28 {
29 }
// Emits code to sample the texture according to the sampler state and the
// sampling function, returning a four-component floating-point result.
// uvwa holds the texture coordinates plus, depending on the view type, the
// array layer (or cube-array index). dRef is the depth-compare reference,
// lodOrBias carries the explicit LOD (Lod/Fetch) or the SPIR-V Bias operand
// (Bias), dsx/dsy are the explicit gradients used for LOD computation,
// offset the constant texel offsets, and sample the multisample index.
Vector4f SamplerCore::sampleTexture(Pointer<Byte> &texture, Float4 uvwa[4], Float4 &dRef, Float &&lodOrBias, Float4 &dsx, Float4 &dsy, Vector4i &offset, Int4 &sample)
{
	Vector4f c;

	Float4 u = uvwa[0];
	Float4 v = uvwa[1];
	Float4 w = uvwa[2];
	Float4 a;  // Array layer coordinate
	switch(state.textureType)
	{
	case VK_IMAGE_VIEW_TYPE_1D_ARRAY: a = uvwa[1]; break;
	case VK_IMAGE_VIEW_TYPE_2D_ARRAY: a = uvwa[2]; break;
	case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: a = uvwa[3]; break;
	default: break;
	}

	Float lod;
	Float anisotropy;
	Float4 uDelta;
	Float4 vDelta;
	Float4 M;  // Major axis

	if(state.isCube())
	{
		// Project the 3D direction onto the selected cube face; w is
		// repurposed to carry the face index (as raw bits).
		Int4 face = cubeFace(u, v, uvwa[0], uvwa[1], uvwa[2], M);
		w = As<Float4>(face);
	}

	// Determine if we can skip the LOD computation. This is the case when the mipmap has only one level, except for LOD query,
	// where we have to return the computed value. Anisotropic filtering requires computing the anisotropy factor even for a single mipmap level.
	bool singleMipLevel = (state.minLod == state.maxLod);
	bool requiresLodComputation = (function == Query) || (state.textureFilter == FILTER_ANISOTROPIC);
	bool skipLodComputation = singleMipLevel && !requiresLodComputation;

	if(skipLodComputation)
	{
		lod = state.minLod;
	}
	else if(function == Implicit || function == Bias || function == Grad || function == Query)
	{
		// Implicit/gradient-based LOD: derive it from the coordinate deltas,
		// using the dimensionality-specific formula.
		if(state.is1D())
		{
			computeLod1D(texture, lod, u, dsx, dsy);
		}
		else if(state.is2D())
		{
			computeLod2D(texture, lod, anisotropy, uDelta, vDelta, u, v, dsx, dsy);
		}
		else if(state.isCube())
		{
			computeLodCube(texture, lod, uvwa[0], uvwa[1], uvwa[2], dsx, dsy, M);
		}
		else
		{
			computeLod3D(texture, lod, u, v, w, dsx, dsy);
		}

		Float bias = state.mipLodBias;

		if(function == Bias)
		{
			// Add SPIR-V Bias operand to the sampler provided bias and clamp to maxSamplerLodBias limit.
			bias = Min(Max(bias + lodOrBias, -vk::MAX_SAMPLER_LOD_BIAS), vk::MAX_SAMPLER_LOD_BIAS);
		}

		lod += bias;
	}
	else if(function == Lod)
	{
		// Vulkan 1.1: "The absolute value of mipLodBias must be less than or equal to VkPhysicalDeviceLimits::maxSamplerLodBias"
		// Hence no explicit clamping to maxSamplerLodBias is required in this case.
		lod = lodOrBias + state.mipLodBias;
	}
	else if(function == Fetch)
	{
		// lodOrBias carries an integer LOD for texel fetches.
		// TODO: Eliminate int-float-int conversion.
		lod = Float(As<Int>(lodOrBias));
	}
	else if(function == Base || function == Gather)
	{
		lod = Float(0);
	}
	else
		UNREACHABLE("Sampler function %d", int(function));

	if(function != Base && function != Fetch && function != Gather)
	{
		if(function == Query)
		{
			c.y = Float4(lod);  // Unclamped LOD.
		}

		if(!skipLodComputation)
		{
			// Clamp to the sampler's LOD range.
			lod = Max(lod, state.minLod);
			lod = Min(lod, state.maxLod);
		}

		if(function == Query)
		{
			if(state.mipmapFilter == MIPMAP_POINT)
			{
				lod = Round(lod);  // TODO: Preferred formula is ceil(lod + 0.5) - 1
			}

			c.x = lod;
			//	c.y contains unclamped LOD.

			return c;
		}
	}

	// Decide between the fast 16-bit fixed-point filtering path and the
	// 32-bit floating-point path, which is required for formats/modes that
	// the 16-bit path cannot represent.
	bool force32BitFiltering = state.highPrecisionFiltering && !isYcbcrFormat() && (state.textureFilter != FILTER_POINT);
	bool use32BitFiltering = hasFloatTexture() || hasUnnormalizedIntegerTexture() || force32BitFiltering ||
	                         state.isCube() || state.unnormalizedCoordinates || state.compareEnable ||
	                         borderModeActive() || (function == Gather) || (function == Fetch);
	int numComponents = (function == Gather) ? 4 : textureComponentCount();

	if(use32BitFiltering)
	{
		c = sampleFloatFilter(texture, u, v, w, a, dRef, offset, sample, lod, anisotropy, uDelta, vDelta);
	}
	else  // 16-bit filtering.
	{
		Vector4s cs = sampleFilter(texture, u, v, w, a, offset, sample, lod, anisotropy, uDelta, vDelta);

		// Widen the 16-bit results to float, respecting signedness per component.
		for(int component = 0; component < numComponents; component++)
		{
			if(hasUnsignedTextureComponent(component))
			{
				c[component] = Float4(As<UShort4>(cs[component]));
			}
			else
			{
				c[component] = Float4(cs[component]);
			}
		}
	}

	if(hasNormalizedFormat() && !state.compareEnable)
	{
		// Rescale fixed-point results to normalized [0,1] / [-1,1] range.
		sw::float4 scale = getComponentScale();

		for(int component = 0; component < numComponents; component++)
		{
			int texelComponent = (function == Gather) ? getGatherComponent() : component;
			c[component] *= Float4(1.0f / scale[texelComponent]);
		}
	}

	if(state.textureFormat.isSignedNormalized())
	{
		// Clamp signed normalized results to -1.0, per SNORM conversion rules.
		for(int component = 0; component < numComponents; component++)
		{
			c[component] = Max(c[component], Float4(-1.0f));
		}
	}

	if(state.textureFilter != FILTER_GATHER)
	{
		// Apply the image view's component mapping, if it's not the identity.
		if((state.swizzle.r != VK_COMPONENT_SWIZZLE_R) ||
		   (state.swizzle.g != VK_COMPONENT_SWIZZLE_G) ||
		   (state.swizzle.b != VK_COMPONENT_SWIZZLE_B) ||
		   (state.swizzle.a != VK_COMPONENT_SWIZZLE_A))
		{
			const Vector4f col = c;
			bool integer = hasUnnormalizedIntegerTexture();
			c.x = applySwizzle(col, state.swizzle.r, integer);
			c.y = applySwizzle(col, state.swizzle.g, integer);
			c.z = applySwizzle(col, state.swizzle.b, integer);
			c.w = applySwizzle(col, state.swizzle.a, integer);
		}
	}
	else  // Gather
	{
		VkComponentSwizzle swizzle = gatherSwizzle();

		// R/G/B/A swizzles affect the component collected from each texel earlier.
		// Handle the ZERO and ONE cases here because we don't need to know the format.

		if(swizzle == VK_COMPONENT_SWIZZLE_ZERO)
		{
			c.x = c.y = c.z = c.w = Float4(0);
		}
		else if(swizzle == VK_COMPONENT_SWIZZLE_ONE)
		{
			bool integer = hasUnnormalizedIntegerTexture();
			c.x = c.y = c.z = c.w = integer ? As<Float4>(Int4(1)) : RValue<Float4>(Float4(1.0f));
		}
	}

	return c;
}
223 
applySwizzle(const Vector4f & c,VkComponentSwizzle swizzle,bool integer)224 Float4 SamplerCore::applySwizzle(const Vector4f &c, VkComponentSwizzle swizzle, bool integer)
225 {
226 	switch(swizzle)
227 	{
228 	default: UNSUPPORTED("VkComponentSwizzle %d", (int)swizzle);
229 	case VK_COMPONENT_SWIZZLE_R: return c.x;
230 	case VK_COMPONENT_SWIZZLE_G: return c.y;
231 	case VK_COMPONENT_SWIZZLE_B: return c.z;
232 	case VK_COMPONENT_SWIZZLE_A: return c.w;
233 	case VK_COMPONENT_SWIZZLE_ZERO: return Float4(0.0f, 0.0f, 0.0f, 0.0f);
234 	case VK_COMPONENT_SWIZZLE_ONE:
235 		if(integer)
236 		{
237 			return Float4(As<Float4>(sw::Int4(1, 1, 1, 1)));
238 		}
239 		else
240 		{
241 			return Float4(1.0f, 1.0f, 1.0f, 1.0f);
242 		}
243 		break;
244 	}
245 };
246 
// Offsets a 16-bit fixed-point texel coordinate by a multiple (`count`) of
// half a texel, loading the half-texel step from the mipmap structure at
// byte offset `halfOffset` (uHalf/vHalf/wHalf). Used to form the corners of
// the bilinear footprint. With wrap addressing plain modular arithmetic is
// used; otherwise saturating arithmetic clamps at the coordinate range ends.
Short4 SamplerCore::offsetSample(Short4 &uvw, Pointer<Byte> &mipmap, int halfOffset, bool wrap, int count, Float &lod)
{
	Short4 offset = *Pointer<Short4>(mipmap + halfOffset);

	if(state.textureFilter == FILTER_MIN_LINEAR_MAG_POINT)
	{
		// Linear only when minifying: mask the offset to zero when lod <= 0 (magnification).
		offset &= Short4(CmpNLE(Float4(lod), Float4(0.0f)));
	}
	else if(state.textureFilter == FILTER_MIN_POINT_MAG_LINEAR)
	{
		// Linear only when magnifying: mask the offset to zero when lod > 0 (minification).
		offset &= Short4(CmpLE(Float4(lod), Float4(0.0f)));
	}

	if(wrap)
	{
		switch(count)
		{
		case -1: return uvw - offset;
		case 0: return uvw;
		case +1: return uvw + offset;
		case 2: return uvw + offset + offset;
		}
	}
	else  // Clamp or mirror
	{
		// Unsigned saturating add/sub keeps the coordinate pinned at the ends of the range.
		switch(count)
		{
		case -1: return SubSat(As<UShort4>(uvw), As<UShort4>(offset));
		case 0: return uvw;
		case +1: return AddSat(As<UShort4>(uvw), As<UShort4>(offset));
		case 2: return AddSat(AddSat(As<UShort4>(uvw), As<UShort4>(offset)), As<UShort4>(offset));
		}
	}

	// Unreachable for the count values used by callers (-1, 0, +1, 2).
	return uvw;
}
283 
sampleFilter(Pointer<Byte> & texture,Float4 & u,Float4 & v,Float4 & w,const Float4 & a,Vector4i & offset,const Int4 & sample,Float & lod,Float & anisotropy,Float4 & uDelta,Float4 & vDelta)284 Vector4s SamplerCore::sampleFilter(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, const Float4 &a, Vector4i &offset, const Int4 &sample, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta)
285 {
286 	Vector4s c = sampleAniso(texture, u, v, w, a, offset, sample, lod, anisotropy, uDelta, vDelta, false);
287 
288 	if(function == Fetch)
289 	{
290 		return c;
291 	}
292 
293 	if(state.mipmapFilter == MIPMAP_LINEAR)
294 	{
295 		Vector4s cc = sampleAniso(texture, u, v, w, a, offset, sample, lod, anisotropy, uDelta, vDelta, true);
296 
297 		lod *= Float(1 << 16);
298 
299 		UShort4 utri = UShort4(Float4(lod));  // TODO: Optimize
300 		Short4 stri = utri >> 1;              // TODO: Optimize
301 
302 		if(hasUnsignedTextureComponent(0))
303 			cc.x = MulHigh(As<UShort4>(cc.x), utri);
304 		else
305 			cc.x = MulHigh(cc.x, stri);
306 		if(hasUnsignedTextureComponent(1))
307 			cc.y = MulHigh(As<UShort4>(cc.y), utri);
308 		else
309 			cc.y = MulHigh(cc.y, stri);
310 		if(hasUnsignedTextureComponent(2))
311 			cc.z = MulHigh(As<UShort4>(cc.z), utri);
312 		else
313 			cc.z = MulHigh(cc.z, stri);
314 		if(hasUnsignedTextureComponent(3))
315 			cc.w = MulHigh(As<UShort4>(cc.w), utri);
316 		else
317 			cc.w = MulHigh(cc.w, stri);
318 
319 		utri = ~utri;
320 		stri = Short4(0x7FFF) - stri;
321 
322 		if(hasUnsignedTextureComponent(0))
323 			c.x = MulHigh(As<UShort4>(c.x), utri);
324 		else
325 			c.x = MulHigh(c.x, stri);
326 		if(hasUnsignedTextureComponent(1))
327 			c.y = MulHigh(As<UShort4>(c.y), utri);
328 		else
329 			c.y = MulHigh(c.y, stri);
330 		if(hasUnsignedTextureComponent(2))
331 			c.z = MulHigh(As<UShort4>(c.z), utri);
332 		else
333 			c.z = MulHigh(c.z, stri);
334 		if(hasUnsignedTextureComponent(3))
335 			c.w = MulHigh(As<UShort4>(c.w), utri);
336 		else
337 			c.w = MulHigh(c.w, stri);
338 
339 		c.x += cc.x;
340 		c.y += cc.y;
341 		c.z += cc.z;
342 		c.w += cc.w;
343 
344 		if(!hasUnsignedTextureComponent(0)) c.x += c.x;
345 		if(!hasUnsignedTextureComponent(1)) c.y += c.y;
346 		if(!hasUnsignedTextureComponent(2)) c.z += c.z;
347 		if(!hasUnsignedTextureComponent(3)) c.w += c.w;
348 	}
349 
350 	return c;
351 }
352 
sampleAniso(Pointer<Byte> & texture,Float4 & u,Float4 & v,Float4 & w,const Float4 & a,Vector4i & offset,const Int4 & sample,Float & lod,Float & anisotropy,Float4 & uDelta,Float4 & vDelta,bool secondLOD)353 Vector4s SamplerCore::sampleAniso(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, const Float4 &a, Vector4i &offset, const Int4 &sample, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta, bool secondLOD)
354 {
355 	Vector4s c;
356 
357 	if(state.textureFilter != FILTER_ANISOTROPIC)
358 	{
359 		c = sampleQuad(texture, u, v, w, a, offset, sample, lod, secondLOD);
360 	}
361 	else
362 	{
363 		Int N = RoundInt(anisotropy);
364 
365 		Vector4s cSum;
366 
367 		cSum.x = Short4(0);
368 		cSum.y = Short4(0);
369 		cSum.z = Short4(0);
370 		cSum.w = Short4(0);
371 
372 		Float4 A = *Pointer<Float4>(constants + OFFSET(Constants, uvWeight) + 16 * N);
373 		Float4 B = *Pointer<Float4>(constants + OFFSET(Constants, uvStart) + 16 * N);
374 		UShort4 cw = *Pointer<UShort4>(constants + OFFSET(Constants, cWeight) + 8 * N);
375 		Short4 sw = Short4(cw >> 1);
376 
377 		Float4 du = uDelta;
378 		Float4 dv = vDelta;
379 
380 		Float4 u0 = u + B * du;
381 		Float4 v0 = v + B * dv;
382 
383 		du *= A;
384 		dv *= A;
385 
386 		Int i = 0;
387 
388 		Do
389 		{
390 			c = sampleQuad(texture, u0, v0, w, a, offset, sample, lod, secondLOD);
391 
392 			u0 += du;
393 			v0 += dv;
394 
395 			if(hasUnsignedTextureComponent(0))
396 				cSum.x += As<Short4>(MulHigh(As<UShort4>(c.x), cw));
397 			else
398 				cSum.x += MulHigh(c.x, sw);
399 			if(hasUnsignedTextureComponent(1))
400 				cSum.y += As<Short4>(MulHigh(As<UShort4>(c.y), cw));
401 			else
402 				cSum.y += MulHigh(c.y, sw);
403 			if(hasUnsignedTextureComponent(2))
404 				cSum.z += As<Short4>(MulHigh(As<UShort4>(c.z), cw));
405 			else
406 				cSum.z += MulHigh(c.z, sw);
407 			if(hasUnsignedTextureComponent(3))
408 				cSum.w += As<Short4>(MulHigh(As<UShort4>(c.w), cw));
409 			else
410 				cSum.w += MulHigh(c.w, sw);
411 
412 			i++;
413 		}
414 		Until(i >= N);
415 
416 		if(hasUnsignedTextureComponent(0))
417 			c.x = cSum.x;
418 		else
419 			c.x = AddSat(cSum.x, cSum.x);
420 		if(hasUnsignedTextureComponent(1))
421 			c.y = cSum.y;
422 		else
423 			c.y = AddSat(cSum.y, cSum.y);
424 		if(hasUnsignedTextureComponent(2))
425 			c.z = cSum.z;
426 		else
427 			c.z = AddSat(cSum.z, cSum.z);
428 		if(hasUnsignedTextureComponent(3))
429 			c.w = cSum.w;
430 		else
431 			c.w = AddSat(cSum.w, cSum.w);
432 	}
433 
434 	return c;
435 }
436 
sampleQuad(Pointer<Byte> & texture,Float4 & u,Float4 & v,Float4 & w,const Float4 & a,Vector4i & offset,const Int4 & sample,Float & lod,bool secondLOD)437 Vector4s SamplerCore::sampleQuad(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, const Float4 &a, Vector4i &offset, const Int4 &sample, Float &lod, bool secondLOD)
438 {
439 	if(state.textureType != VK_IMAGE_VIEW_TYPE_3D)
440 	{
441 		return sampleQuad2D(texture, u, v, w, a, offset, sample, lod, secondLOD);
442 	}
443 	else
444 	{
445 		return sample3D(texture, u, v, w, offset, sample, lod, secondLOD);
446 	}
447 }
448 
// 16-bit fixed-point sampling for non-3D textures: converts coordinates to
// texel addresses, fetches the (up to four) neighboring texels, and either
// bilinearly blends them or, for gather, returns the selected component of
// each corner texel.
Vector4s SamplerCore::sampleQuad2D(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, const Float4 &a, Vector4i &offset, const Int4 &sample, Float &lod, bool secondLOD)
{
	Vector4s c;

	int componentCount = textureComponentCount();
	bool gather = (state.textureFilter == FILTER_GATHER);

	Pointer<Byte> mipmap = selectMipmap(texture, lod, secondLOD);
	Pointer<Byte> buffer = *Pointer<Pointer<Byte>>(mipmap + OFFSET(Mipmap, buffer));

	// Convert coordinates to 16-bit fixed-point texel addresses per addressing mode.
	Short4 uuuu = address(u, state.addressingModeU, mipmap);
	Short4 vvvv = address(v, state.addressingModeV, mipmap);
	Short4 wwww = address(w, state.addressingModeW, mipmap);
	Short4 layerIndex = computeLayerIndex16(a, mipmap);

	if(state.textureFilter == FILTER_POINT)
	{
		c = sampleTexel(uuuu, vvvv, wwww, layerIndex, offset, sample, mipmap, buffer);
	}
	else
	{
		// Shift by half a texel in each direction to obtain the four corners
		// of the bilinear footprint.
		Short4 uuuu0 = offsetSample(uuuu, mipmap, OFFSET(Mipmap, uHalf), state.addressingModeU == ADDRESSING_WRAP, -1, lod);
		Short4 vvvv0 = offsetSample(vvvv, mipmap, OFFSET(Mipmap, vHalf), state.addressingModeV == ADDRESSING_WRAP, -1, lod);
		Short4 uuuu1 = offsetSample(uuuu, mipmap, OFFSET(Mipmap, uHalf), state.addressingModeU == ADDRESSING_WRAP, +1, lod);
		Short4 vvvv1 = offsetSample(vvvv, mipmap, OFFSET(Mipmap, vHalf), state.addressingModeV == ADDRESSING_WRAP, +1, lod);

		Vector4s c00 = sampleTexel(uuuu0, vvvv0, wwww, layerIndex, offset, sample, mipmap, buffer);
		Vector4s c10 = sampleTexel(uuuu1, vvvv0, wwww, layerIndex, offset, sample, mipmap, buffer);
		Vector4s c01 = sampleTexel(uuuu0, vvvv1, wwww, layerIndex, offset, sample, mipmap, buffer);
		Vector4s c11 = sampleTexel(uuuu1, vvvv1, wwww, layerIndex, offset, sample, mipmap, buffer);

		if(!gather)  // Blend
		{
			// Fractions: fixed-point coordinate scaled by size, keeping the low bits.
			UShort4 f0u = As<UShort4>(uuuu0) * UShort4(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, width)));
			UShort4 f0v = As<UShort4>(vvvv0) * UShort4(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, height)));

			UShort4 f1u = ~f0u;
			UShort4 f1v = ~f0v;

			// Per-corner bilinear weights.
			UShort4 f0u0v = MulHigh(f0u, f0v);
			UShort4 f1u0v = MulHigh(f1u, f0v);
			UShort4 f0u1v = MulHigh(f0u, f1v);
			UShort4 f1u1v = MulHigh(f1u, f1v);

			// Signed fractions
			Short4 f1u1vs;
			Short4 f0u1vs;
			Short4 f1u0vs;
			Short4 f0u0vs;

			// Halved weights are only needed if any component is signed.
			if(!hasUnsignedTextureComponent(0) || !hasUnsignedTextureComponent(1) || !hasUnsignedTextureComponent(2) || !hasUnsignedTextureComponent(3))
			{
				f1u1vs = f1u1v >> 1;
				f0u1vs = f0u1v >> 1;
				f1u0vs = f1u0v >> 1;
				f0u0vs = f0u0v >> 1;
			}

			// Bilinear interpolation
			if(componentCount >= 1)
			{
				// 16-bit unsigned components use an exact two-step lerp to avoid precision loss.
				if(has16bitTextureComponents() && hasUnsignedTextureComponent(0))
				{
					c00.x = As<UShort4>(c00.x) - MulHigh(As<UShort4>(c00.x), f0u) + MulHigh(As<UShort4>(c10.x), f0u);
					c01.x = As<UShort4>(c01.x) - MulHigh(As<UShort4>(c01.x), f0u) + MulHigh(As<UShort4>(c11.x), f0u);
					c.x = As<UShort4>(c00.x) - MulHigh(As<UShort4>(c00.x), f0v) + MulHigh(As<UShort4>(c01.x), f0v);
				}
				else
				{
					if(hasUnsignedTextureComponent(0))
					{
						c00.x = MulHigh(As<UShort4>(c00.x), f1u1v);
						c10.x = MulHigh(As<UShort4>(c10.x), f0u1v);
						c01.x = MulHigh(As<UShort4>(c01.x), f1u0v);
						c11.x = MulHigh(As<UShort4>(c11.x), f0u0v);
					}
					else
					{
						c00.x = MulHigh(c00.x, f1u1vs);
						c10.x = MulHigh(c10.x, f0u1vs);
						c01.x = MulHigh(c01.x, f1u0vs);
						c11.x = MulHigh(c11.x, f0u0vs);
					}

					c.x = (c00.x + c10.x) + (c01.x + c11.x);
					if(!hasUnsignedTextureComponent(0)) c.x = AddSat(c.x, c.x);  // Correct for signed fractions
				}
			}

			if(componentCount >= 2)
			{
				if(has16bitTextureComponents() && hasUnsignedTextureComponent(1))
				{
					c00.y = As<UShort4>(c00.y) - MulHigh(As<UShort4>(c00.y), f0u) + MulHigh(As<UShort4>(c10.y), f0u);
					c01.y = As<UShort4>(c01.y) - MulHigh(As<UShort4>(c01.y), f0u) + MulHigh(As<UShort4>(c11.y), f0u);
					c.y = As<UShort4>(c00.y) - MulHigh(As<UShort4>(c00.y), f0v) + MulHigh(As<UShort4>(c01.y), f0v);
				}
				else
				{
					if(hasUnsignedTextureComponent(1))
					{
						c00.y = MulHigh(As<UShort4>(c00.y), f1u1v);
						c10.y = MulHigh(As<UShort4>(c10.y), f0u1v);
						c01.y = MulHigh(As<UShort4>(c01.y), f1u0v);
						c11.y = MulHigh(As<UShort4>(c11.y), f0u0v);
					}
					else
					{
						c00.y = MulHigh(c00.y, f1u1vs);
						c10.y = MulHigh(c10.y, f0u1vs);
						c01.y = MulHigh(c01.y, f1u0vs);
						c11.y = MulHigh(c11.y, f0u0vs);
					}

					c.y = (c00.y + c10.y) + (c01.y + c11.y);
					if(!hasUnsignedTextureComponent(1)) c.y = AddSat(c.y, c.y);  // Correct for signed fractions
				}
			}

			if(componentCount >= 3)
			{
				if(has16bitTextureComponents() && hasUnsignedTextureComponent(2))
				{
					c00.z = As<UShort4>(c00.z) - MulHigh(As<UShort4>(c00.z), f0u) + MulHigh(As<UShort4>(c10.z), f0u);
					c01.z = As<UShort4>(c01.z) - MulHigh(As<UShort4>(c01.z), f0u) + MulHigh(As<UShort4>(c11.z), f0u);
					c.z = As<UShort4>(c00.z) - MulHigh(As<UShort4>(c00.z), f0v) + MulHigh(As<UShort4>(c01.z), f0v);
				}
				else
				{
					if(hasUnsignedTextureComponent(2))
					{
						c00.z = MulHigh(As<UShort4>(c00.z), f1u1v);
						c10.z = MulHigh(As<UShort4>(c10.z), f0u1v);
						c01.z = MulHigh(As<UShort4>(c01.z), f1u0v);
						c11.z = MulHigh(As<UShort4>(c11.z), f0u0v);
					}
					else
					{
						c00.z = MulHigh(c00.z, f1u1vs);
						c10.z = MulHigh(c10.z, f0u1vs);
						c01.z = MulHigh(c01.z, f1u0vs);
						c11.z = MulHigh(c11.z, f0u0vs);
					}

					c.z = (c00.z + c10.z) + (c01.z + c11.z);
					if(!hasUnsignedTextureComponent(2)) c.z = AddSat(c.z, c.z);  // Correct for signed fractions
				}
			}

			if(componentCount >= 4)
			{
				if(has16bitTextureComponents() && hasUnsignedTextureComponent(3))
				{
					c00.w = As<UShort4>(c00.w) - MulHigh(As<UShort4>(c00.w), f0u) + MulHigh(As<UShort4>(c10.w), f0u);
					c01.w = As<UShort4>(c01.w) - MulHigh(As<UShort4>(c01.w), f0u) + MulHigh(As<UShort4>(c11.w), f0u);
					c.w = As<UShort4>(c00.w) - MulHigh(As<UShort4>(c00.w), f0v) + MulHigh(As<UShort4>(c01.w), f0v);
				}
				else
				{
					if(hasUnsignedTextureComponent(3))
					{
						c00.w = MulHigh(As<UShort4>(c00.w), f1u1v);
						c10.w = MulHigh(As<UShort4>(c10.w), f0u1v);
						c01.w = MulHigh(As<UShort4>(c01.w), f1u0v);
						c11.w = MulHigh(As<UShort4>(c11.w), f0u0v);
					}
					else
					{
						c00.w = MulHigh(c00.w, f1u1vs);
						c10.w = MulHigh(c10.w, f0u1vs);
						c01.w = MulHigh(c01.w, f1u0vs);
						c11.w = MulHigh(c11.w, f0u0vs);
					}

					c.w = (c00.w + c10.w) + (c01.w + c11.w);
					if(!hasUnsignedTextureComponent(3)) c.w = AddSat(c.w, c.w);  // Correct for signed fractions
				}
			}
		}
		else  // Gather
		{
			VkComponentSwizzle swizzle = gatherSwizzle();
			switch(swizzle)
			{
			case VK_COMPONENT_SWIZZLE_ZERO:
			case VK_COMPONENT_SWIZZLE_ONE:
				// Handled at the final component swizzle.
				break;
			default:
				// Collect the swizzle-selected component of the four corner
				// texels, in the order defined for OpImageGather.
				c.x = c01[swizzle - VK_COMPONENT_SWIZZLE_R];
				c.y = c11[swizzle - VK_COMPONENT_SWIZZLE_R];
				c.z = c10[swizzle - VK_COMPONENT_SWIZZLE_R];
				c.w = c00[swizzle - VK_COMPONENT_SWIZZLE_R];
				break;
			}
		}
	}

	return c;
}
650 
// 16-bit fixed-point sampling for 3D textures: fetches the eight corner
// texels of the trilinear footprint and blends them with per-corner weights
// formed from the u/v/w coordinate fractions.
Vector4s SamplerCore::sample3D(Pointer<Byte> &texture, Float4 &u_, Float4 &v_, Float4 &w_, Vector4i &offset, const Int4 &sample, Float &lod, bool secondLOD)
{
	Vector4s c_;

	int componentCount = textureComponentCount();

	Pointer<Byte> mipmap = selectMipmap(texture, lod, secondLOD);
	Pointer<Byte> buffer = *Pointer<Pointer<Byte>>(mipmap + OFFSET(Mipmap, buffer));

	// Convert coordinates to 16-bit fixed-point texel addresses per addressing mode.
	Short4 uuuu = address(u_, state.addressingModeU, mipmap);
	Short4 vvvv = address(v_, state.addressingModeV, mipmap);
	Short4 wwww = address(w_, state.addressingModeW, mipmap);

	if(state.textureFilter == FILTER_POINT)
	{
		c_ = sampleTexel(uuuu, vvvv, wwww, 0, offset, sample, mipmap, buffer);
	}
	else
	{
		Vector4s c[2][2][2];

		Short4 u[2][2][2];
		Short4 v[2][2][2];
		Short4 s[2][2][2];

		// Half-texel offsets in each direction yield the 2x2x2 corner coordinates.
		for(int i = 0; i < 2; i++)
		{
			for(int j = 0; j < 2; j++)
			{
				for(int k = 0; k < 2; k++)
				{
					u[i][j][k] = offsetSample(uuuu, mipmap, OFFSET(Mipmap, uHalf), state.addressingModeU == ADDRESSING_WRAP, i * 2 - 1, lod);
					v[i][j][k] = offsetSample(vvvv, mipmap, OFFSET(Mipmap, vHalf), state.addressingModeV == ADDRESSING_WRAP, j * 2 - 1, lod);
					s[i][j][k] = offsetSample(wwww, mipmap, OFFSET(Mipmap, wHalf), state.addressingModeW == ADDRESSING_WRAP, k * 2 - 1, lod);
				}
			}
		}

		// Fractions: fixed-point coordinate scaled by size, keeping the low bits.
		UShort4 f0u = As<UShort4>(u[0][0][0]) * UShort4(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, width)));
		UShort4 f0v = As<UShort4>(v[0][0][0]) * UShort4(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, height)));
		UShort4 f0s = As<UShort4>(s[0][0][0]) * UShort4(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, depth)));

		UShort4 f1u = ~f0u;
		UShort4 f1v = ~f0v;
		UShort4 f1s = ~f0s;

		UShort4 f[2][2][2];
		Short4 fs[2][2][2];

		// Per-corner weights: first the u*v products (identical for both k
		// slices), then multiplied by the matching w fraction.
		f[1][1][1] = MulHigh(f1u, f1v);
		f[0][1][1] = MulHigh(f0u, f1v);
		f[1][0][1] = MulHigh(f1u, f0v);
		f[0][0][1] = MulHigh(f0u, f0v);
		f[1][1][0] = MulHigh(f1u, f1v);
		f[0][1][0] = MulHigh(f0u, f1v);
		f[1][0][0] = MulHigh(f1u, f0v);
		f[0][0][0] = MulHigh(f0u, f0v);

		f[1][1][1] = MulHigh(f[1][1][1], f1s);
		f[0][1][1] = MulHigh(f[0][1][1], f1s);
		f[1][0][1] = MulHigh(f[1][0][1], f1s);
		f[0][0][1] = MulHigh(f[0][0][1], f1s);
		f[1][1][0] = MulHigh(f[1][1][0], f0s);
		f[0][1][0] = MulHigh(f[0][1][0], f0s);
		f[1][0][0] = MulHigh(f[1][0][0], f0s);
		f[0][0][0] = MulHigh(f[0][0][0], f0s);

		// Signed fractions
		// Halved weights are only needed if any component is signed.
		if(!hasUnsignedTextureComponent(0) || !hasUnsignedTextureComponent(1) || !hasUnsignedTextureComponent(2) || !hasUnsignedTextureComponent(3))
		{
			fs[0][0][0] = f[0][0][0] >> 1;
			fs[0][0][1] = f[0][0][1] >> 1;
			fs[0][1][0] = f[0][1][0] >> 1;
			fs[0][1][1] = f[0][1][1] >> 1;
			fs[1][0][0] = f[1][0][0] >> 1;
			fs[1][0][1] = f[1][0][1] >> 1;
			fs[1][1][0] = f[1][1][0] >> 1;
			fs[1][1][1] = f[1][1][1] >> 1;
		}

		// Fetch each corner, weigh it (note the mirrored weight index), and
		// accumulate everything into c[0][0][0].
		for(int i = 0; i < 2; i++)
		{
			for(int j = 0; j < 2; j++)
			{
				for(int k = 0; k < 2; k++)
				{
					c[i][j][k] = sampleTexel(u[i][j][k], v[i][j][k], s[i][j][k], 0, offset, sample, mipmap, buffer);

					if(componentCount >= 1)
					{
						if(hasUnsignedTextureComponent(0))
							c[i][j][k].x = MulHigh(As<UShort4>(c[i][j][k].x), f[1 - i][1 - j][1 - k]);
						else
							c[i][j][k].x = MulHigh(c[i][j][k].x, fs[1 - i][1 - j][1 - k]);
					}
					if(componentCount >= 2)
					{
						if(hasUnsignedTextureComponent(1))
							c[i][j][k].y = MulHigh(As<UShort4>(c[i][j][k].y), f[1 - i][1 - j][1 - k]);
						else
							c[i][j][k].y = MulHigh(c[i][j][k].y, fs[1 - i][1 - j][1 - k]);
					}
					if(componentCount >= 3)
					{
						if(hasUnsignedTextureComponent(2))
							c[i][j][k].z = MulHigh(As<UShort4>(c[i][j][k].z), f[1 - i][1 - j][1 - k]);
						else
							c[i][j][k].z = MulHigh(c[i][j][k].z, fs[1 - i][1 - j][1 - k]);
					}
					if(componentCount >= 4)
					{
						if(hasUnsignedTextureComponent(3))
							c[i][j][k].w = MulHigh(As<UShort4>(c[i][j][k].w), f[1 - i][1 - j][1 - k]);
						else
							c[i][j][k].w = MulHigh(c[i][j][k].w, fs[1 - i][1 - j][1 - k]);
					}

					if(i != 0 || j != 0 || k != 0)
					{
						if(componentCount >= 1) c[0][0][0].x += c[i][j][k].x;
						if(componentCount >= 2) c[0][0][0].y += c[i][j][k].y;
						if(componentCount >= 3) c[0][0][0].z += c[i][j][k].z;
						if(componentCount >= 4) c[0][0][0].w += c[i][j][k].w;
					}
				}
			}
		}

		if(componentCount >= 1) c_.x = c[0][0][0].x;
		if(componentCount >= 2) c_.y = c[0][0][0].y;
		if(componentCount >= 3) c_.z = c[0][0][0].z;
		if(componentCount >= 4) c_.w = c[0][0][0].w;

		// Correct for signed fractions
		if(componentCount >= 1)
			if(!hasUnsignedTextureComponent(0)) c_.x = AddSat(c_.x, c_.x);
		if(componentCount >= 2)
			if(!hasUnsignedTextureComponent(1)) c_.y = AddSat(c_.y, c_.y);
		if(componentCount >= 3)
			if(!hasUnsignedTextureComponent(2)) c_.z = AddSat(c_.z, c_.z);
		if(componentCount >= 4)
			if(!hasUnsignedTextureComponent(3)) c_.w = AddSat(c_.w, c_.w);
	}

	return c_;
}
798 
sampleFloatFilter(Pointer<Byte> & texture,Float4 & u,Float4 & v,Float4 & w,const Float4 & a,Float4 & dRef,Vector4i & offset,const Int4 & sample,Float & lod,Float & anisotropy,Float4 & uDelta,Float4 & vDelta)799 Vector4f SamplerCore::sampleFloatFilter(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, const Float4 &a, Float4 &dRef, Vector4i &offset, const Int4 &sample, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta)
800 {
801 	Vector4f c = sampleFloatAniso(texture, u, v, w, a, dRef, offset, sample, lod, anisotropy, uDelta, vDelta, false);
802 
803 	if(function == Fetch)
804 	{
805 		return c;
806 	}
807 
808 	if(state.mipmapFilter == MIPMAP_LINEAR)
809 	{
810 		Vector4f cc = sampleFloatAniso(texture, u, v, w, a, dRef, offset, sample, lod, anisotropy, uDelta, vDelta, true);
811 
812 		Float4 lod4 = Float4(Frac(lod));
813 
814 		c.x = (cc.x - c.x) * lod4 + c.x;
815 		c.y = (cc.y - c.y) * lod4 + c.y;
816 		c.z = (cc.z - c.z) * lod4 + c.z;
817 		c.w = (cc.w - c.w) * lod4 + c.w;
818 	}
819 
820 	return c;
821 }
822 
sampleFloatAniso(Pointer<Byte> & texture,Float4 & u,Float4 & v,Float4 & w,const Float4 & a,Float4 & dRef,Vector4i & offset,const Int4 & sample,Float & lod,Float & anisotropy,Float4 & uDelta,Float4 & vDelta,bool secondLOD)823 Vector4f SamplerCore::sampleFloatAniso(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, const Float4 &a, Float4 &dRef, Vector4i &offset, const Int4 &sample, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta, bool secondLOD)
824 {
825 	Vector4f c;
826 
827 	if(state.textureFilter != FILTER_ANISOTROPIC)
828 	{
829 		c = sampleFloat(texture, u, v, w, a, dRef, offset, sample, lod, secondLOD);
830 	}
831 	else
832 	{
833 		Int N = RoundInt(anisotropy);
834 
835 		Vector4f cSum;
836 
837 		cSum.x = Float4(0.0f);
838 		cSum.y = Float4(0.0f);
839 		cSum.z = Float4(0.0f);
840 		cSum.w = Float4(0.0f);
841 
842 		Float4 A = *Pointer<Float4>(constants + OFFSET(Constants, uvWeight) + 16 * N);
843 		Float4 B = *Pointer<Float4>(constants + OFFSET(Constants, uvStart) + 16 * N);
844 
845 		Float4 du = uDelta;
846 		Float4 dv = vDelta;
847 
848 		Float4 u0 = u + B * du;
849 		Float4 v0 = v + B * dv;
850 
851 		du *= A;
852 		dv *= A;
853 
854 		Int i = 0;
855 
856 		Do
857 		{
858 			c = sampleFloat(texture, u0, v0, w, a, dRef, offset, sample, lod, secondLOD);
859 
860 			u0 += du;
861 			v0 += dv;
862 
863 			cSum.x += c.x * A;
864 			cSum.y += c.y * A;
865 			cSum.z += c.z * A;
866 			cSum.w += c.w * A;
867 
868 			i++;
869 		}
870 		Until(i >= N);
871 
872 		c.x = cSum.x;
873 		c.y = cSum.y;
874 		c.z = cSum.z;
875 		c.w = cSum.w;
876 	}
877 
878 	return c;
879 }
880 
sampleFloat(Pointer<Byte> & texture,Float4 & u,Float4 & v,Float4 & w,const Float4 & a,Float4 & dRef,Vector4i & offset,const Int4 & sample,Float & lod,bool secondLOD)881 Vector4f SamplerCore::sampleFloat(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, const Float4 &a, Float4 &dRef, Vector4i &offset, const Int4 &sample, Float &lod, bool secondLOD)
882 {
883 	if(state.textureType != VK_IMAGE_VIEW_TYPE_3D)
884 	{
885 		return sampleFloat2D(texture, u, v, w, a, dRef, offset, sample, lod, secondLOD);
886 	}
887 	else
888 	{
889 		return sampleFloat3D(texture, u, v, w, dRef, offset, sample, lod, secondLOD);
890 	}
891 }
892 
// Samples a 1D/2D/cube (optionally arrayed) image with floating-point output.
// Performs point sampling, bilinear filtering, or a 4-texel gather, depending
// on the sampler state.
Vector4f SamplerCore::sampleFloat2D(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, const Float4 &a, Float4 &dRef, Vector4i &offset, const Int4 &sample, Float &lod, bool secondLOD)
{
	Vector4f c;

	int componentCount = textureComponentCount();
	bool gather = (state.textureFilter == FILTER_GATHER);

	// Select the mip level for this LOD and the base address of its texel data.
	Pointer<Byte> mipmap = selectMipmap(texture, lod, secondLOD);
	Pointer<Byte> buffer = *Pointer<Pointer<Byte>>(mipmap + OFFSET(Mipmap, buffer));

	// Resolve the coordinates into the two neighboring integer texel
	// coordinates per axis (x0/x1, y0/y1) and fractional weights (fu, fv).
	Int4 x0, x1, y0, y1;
	Float4 fu, fv;
	Int4 filter = computeFilterOffset(lod);
	address(u, x0, x1, fu, mipmap, offset.x, filter, OFFSET(Mipmap, width), state.addressingModeU);
	address(v, y0, y1, fv, mipmap, offset.y, filter, OFFSET(Mipmap, height), state.addressingModeV);

	// Pre-multiply row coordinates by the row pitch so sampleTexel() can sum
	// x/y/z contributions directly into a linear element offset.
	Int4 pitchP = As<Int4>(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, pitchP), 16));
	y0 *= pitchP;

	Int4 z;
	if(state.isCube() || state.isArrayed())
	{
		Int4 face = As<Int4>(w);  // For cube sampling, w carries the integer face index.
		Int4 layerIndex = computeLayerIndex(a, mipmap);

		// For cube maps, the layer argument is per cube, each of which has 6 layers
		if(state.textureType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
		{
			layerIndex *= Int4(6);
		}

		z = state.isCube() ? face : layerIndex;

		// Cube arrays address by face *and* layer, so add the (already
		// 6x-scaled) layer on top of the selected face.
		if(state.textureType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
		{
			z += layerIndex;
		}

		z *= *Pointer<Int4>(mipmap + OFFSET(Mipmap, sliceP), 16);
	}

	if(state.textureFilter == FILTER_POINT || (function == Fetch))
	{
		// Nearest filtering (or texelFetch): one texel per lane.
		c = sampleTexel(x0, y0, z, dRef, sample, mipmap, buffer);
	}
	else
	{
		y1 *= pitchP;

		// Fetch the 2x2 footprint used for bilinear filtering / gather.
		Vector4f c00 = sampleTexel(x0, y0, z, dRef, sample, mipmap, buffer);
		Vector4f c10 = sampleTexel(x1, y0, z, dRef, sample, mipmap, buffer);
		Vector4f c01 = sampleTexel(x0, y1, z, dRef, sample, mipmap, buffer);
		Vector4f c11 = sampleTexel(x1, y1, z, dRef, sample, mipmap, buffer);

		if(!gather)  // Blend
		{
			// Bilinear interpolation: lerp along u on both rows, then along v.
			if(componentCount >= 1) c00.x = c00.x + fu * (c10.x - c00.x);
			if(componentCount >= 2) c00.y = c00.y + fu * (c10.y - c00.y);
			if(componentCount >= 3) c00.z = c00.z + fu * (c10.z - c00.z);
			if(componentCount >= 4) c00.w = c00.w + fu * (c10.w - c00.w);

			if(componentCount >= 1) c01.x = c01.x + fu * (c11.x - c01.x);
			if(componentCount >= 2) c01.y = c01.y + fu * (c11.y - c01.y);
			if(componentCount >= 3) c01.z = c01.z + fu * (c11.z - c01.z);
			if(componentCount >= 4) c01.w = c01.w + fu * (c11.w - c01.w);

			if(componentCount >= 1) c.x = c00.x + fv * (c01.x - c00.x);
			if(componentCount >= 2) c.y = c00.y + fv * (c01.y - c00.y);
			if(componentCount >= 3) c.z = c00.z + fv * (c01.z - c00.z);
			if(componentCount >= 4) c.w = c00.w + fv * (c01.w - c00.w);
		}
		else  // Gather
		{
			// Gather returns one selected component of each of the four
			// texels, in (x,y,z,w) = (c01, c11, c10, c00) order.
			VkComponentSwizzle swizzle = gatherSwizzle();
			switch(swizzle)
			{
			case VK_COMPONENT_SWIZZLE_ZERO:
			case VK_COMPONENT_SWIZZLE_ONE:
				// Handled at the final component swizzle.
				break;
			default:
				c.x = c01[swizzle - VK_COMPONENT_SWIZZLE_R];
				c.y = c11[swizzle - VK_COMPONENT_SWIZZLE_R];
				c.z = c10[swizzle - VK_COMPONENT_SWIZZLE_R];
				c.w = c00[swizzle - VK_COMPONENT_SWIZZLE_R];
				break;
			}
		}
	}

	return c;
}
985 
// Samples a 3D image with floating-point output, using either point sampling
// or trilinear filtering across the 2x2x2 texel footprint.
Vector4f SamplerCore::sampleFloat3D(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Float4 &dRef, Vector4i &offset, const Int4 &sample, Float &lod, bool secondLOD)
{
	Vector4f c;

	int componentCount = textureComponentCount();

	// Select the mip level for this LOD and the base address of its texel data.
	Pointer<Byte> mipmap = selectMipmap(texture, lod, secondLOD);
	Pointer<Byte> buffer = *Pointer<Pointer<Byte>>(mipmap + OFFSET(Mipmap, buffer));

	// Resolve each coordinate into the two neighboring integer texel
	// coordinates and the corresponding fractional weight.
	Int4 x0, x1, y0, y1, z0, z1;
	Float4 fu, fv, fw;
	Int4 filter = computeFilterOffset(lod);
	address(u, x0, x1, fu, mipmap, offset.x, filter, OFFSET(Mipmap, width), state.addressingModeU);
	address(v, y0, y1, fv, mipmap, offset.y, filter, OFFSET(Mipmap, height), state.addressingModeV);
	address(w, z0, z1, fw, mipmap, offset.z, filter, OFFSET(Mipmap, depth), state.addressingModeW);

	// Pre-multiply row/slice coordinates by pitch so sampleTexel() can sum
	// x/y/z contributions directly into a linear element offset.
	Int4 pitchP = As<Int4>(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, pitchP), 16));
	Int4 sliceP = As<Int4>(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, sliceP), 16));
	y0 *= pitchP;
	z0 *= sliceP;

	if(state.textureFilter == FILTER_POINT || (function == Fetch))
	{
		// Nearest filtering (or texelFetch): one texel per lane.
		c = sampleTexel(x0, y0, z0, dRef, sample, mipmap, buffer);
	}
	else
	{
		y1 *= pitchP;
		z1 *= sliceP;

		// Fetch the 2x2x2 footprint (cXYZ = corner at x, y, z).
		Vector4f c000 = sampleTexel(x0, y0, z0, dRef, sample, mipmap, buffer);
		Vector4f c100 = sampleTexel(x1, y0, z0, dRef, sample, mipmap, buffer);
		Vector4f c010 = sampleTexel(x0, y1, z0, dRef, sample, mipmap, buffer);
		Vector4f c110 = sampleTexel(x1, y1, z0, dRef, sample, mipmap, buffer);
		Vector4f c001 = sampleTexel(x0, y0, z1, dRef, sample, mipmap, buffer);
		Vector4f c101 = sampleTexel(x1, y0, z1, dRef, sample, mipmap, buffer);
		Vector4f c011 = sampleTexel(x0, y1, z1, dRef, sample, mipmap, buffer);
		Vector4f c111 = sampleTexel(x1, y1, z1, dRef, sample, mipmap, buffer);

		// Blend first slice
		if(componentCount >= 1) c000.x = c000.x + fu * (c100.x - c000.x);
		if(componentCount >= 2) c000.y = c000.y + fu * (c100.y - c000.y);
		if(componentCount >= 3) c000.z = c000.z + fu * (c100.z - c000.z);
		if(componentCount >= 4) c000.w = c000.w + fu * (c100.w - c000.w);

		if(componentCount >= 1) c010.x = c010.x + fu * (c110.x - c010.x);
		if(componentCount >= 2) c010.y = c010.y + fu * (c110.y - c010.y);
		if(componentCount >= 3) c010.z = c010.z + fu * (c110.z - c010.z);
		if(componentCount >= 4) c010.w = c010.w + fu * (c110.w - c010.w);

		if(componentCount >= 1) c000.x = c000.x + fv * (c010.x - c000.x);
		if(componentCount >= 2) c000.y = c000.y + fv * (c010.y - c000.y);
		if(componentCount >= 3) c000.z = c000.z + fv * (c010.z - c000.z);
		if(componentCount >= 4) c000.w = c000.w + fv * (c010.w - c000.w);

		// Blend second slice
		if(componentCount >= 1) c001.x = c001.x + fu * (c101.x - c001.x);
		if(componentCount >= 2) c001.y = c001.y + fu * (c101.y - c001.y);
		if(componentCount >= 3) c001.z = c001.z + fu * (c101.z - c001.z);
		if(componentCount >= 4) c001.w = c001.w + fu * (c101.w - c001.w);

		if(componentCount >= 1) c011.x = c011.x + fu * (c111.x - c011.x);
		if(componentCount >= 2) c011.y = c011.y + fu * (c111.y - c011.y);
		if(componentCount >= 3) c011.z = c011.z + fu * (c111.z - c011.z);
		if(componentCount >= 4) c011.w = c011.w + fu * (c111.w - c011.w);

		if(componentCount >= 1) c001.x = c001.x + fv * (c011.x - c001.x);
		if(componentCount >= 2) c001.y = c001.y + fv * (c011.y - c001.y);
		if(componentCount >= 3) c001.z = c001.z + fv * (c011.z - c001.z);
		if(componentCount >= 4) c001.w = c001.w + fv * (c011.w - c001.w);

		// Blend slices
		if(componentCount >= 1) c.x = c000.x + fw * (c001.x - c000.x);
		if(componentCount >= 2) c.y = c000.y + fw * (c001.y - c000.y);
		if(componentCount >= 3) c.z = c000.z + fw * (c001.z - c000.z);
		if(componentCount >= 4) c.w = c000.w + fw * (c001.w - c000.w);
	}

	return c;
}
1066 
log2sqrt(Float lod)1067 static Float log2sqrt(Float lod)
1068 {
1069 	// log2(sqrt(lod))                              // Equals 0.25 * log2(lod^2).
1070 	lod *= lod;                                     // Squaring doubles the exponent and produces an extra bit of precision.
1071 	lod = Float(As<Int>(lod)) - Float(0x3F800000);  // Interpret as integer and subtract the exponent bias.
1072 	lod *= As<Float>(Int(0x33000000));              // Scale by 0.25 * 2^-23 (mantissa length).
1073 
1074 	return lod;
1075 }
1076 
log2(Float lod)1077 static Float log2(Float lod)
1078 {
1079 	lod *= lod;                                     // Squaring doubles the exponent and produces an extra bit of precision.
1080 	lod = Float(As<Int>(lod)) - Float(0x3F800000);  // Interpret as integer and subtract the exponent bias.
1081 	lod *= As<Float>(Int(0x33800000));              // Scale by 0.5 * 2^-23 (mantissa length).
1082 
1083 	return lod;
1084 }
1085 
computeLod1D(Pointer<Byte> & texture,Float & lod,Float4 & uuuu,Float4 & dsx,Float4 & dsy)1086 void SamplerCore::computeLod1D(Pointer<Byte> &texture, Float &lod, Float4 &uuuu, Float4 &dsx, Float4 &dsy)
1087 {
1088 	Float4 dudxy;
1089 
1090 	if(function != Grad)  // Implicit
1091 	{
1092 		dudxy = uuuu.yz - uuuu.xx;
1093 	}
1094 	else
1095 	{
1096 		dudxy = UnpackLow(dsx, dsy);
1097 	}
1098 
1099 	// Scale by texture dimensions.
1100 	Float4 dUdxy = dudxy * *Pointer<Float4>(texture + OFFSET(Texture, widthWidthHeightHeight));
1101 
1102 	// Note we could take the absolute value here and omit the square root below,
1103 	// but this is more consistent with the 2D calculation and still cheap.
1104 	Float4 dU2dxy = dUdxy * dUdxy;
1105 
1106 	lod = Max(Float(dU2dxy.x), Float(dU2dxy.y));
1107 	lod = log2sqrt(lod);
1108 }
1109 
// Computes the level of detail for 2D sampling, and, when anisotropic
// filtering is enabled, the anisotropy ratio and the per-sample step vectors
// (uDelta, vDelta) along the major axis of the sampling footprint.
void SamplerCore::computeLod2D(Pointer<Byte> &texture, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta, Float4 &uuuu, Float4 &vvvv, Float4 &dsx, Float4 &dsy)
{
	// Packed gradients: (du/dx, du/dy, dv/dx, dv/dy).
	Float4 duvdxy;

	if(function != Grad)  // Implicit
	{
		// Derive gradients from coordinate deltas across the 2x2 quad.
		duvdxy = Float4(uuuu.yz, vvvv.yz) - Float4(uuuu.xx, vvvv.xx);
	}
	else
	{
		Float4 dudxy = Float4(dsx.xx, dsy.xx);
		Float4 dvdxy = Float4(dsx.yy, dsy.yy);

		duvdxy = Float4(dudxy.xz, dvdxy.xz);
	}

	// Scale by texture dimensions.
	Float4 dUVdxy = duvdxy * *Pointer<Float4>(texture + OFFSET(Texture, widthWidthHeightHeight));

	Float4 dUV2dxy = dUVdxy * dUVdxy;
	Float4 dUV2 = dUV2dxy.xy + dUV2dxy.zw;

	lod = Max(Float(dUV2.x), Float(dUV2.y));  // Square length of major axis

	if(state.textureFilter == FILTER_ANISOTROPIC)
	{
		// Area of the parallelogram spanned by the two gradient vectors
		// (absolute determinant of the Jacobian).
		Float det = Abs(Float(dUVdxy.x) * Float(dUVdxy.w) - Float(dUVdxy.y) * Float(dUVdxy.z));

		Float4 dudx = duvdxy.xxxx;
		Float4 dudy = duvdxy.yyyy;
		Float4 dvdx = duvdxy.zzzz;
		Float4 dvdy = duvdxy.wwww;

		// Step along whichever screen axis has the longer footprint.
		Int4 mask = As<Int4>(CmpNLT(dUV2.x, dUV2.y));
		uDelta = As<Float4>((As<Int4>(dudx) & mask) | ((As<Int4>(dudy) & ~mask)));
		vDelta = As<Float4>((As<Int4>(dvdx) & mask) | ((As<Int4>(dvdy) & ~mask)));

		// major^2 / area; approximates the major/minor axis ratio. Clamped
		// to the sampler's maximum anisotropy.
		anisotropy = lod * Rcp(det, true /* relaxedPrecision */);
		anisotropy = Min(anisotropy, state.maxAnisotropy);

		// TODO(b/151263485): While we always need `lod` above, when there's only
		// a single mipmap level the following calculations could be skipped.
		lod *= Rcp(anisotropy * anisotropy, true /* relaxedPrecision */);
	}

	lod = log2sqrt(lod);  // log2(sqrt(lod))
}
1157 
// Computes the level of detail for cube map sampling. M holds the scaled
// reciprocal of the major-axis magnitude, as produced by cubeFace().
void SamplerCore::computeLodCube(Pointer<Byte> &texture, Float &lod, Float4 &u, Float4 &v, Float4 &w, Float4 &dsx, Float4 &dsy, Float4 &M)
{
	Float4 dudxy, dvdxy, dsdxy;

	if(function != Grad)  // Implicit
	{
		// Project the quad's 3D coordinates onto the face plane, then take
		// per-lane differences against the first pixel of the quad.
		Float4 U = u * M;
		Float4 V = v * M;
		Float4 W = w * M;

		dudxy = Abs(U - U.xxxx);
		dvdxy = Abs(V - V.xxxx);
		dsdxy = Abs(W - W.xxxx);
	}
	else
	{
		dudxy = Float4(dsx.xx, dsy.xx);
		dvdxy = Float4(dsx.yy, dsy.yy);
		dsdxy = Float4(dsx.zz, dsy.zz);

		// Scale explicit gradients by the first lane's projection factor.
		dudxy = Abs(dudxy * Float4(M.x));
		dvdxy = Abs(dvdxy * Float4(M.x));
		dsdxy = Abs(dsdxy * Float4(M.x));
	}

	// Compute the largest Manhattan distance in two dimensions.
	// This takes the footprint across adjacent faces into account.
	Float4 duvdxy = dudxy + dvdxy;
	Float4 dusdxy = dudxy + dsdxy;
	Float4 dvsdxy = dvdxy + dsdxy;

	dudxy = Max(Max(duvdxy, dusdxy), dvsdxy);

	lod = Max(Float(dudxy.y), Float(dudxy.z));  // TODO: Max(dudxy.y, dudxy.z);

	// Scale by texture dimension.
	lod *= *Pointer<Float>(texture + OFFSET(Texture, width));

	lod = log2(lod);
}
1198 
computeLod3D(Pointer<Byte> & texture,Float & lod,Float4 & uuuu,Float4 & vvvv,Float4 & wwww,Float4 & dsx,Float4 & dsy)1199 void SamplerCore::computeLod3D(Pointer<Byte> &texture, Float &lod, Float4 &uuuu, Float4 &vvvv, Float4 &wwww, Float4 &dsx, Float4 &dsy)
1200 {
1201 	Float4 dudxy, dvdxy, dsdxy;
1202 
1203 	if(function != Grad)  // Implicit
1204 	{
1205 		dudxy = uuuu - uuuu.xxxx;
1206 		dvdxy = vvvv - vvvv.xxxx;
1207 		dsdxy = wwww - wwww.xxxx;
1208 	}
1209 	else
1210 	{
1211 		dudxy = Float4(dsx.xx, dsy.xx);
1212 		dvdxy = Float4(dsx.yy, dsy.yy);
1213 		dsdxy = Float4(dsx.zz, dsy.zz);
1214 	}
1215 
1216 	// Scale by texture dimensions.
1217 	dudxy *= *Pointer<Float4>(texture + OFFSET(Texture, width));
1218 	dvdxy *= *Pointer<Float4>(texture + OFFSET(Texture, height));
1219 	dsdxy *= *Pointer<Float4>(texture + OFFSET(Texture, depth));
1220 
1221 	dudxy *= dudxy;
1222 	dvdxy *= dvdxy;
1223 	dsdxy *= dsdxy;
1224 
1225 	dudxy += dvdxy;
1226 	dudxy += dsdxy;
1227 
1228 	lod = Max(Float(dudxy.y), Float(dudxy.z));  // TODO: Max(dudxy.y, dudxy.z);
1229 
1230 	lod = log2sqrt(lod);  // log2(sqrt(lod))
1231 }
1232 
// Selects the cube map face per lane from the direction vector (x, y, z),
// and produces the face-local coordinates (U, V) in [0, 1] along with M, the
// scaled reciprocal of the major-axis magnitude (used by computeLodCube()).
// Returns the face index (0..5) per lane.
Int4 SamplerCore::cubeFace(Float4 &U, Float4 &V, Float4 &x, Float4 &y, Float4 &z, Float4 &M)
{
	// TODO: Comply with Vulkan recommendation:
	// Vulkan 1.1: "The rules should have as the first rule that rz wins over ry and rx, and the second rule that ry wins over rx."

	// Sign of each coordinate (all-ones mask when negative).
	Int4 xn = CmpLT(x, Float4(0.0f));  // x < 0
	Int4 yn = CmpLT(y, Float4(0.0f));  // y < 0
	Int4 zn = CmpLT(z, Float4(0.0f));  // z < 0

	Float4 absX = Abs(x);
	Float4 absY = Abs(y);
	Float4 absZ = Abs(z);

	// Determine which axis has the strictly greatest magnitude per lane.
	Int4 xy = CmpNLE(absX, absY);  // abs(x) > abs(y)
	Int4 yz = CmpNLE(absY, absZ);  // abs(y) > abs(z)
	Int4 zx = CmpNLE(absZ, absX);  // abs(z) > abs(x)
	Int4 xMajor = xy & ~zx;        // abs(x) > abs(y) && abs(x) > abs(z)
	Int4 yMajor = yz & ~xy;        // abs(y) > abs(z) && abs(y) > abs(x)
	Int4 zMajor = zx & ~yz;        // abs(z) > abs(x) && abs(z) > abs(y)

	// Face encoding (axis in bits 2:1, sign in bit 0):
	// FACE_POSITIVE_X = 000b
	// FACE_NEGATIVE_X = 001b
	// FACE_POSITIVE_Y = 010b
	// FACE_NEGATIVE_Y = 011b
	// FACE_POSITIVE_Z = 100b
	// FACE_NEGATIVE_Z = 101b

	Int yAxis = SignMask(yMajor);
	Int zAxis = SignMask(zMajor);

	// Per-lane sign of the major-axis component, collapsed to a 4-bit mask.
	Int4 n = ((xn & xMajor) | (yn & yMajor) | (zn & zMajor)) & Int4(0x80000000);
	Int negative = SignMask(n);

	// Look up the four lanes' face bits from precomputed transpose tables,
	// indexed by the per-lane sign/axis masks (4 bits per lane in `faces`).
	Int faces = *Pointer<Int>(constants + OFFSET(Constants, transposeBit0) + negative * 4);
	faces |= *Pointer<Int>(constants + OFFSET(Constants, transposeBit1) + yAxis * 4);
	faces |= *Pointer<Int>(constants + OFFSET(Constants, transposeBit2) + zAxis * 4);

	Int4 face;
	face.x = faces & 0x7;
	face.y = (faces >> 4) & 0x7;
	face.z = (faces >> 8) & 0x7;
	face.w = (faces >> 12) & 0x7;

	M = Max(Max(absX, absY), absZ);

	// Select the face-local coordinates branchlessly via the major-axis masks:
	// U = xMajor ? (neg ^ -z) : ((zMajor & neg) ^ x)
	U = As<Float4>((xMajor & (n ^ As<Int4>(-z))) | (~xMajor & ((zMajor & n) ^ As<Int4>(x))));

	// V = !yMajor ? -y : (n ^ z)
	V = As<Float4>((~yMajor & As<Int4>(-y)) | (yMajor & (n ^ As<Int4>(z))));

	// Map from [-M, M] to [0, 1]: (coord / M) * 0.5 + 0.5.
	M = reciprocal(M) * Float4(0.5f);
	U = U * M + Float4(0.5f);
	V = V * M + Float4(0.5f);

	return face;
}
1290 
// Applies a texel offset (ConstOffset/Offset operand, in whole texels) to a
// fixed-point coordinate already scaled to [0, whd), then re-applies the
// addressing mode to keep the result in range.
Short4 SamplerCore::applyOffset(Short4 &uvw, Int4 &offset, const Int4 &whd, AddressingMode mode)
{
	Int4 tmp = Int4(As<UShort4>(uvw));
	tmp = tmp + offset;

	switch(mode)
	{
	case AddressingMode::ADDRESSING_WRAP:
		// Bias by |MIN_TEXEL_OFFSET| multiples of the dimension so the left
		// operand of % is non-negative before taking the modulo.
		tmp = (tmp + whd * Int4(-MIN_TEXEL_OFFSET)) % whd;
		break;
	case AddressingMode::ADDRESSING_CLAMP:
	case AddressingMode::ADDRESSING_MIRROR:
	case AddressingMode::ADDRESSING_MIRRORONCE:
	case AddressingMode::ADDRESSING_BORDER:  // TODO(b/29069044): Implement and test ADDRESSING_MIRROR, ADDRESSING_MIRRORONCE, ADDRESSING_BORDER
		tmp = Min(Max(tmp, Int4(0)), whd - Int4(1));
		break;
	case AddressingMode::ADDRESSING_SEAMLESS:
		ASSERT(false);  // Cube sampling doesn't support offset.
		// Deliberate fall-through to the default assert.
	default:
		ASSERT(false);
	}

	return As<Short4>(UShort4(tmp));
}
1315 
// Converts fixed-point normalized coordinates into four linear texel indices
// (one per lane), accounting for texel offsets, array layer, and sample index.
void SamplerCore::computeIndices(UInt index[4], Short4 uuuu, Short4 vvvv, Short4 wwww, const Short4 &layerIndex, Vector4i &offset, const Int4 &sample, const Pointer<Byte> &mipmap)
{
	// Scale the 16-bit fixed-point u coordinate by the mip width.
	uuuu = MulHigh(As<UShort4>(uuuu), UShort4(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, width))));

	if(function.offset)
	{
		uuuu = applyOffset(uuuu, offset.x, *Pointer<UInt4>(mipmap + OFFSET(Mipmap, width)), state.addressingModeU);
	}

	UInt4 indices = Int4(uuuu);

	if(state.is2D() || state.is3D() || state.isCube())
	{
		vvvv = MulHigh(As<UShort4>(vvvv), UShort4(*Pointer<UInt4>(mipmap + OFFSET(Mipmap, height))));

		if(function.offset)
		{
			vvvv = applyOffset(vvvv, offset.y, *Pointer<UInt4>(mipmap + OFFSET(Mipmap, height)), state.addressingModeV);
		}

		// Compute u + v * pitch for all four lanes at once: interleave the
		// u/v pairs and multiply-accumulate against (1, pitchP).
		Short4 uv0uv1 = As<Short4>(UnpackLow(uuuu, vvvv));
		Short4 uv2uv3 = As<Short4>(UnpackHigh(uuuu, vvvv));
		Int2 i01 = MulAdd(uv0uv1, *Pointer<Short4>(mipmap + OFFSET(Mipmap, onePitchP)));
		Int2 i23 = MulAdd(uv2uv3, *Pointer<Short4>(mipmap + OFFSET(Mipmap, onePitchP)));

		indices = UInt4(As<UInt2>(i01), As<UInt2>(i23));
	}

	if(state.is3D())
	{
		wwww = MulHigh(As<UShort4>(wwww), UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap, depth))));

		if(function.offset)
		{
			wwww = applyOffset(wwww, offset.z, *Pointer<Int4>(mipmap + OFFSET(Mipmap, depth)), state.addressingModeW);
		}

		indices += As<UInt4>(Int4(As<UShort4>(wwww))) * *Pointer<UInt4>(mipmap + OFFSET(Mipmap, sliceP));
	}

	if(state.isArrayed())
	{
		Int4 layer = Int4(As<UShort4>(layerIndex));

		// For cube arrays, the layer argument is per cube; each cube has 6 faces.
		if(state.textureType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
		{
			layer *= Int4(6);
		}

		UInt4 layerOffset = As<UInt4>(layer) * *Pointer<UInt4>(mipmap + OFFSET(Mipmap, sliceP));

		indices += layerOffset;
	}

	if(function.sample)
	{
		// Clamp the sample index to the valid range before applying the
		// per-sample pitch.
		UInt4 sampleOffset = Min(As<UInt4>(sample), *Pointer<UInt4>(mipmap + OFFSET(Mipmap, sampleMax), 16)) *
		                     *Pointer<UInt4>(mipmap + OFFSET(Mipmap, samplePitchP), 16);
		indices += sampleOffset;
	}

	index[0] = Extract(indices, 0);
	index[1] = Extract(indices, 1);
	index[2] = Extract(indices, 2);
	index[3] = Extract(indices, 3);
}
1382 
computeIndices(UInt index[4],Int4 uuuu,Int4 vvvv,Int4 wwww,const Int4 & sample,Int4 valid,const Pointer<Byte> & mipmap)1383 void SamplerCore::computeIndices(UInt index[4], Int4 uuuu, Int4 vvvv, Int4 wwww, const Int4 &sample, Int4 valid, const Pointer<Byte> &mipmap)
1384 {
1385 	UInt4 indices = uuuu;
1386 
1387 	if(state.is2D() || state.is3D() || state.isCube())
1388 	{
1389 		indices += As<UInt4>(vvvv);
1390 	}
1391 
1392 	if(state.is3D() || state.isCube() || state.isArrayed())
1393 	{
1394 		indices += As<UInt4>(wwww);
1395 	}
1396 
1397 	if(function.sample)
1398 	{
1399 		indices += Min(As<UInt4>(sample), *Pointer<UInt4>(mipmap + OFFSET(Mipmap, sampleMax), 16)) *
1400 		           *Pointer<UInt4>(mipmap + OFFSET(Mipmap, samplePitchP), 16);
1401 	}
1402 
1403 	if(borderModeActive())
1404 	{
1405 		// Texels out of range are still sampled before being replaced
1406 		// with the border color, so sample them at linear index 0.
1407 		indices &= As<UInt4>(valid);
1408 	}
1409 
1410 	for(int i = 0; i < 4; i++)
1411 	{
1412 		index[i] = Extract(As<Int4>(indices), i);
1413 	}
1414 }
1415 
// Loads four texels (one per lane) at the given linear element indices and
// decodes them into 16-bit-per-component form. Unorm components are placed in
// the high bits of each 16-bit channel; integer formats keep their raw values.
Vector4s SamplerCore::sampleTexel(UInt index[4], Pointer<Byte> buffer)
{
	Vector4s c;

	if(has16bitPackedTextureFormat())
	{
		// Load four 16-bit packed texels into the four lanes of c.x, then
		// shift each bit-field into the top bits of its own channel.
		c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0);
		c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1);
		c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2);
		c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3);

		switch(state.textureFormat)
		{
		case VK_FORMAT_R5G6B5_UNORM_PACK16:
			c.z = (c.x & Short4(0x001Fu)) << 11;
			c.y = (c.x & Short4(0x07E0u)) << 5;
			c.x = (c.x & Short4(0xF800u));
			break;
		case VK_FORMAT_B5G6R5_UNORM_PACK16:
			c.z = (c.x & Short4(0xF800u));
			c.y = (c.x & Short4(0x07E0u)) << 5;
			c.x = (c.x & Short4(0x001Fu)) << 11;
			break;
		case VK_FORMAT_R4G4B4A4_UNORM_PACK16:
			c.w = (c.x << 12) & Short4(0xF000u);
			c.z = (c.x << 8) & Short4(0xF000u);
			c.y = (c.x << 4) & Short4(0xF000u);
			c.x = (c.x) & Short4(0xF000u);
			break;
		case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
			c.w = (c.x << 12) & Short4(0xF000u);
			c.z = (c.x) & Short4(0xF000u);
			c.y = (c.x << 4) & Short4(0xF000u);
			c.x = (c.x << 8) & Short4(0xF000u);
			break;
		case VK_FORMAT_A4R4G4B4_UNORM_PACK16:
			c.w = (c.x) & Short4(0xF000u);
			c.z = (c.x << 12) & Short4(0xF000u);
			c.y = (c.x << 8) & Short4(0xF000u);
			c.x = (c.x << 4) & Short4(0xF000u);
			break;
		case VK_FORMAT_A4B4G4R4_UNORM_PACK16:
			c.w = (c.x) & Short4(0xF000u);
			c.z = (c.x << 4) & Short4(0xF000u);
			c.y = (c.x << 8) & Short4(0xF000u);
			c.x = (c.x << 12) & Short4(0xF000u);
			break;
		case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
			c.w = (c.x << 15) & Short4(0x8000u);
			c.z = (c.x << 10) & Short4(0xF800u);
			c.y = (c.x << 5) & Short4(0xF800u);
			c.x = (c.x) & Short4(0xF800u);
			break;
		case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
			c.w = (c.x << 15) & Short4(0x8000u);
			c.z = (c.x) & Short4(0xF800u);
			c.y = (c.x << 5) & Short4(0xF800u);
			c.x = (c.x << 10) & Short4(0xF800u);
			break;
		case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
			c.w = (c.x) & Short4(0x8000u);
			c.z = (c.x << 11) & Short4(0xF800u);
			c.y = (c.x << 6) & Short4(0xF800u);
			c.x = (c.x << 1) & Short4(0xF800u);
			break;
		default:
			ASSERT(false);
		}
	}
	else if(has8bitTextureComponents())
	{
		switch(textureComponentCount())
		{
		case 4:
			{
				// Load four 32-bit texels, then transpose the 4x4 byte matrix
				// so each channel ends up in its own Short4 (high 8 bits for
				// unorm formats, raw value for integer formats).
				Byte4 c0 = Pointer<Byte4>(buffer)[index[0]];
				Byte4 c1 = Pointer<Byte4>(buffer)[index[1]];
				Byte4 c2 = Pointer<Byte4>(buffer)[index[2]];
				Byte4 c3 = Pointer<Byte4>(buffer)[index[3]];
				c.x = Unpack(c0, c1);
				c.y = Unpack(c2, c3);

				switch(state.textureFormat)
				{
				case VK_FORMAT_B8G8R8A8_UNORM:
				case VK_FORMAT_B8G8R8A8_SRGB:
					// BGRA storage order: swap the R and B channels while
					// transposing.
					c.z = As<Short4>(UnpackLow(c.x, c.y));
					c.x = As<Short4>(UnpackHigh(c.x, c.y));
					c.y = c.z;
					c.w = c.x;
					c.z = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.z));
					c.y = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.y));
					c.x = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.x));
					c.w = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.w));
					break;
				case VK_FORMAT_R8G8B8A8_UNORM:
				case VK_FORMAT_R8G8B8A8_SNORM:
				case VK_FORMAT_R8G8B8A8_SINT:
				case VK_FORMAT_R8G8B8A8_SRGB:
				case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
				case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
				case VK_FORMAT_A8B8G8R8_SINT_PACK32:
				case VK_FORMAT_A8B8G8R8_SRGB_PACK32:
					c.z = As<Short4>(UnpackHigh(c.x, c.y));
					c.x = As<Short4>(UnpackLow(c.x, c.y));
					c.y = c.x;
					c.w = c.z;
					// Unpacking against zero places each byte in the high
					// half of its 16-bit channel.
					c.x = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.x));
					c.y = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.y));
					c.z = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.z));
					c.w = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.w));
					// Propagate sign bit
					if(state.textureFormat == VK_FORMAT_R8G8B8A8_SINT ||
					   state.textureFormat == VK_FORMAT_A8B8G8R8_SINT_PACK32)
					{
						c.x >>= 8;
						c.y >>= 8;
						c.z >>= 8;
						c.w >>= 8;
					}
					break;
				case VK_FORMAT_R8G8B8A8_UINT:
				case VK_FORMAT_A8B8G8R8_UINT_PACK32:
					c.z = As<Short4>(UnpackHigh(c.x, c.y));
					c.x = As<Short4>(UnpackLow(c.x, c.y));
					c.y = c.x;
					c.w = c.z;
					// Zero-extend: byte value stays in the low half.
					c.x = UnpackLow(As<Byte8>(c.x), As<Byte8>(Short4(0)));
					c.y = UnpackHigh(As<Byte8>(c.y), As<Byte8>(Short4(0)));
					c.z = UnpackLow(As<Byte8>(c.z), As<Byte8>(Short4(0)));
					c.w = UnpackHigh(As<Byte8>(c.w), As<Byte8>(Short4(0)));
					break;
				default:
					ASSERT(false);
				}
			}
			break;
		case 2:
			// Load four 16-bit (R8G8) texels into the lanes of c.x, then
			// split into separate R and G channels.
			c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0);
			c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1);
			c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2);
			c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3);

			switch(state.textureFormat)
			{
			case VK_FORMAT_R8G8_UNORM:
			case VK_FORMAT_R8G8_SNORM:
			case VK_FORMAT_R8G8_SRGB:
				c.y = (c.x & Short4(0xFF00u));
				c.x = (c.x << 8);
				break;
			case VK_FORMAT_R8G8_SINT:
				c.y = c.x >> 8;
				c.x = (c.x << 8) >> 8;  // Propagate sign bit
				break;
			case VK_FORMAT_R8G8_UINT:
				c.y = As<Short4>(As<UShort4>(c.x) >> 8);
				c.x &= Short4(0x00FFu);
				break;
			default:
				ASSERT(false);
			}
			break;
		case 1:
			{
				// Gather four single bytes into one 32-bit value, then unpack.
				Int c0 = Int(*Pointer<Byte>(buffer + index[0]));
				Int c1 = Int(*Pointer<Byte>(buffer + index[1]));
				Int c2 = Int(*Pointer<Byte>(buffer + index[2]));
				Int c3 = Int(*Pointer<Byte>(buffer + index[3]));
				c0 = c0 | (c1 << 8) | (c2 << 16) | (c3 << 24);

				switch(state.textureFormat)
				{
				case VK_FORMAT_R8_SINT:
				case VK_FORMAT_R8_UINT:
				case VK_FORMAT_S8_UINT:
					{
						// Integer formats keep the raw value in the low byte.
						Int zero(0);
						c.x = Unpack(As<Byte4>(c0), As<Byte4>(zero));
						// Propagate sign bit
						if(state.textureFormat == VK_FORMAT_R8_SINT)
						{
							c.x = (c.x << 8) >> 8;
						}
					}
					break;
				case VK_FORMAT_R8_SNORM:
				case VK_FORMAT_R8_UNORM:
				case VK_FORMAT_R8_SRGB:
					// TODO: avoid populating the low bits at all.
					c.x = Unpack(As<Byte4>(c0));
					c.x &= Short4(0xFF00u);
					break;
				default:
					c.x = Unpack(As<Byte4>(c0));
					break;
				}
			}
			break;
		default:
			ASSERT(false);
		}
	}
	else if(has16bitTextureComponents())
	{
		switch(textureComponentCount())
		{
		case 4:
			// Load four 64-bit texels and transpose so each channel ends up
			// in its own Short4.
			c.x = Pointer<Short4>(buffer)[index[0]];
			c.y = Pointer<Short4>(buffer)[index[1]];
			c.z = Pointer<Short4>(buffer)[index[2]];
			c.w = Pointer<Short4>(buffer)[index[3]];
			transpose4x4(c.x, c.y, c.z, c.w);
			break;
		case 2:
			// Load four 32-bit (RG16) texels and interleave into per-channel
			// vectors (element stride is 4 bytes, hence the 4 * index).
			c.x = *Pointer<Short4>(buffer + 4 * index[0]);
			c.x = As<Short4>(UnpackLow(c.x, *Pointer<Short4>(buffer + 4 * index[1])));
			c.z = *Pointer<Short4>(buffer + 4 * index[2]);
			c.z = As<Short4>(UnpackLow(c.z, *Pointer<Short4>(buffer + 4 * index[3])));
			c.y = c.x;
			c.x = UnpackLow(As<Int2>(c.x), As<Int2>(c.z));
			c.y = UnpackHigh(As<Int2>(c.y), As<Int2>(c.z));
			break;
		case 1:
			c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0);
			c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1);
			c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2);
			c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3);
			break;
		default:
			ASSERT(false);
		}
	}
	else if(state.textureFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32)
	{
		Int4 cc;
		cc = Insert(cc, Pointer<Int>(buffer)[index[0]], 0);
		cc = Insert(cc, Pointer<Int>(buffer)[index[1]], 1);
		cc = Insert(cc, Pointer<Int>(buffer)[index[2]], 2);
		cc = Insert(cc, Pointer<Int>(buffer)[index[3]], 3);

		// Move each 10-bit field into the top bits of its 16-bit channel.
		c.x = Short4(cc << 6) & Short4(0xFFC0u);
		c.y = Short4(cc >> 4) & Short4(0xFFC0u);
		c.z = Short4(cc >> 14) & Short4(0xFFC0u);
		c.w = Short4(cc >> 16) & Short4(0xC000u);
	}
	else if(state.textureFormat == VK_FORMAT_A2R10G10B10_UNORM_PACK32)
	{
		Int4 cc;
		cc = Insert(cc, Pointer<Int>(buffer)[index[0]], 0);
		cc = Insert(cc, Pointer<Int>(buffer)[index[1]], 1);
		cc = Insert(cc, Pointer<Int>(buffer)[index[2]], 2);
		cc = Insert(cc, Pointer<Int>(buffer)[index[3]], 3);

		// Same as A2B10G10R10 but with R and B swapped.
		c.x = Short4(cc >> 14) & Short4(0xFFC0u);
		c.y = Short4(cc >> 4) & Short4(0xFFC0u);
		c.z = Short4(cc << 6) & Short4(0xFFC0u);
		c.w = Short4(cc >> 16) & Short4(0xC000u);
	}
	else if(state.textureFormat == VK_FORMAT_A2B10G10R10_UINT_PACK32)
	{
		Int4 cc;
		cc = Insert(cc, Pointer<Int>(buffer)[index[0]], 0);
		cc = Insert(cc, Pointer<Int>(buffer)[index[1]], 1);
		cc = Insert(cc, Pointer<Int>(buffer)[index[2]], 2);
		cc = Insert(cc, Pointer<Int>(buffer)[index[3]], 3);

		// Integer format: keep raw field values in the low bits.
		c.x = Short4(cc & Int4(0x3FF));
		c.y = Short4((cc >> 10) & Int4(0x3FF));
		c.z = Short4((cc >> 20) & Int4(0x3FF));
		c.w = Short4((cc >> 30) & Int4(0x3));
	}
	else if(state.textureFormat == VK_FORMAT_A2R10G10B10_UINT_PACK32)
	{
		Int4 cc;
		cc = Insert(cc, Pointer<Int>(buffer)[index[0]], 0);
		cc = Insert(cc, Pointer<Int>(buffer)[index[1]], 1);
		cc = Insert(cc, Pointer<Int>(buffer)[index[2]], 2);
		cc = Insert(cc, Pointer<Int>(buffer)[index[3]], 3);

		// Same as the UINT variant above but with R and B swapped.
		c.z = Short4((cc & Int4(0x3FF)));
		c.y = Short4(((cc >> 10) & Int4(0x3FF)));
		c.x = Short4(((cc >> 20) & Int4(0x3FF)));
		c.w = Short4(((cc >> 30) & Int4(0x3)));
	}
	else
		ASSERT(false);

	if(state.textureFormat.isSRGBformat())
	{
		for(int i = 0; i < textureComponentCount(); i++)
		{
			if(isRGBComponent(i))
			{
				// The current table-based sRGB conversion requires 0xFF00 to represent 1.0.
				ASSERT(state.textureFormat.has8bitTextureComponents());

				sRGBtoLinearFF00(c[i]);
			}
		}
	}

	return c;
}
1720 
// Fetches four texels addressed by 16-bit fixed-point coordinates and returns
// them as 16-bit fixed-point components.
//
// YCbCr formats are handled entirely here: the luma and chroma planes are read
// separately (their buffer pointers are stored in consecutive Mipmap entries)
// and converted to RGB, producing 15-bit unsigned output. All other formats
// are delegated to the index-based sampleTexel() overload.
Vector4s SamplerCore::sampleTexel(Short4 &uuuu, Short4 &vvvv, Short4 &wwww, const Short4 &layerIndex, Vector4i &offset, const Int4 &sample, Pointer<Byte> &mipmap, Pointer<Byte> buffer)
{
	Vector4s c;

	// Linear offsets of the four texels to fetch.
	UInt index[4];
	computeIndices(index, uuuu, vvvv, wwww, layerIndex, offset, sample, mipmap);

	if(isYcbcrFormat())
	{
		// Generates 15-bit output.

		// Pointers to the planes of YCbCr images are stored in consecutive mipmap levels.
		Pointer<Byte> bufferY = buffer;                                                                         // *Pointer<Pointer<Byte>>(mipmap + 0 * sizeof(Mipmap) + OFFSET(Mipmap, buffer));
		Pointer<Byte> bufferU = *Pointer<Pointer<Byte>>(mipmap + 1 * sizeof(Mipmap) + OFFSET(Mipmap, buffer));  // U/V for 2-plane interleaved formats.
		Pointer<Byte> bufferV = *Pointer<Pointer<Byte>>(mipmap + 2 * sizeof(Mipmap) + OFFSET(Mipmap, buffer));

		// Luminance (either 8-bit or 10-bit in bottom bits).
		UShort4 Y;
		{
			switch(state.textureFormat)
			{
			case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
			case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
				{
					Y = Insert(Y, UShort(bufferY[index[0]]), 0);
					Y = Insert(Y, UShort(bufferY[index[1]]), 1);
					Y = Insert(Y, UShort(bufferY[index[2]]), 2);
					Y = Insert(Y, UShort(bufferY[index[3]]), 3);
				}
				break;
			case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
				{
					Y = Insert(Y, Pointer<UShort>(bufferY)[index[0]], 0);
					Y = Insert(Y, Pointer<UShort>(bufferY)[index[1]], 1);
					Y = Insert(Y, Pointer<UShort>(bufferY)[index[2]], 2);
					Y = Insert(Y, Pointer<UShort>(bufferY)[index[3]], 3);
					// Top 10 bits of each 16 bits:
					Y = (Y & UShort4(0xFFC0u)) >> 6;
				}
				break;
			default:
				UNSUPPORTED("state.textureFormat %d", (int)state.textureFormat);
				break;
			}
		}

		// Chroma (either 8-bit or 10-bit in bottom bits).
		UShort4 Cb, Cr;
		{
			// Recompute indices against the chroma plane's descriptor, which is
			// stored in the next Mipmap entry (chroma may differ from luma in
			// dimensions for these formats).
			computeIndices(index, uuuu, vvvv, wwww, layerIndex, offset, sample, mipmap + sizeof(Mipmap));
			UShort4 U, V;

			switch(state.textureFormat)
			{
			case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
				{
					// Separate U and V planes.
					U = Insert(U, UShort(bufferU[index[0]]), 0);
					U = Insert(U, UShort(bufferU[index[1]]), 1);
					U = Insert(U, UShort(bufferU[index[2]]), 2);
					U = Insert(U, UShort(bufferU[index[3]]), 3);

					V = Insert(V, UShort(bufferV[index[0]]), 0);
					V = Insert(V, UShort(bufferV[index[1]]), 1);
					V = Insert(V, UShort(bufferV[index[2]]), 2);
					V = Insert(V, UShort(bufferV[index[3]]), 3);
				}
				break;
			case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
				{
					// Interleaved U/V bytes: read 16 bits and split the two halves.
					UShort4 UV;
					UV = Insert(UV, Pointer<UShort>(bufferU)[index[0]], 0);
					UV = Insert(UV, Pointer<UShort>(bufferU)[index[1]], 1);
					UV = Insert(UV, Pointer<UShort>(bufferU)[index[2]], 2);
					UV = Insert(UV, Pointer<UShort>(bufferU)[index[3]], 3);

					U = (UV & UShort4(0x00FFu));
					V = (UV & UShort4(0xFF00u)) >> 8;
				}
				break;
			case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
				{
					// Interleaved 16-bit U/V pairs: read 32 bits per texel.
					UInt4 UV;
					UV = Insert(UV, Pointer<UInt>(bufferU)[index[0]], 0);
					UV = Insert(UV, Pointer<UInt>(bufferU)[index[1]], 1);
					UV = Insert(UV, Pointer<UInt>(bufferU)[index[2]], 2);
					UV = Insert(UV, Pointer<UInt>(bufferU)[index[3]], 3);
					// Top 10 bits of first 16-bits:
					U = UShort4((UV & UInt4(0x0000FFC0u)) >> 6);
					// Top 10 bits of second 16-bits:
					V = UShort4((UV & UInt4(0xFFC00000u)) >> 22);
				}
				break;
			default:
				UNSUPPORTED("state.textureFormat %d", (int)state.textureFormat);
				break;
			}

			// Apply the sampler's chroma component swap, if any.
			if(!state.swappedChroma)
			{
				Cb = U;
				Cr = V;
			}
			else
			{
				Cb = V;
				Cr = U;
			}
		}

		// Bit depth of the luma and chroma components for the current format.
		uint8_t lumaBits = 8;
		uint8_t chromaBits = 8;
		switch(state.textureFormat)
		{
		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
			lumaBits = 8;
			chromaBits = 8;
			break;
		case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
			lumaBits = 10;
			chromaBits = 10;
			break;
		default:
			UNSUPPORTED("state.textureFormat %d", (int)state.textureFormat);
			break;
		}

		if(state.ycbcrModel == VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY)
		{
			// Scale to the output 15-bit.
			// Channels are stored as R=Cr, G=Y, B=Cb.
			c.x = Cr << (15 - chromaBits);
			c.y = Y << (15 - lumaBits);
			c.z = Cb << (15 - chromaBits);
		}
		else
		{
			// Range expansion and (optionally) model conversion, in float.
			const float twoPowLumaBits = static_cast<float>(0x1u << lumaBits);
			const float twoPowLumaBitsMinus8 = static_cast<float>(0x1u << (lumaBits - 8));
			const float twoPowChromaBits = static_cast<float>(0x1u << chromaBits);
			const float twoPowChromaBitsMinus1 = static_cast<float>(0x1u << (chromaBits - 1));
			const float twoPowChromaBitsMinus8 = static_cast<float>(0x1u << (chromaBits - 8));

			Float4 y = Float4(Y);
			Float4 u = Float4(Cb);
			Float4 v = Float4(Cr);

			if(state.studioSwing)
			{
				// See https://www.khronos.org/registry/DataFormat/specs/1.3/dataformat.1.3.html#QUANTIZATION_NARROW
				y = ((y / Float4(twoPowLumaBitsMinus8)) - Float4(16.0f)) / Float4(219.0f);
				u = ((u / Float4(twoPowChromaBitsMinus8)) - Float4(128.0f)) / Float4(224.0f);
				v = ((v / Float4(twoPowChromaBitsMinus8)) - Float4(128.0f)) / Float4(224.0f);
			}
			else
			{
				// See https://www.khronos.org/registry/DataFormat/specs/1.3/dataformat.1.3.html#QUANTIZATION_FULL
				y = y / Float4(twoPowLumaBits - 1.0f);
				u = (u - Float4(twoPowChromaBitsMinus1)) / Float4(twoPowChromaBits - 1.0f);
				v = (v - Float4(twoPowChromaBitsMinus1)) / Float4(twoPowChromaBits - 1.0f);
			}

			// Now, `y` is in [0, 1] and `u` and `v` are in [-0.5, 0.5].

			if(state.ycbcrModel == VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY)
			{
				// No model conversion: scale the range-expanded values by 0x7FFF
				// into signed 15-bit fixed point.
				c.x = Short4(v * static_cast<float>(0x7FFF));
				c.y = Short4(y * static_cast<float>(0x7FFF));
				c.z = Short4(u * static_cast<float>(0x7FFF));
			}
			else
			{
				// Generic YCbCr to RGB transformation:
				// R = Y                               +           2 * (1 - Kr) * Cr
				// G = Y - 2 * Kb * (1 - Kb) / Kg * Cb - 2 * Kr * (1 - Kr) / Kg * Cr
				// B = Y +           2 * (1 - Kb) * Cb

				float Kb = 0.114f;
				float Kr = 0.299f;

				switch(state.ycbcrModel)
				{
				case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709:
					Kb = 0.0722f;
					Kr = 0.2126f;
					break;
				case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601:
					Kb = 0.114f;
					Kr = 0.299f;
					break;
				case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020:
					Kb = 0.0593f;
					Kr = 0.2627f;
					break;
				default:
					UNSUPPORTED("ycbcrModel %d", int(state.ycbcrModel));
				}

				const float Kg = 1.0f - Kr - Kb;

				// Matrix coefficients derived from Kr/Kg/Kb.
				const float Rr = 2 * (1 - Kr);
				const float Gb = -2 * Kb * (1 - Kb) / Kg;
				const float Gr = -2 * Kr * (1 - Kr) / Kg;
				const float Bb = 2 * (1 - Kb);

				Float4 r = y + Float4(Rr) * v;
				Float4 g = y + Float4(Gb) * u + Float4(Gr) * v;
				Float4 b = y + Float4(Bb) * u;

				// Scale by 0x7FFF into signed 15-bit fixed point.
				c.x = Short4(r * static_cast<float>(0x7FFF));
				c.y = Short4(g * static_cast<float>(0x7FFF));
				c.z = Short4(b * static_cast<float>(0x7FFF));
			}
		}
	}
	else
	{
		// Non-YCbCr formats share the index-based fetch path.
		return sampleTexel(index, buffer);
	}

	return c;
}
1942 
// Fetches four texels at integer coordinates and returns them as a Vector4f.
//
// Float and 32-bit-integer formats are read and decoded directly; all other
// formats go through the 16-bit sampleTexel() overload and are converted to
// float (or reinterpreted as 32-bit integers for unnormalized-integer
// formats). Also performs border texel replacement and the depth-compare
// operation for shadow samplers, when enabled.
Vector4f SamplerCore::sampleTexel(Int4 &uuuu, Int4 &vvvv, Int4 &wwww, Float4 &dRef, const Int4 &sample, Pointer<Byte> &mipmap, Pointer<Byte> buffer)
{
	Int4 valid;

	if(borderModeActive())
	{
		// Valid texels have positive coordinates.
		Int4 negative = uuuu;
		if(state.is2D() || state.is3D() || state.isCube()) negative |= vvvv;
		if(state.is3D() || state.isCube() || state.isArrayed()) negative |= wwww;
		valid = CmpNLT(negative, Int4(0));
	}

	UInt index[4];
	computeIndices(index, uuuu, vvvv, wwww, sample, valid, mipmap);

	Vector4f c;

	if(hasFloatTexture() || has32bitIntegerTextureComponents())
	{
		UInt4 t0, t1, t2, t3;

		switch(state.textureFormat)
		{
		case VK_FORMAT_R16_SFLOAT:
			t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 2));
			t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 2));
			t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 2));
			t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 2));

			// Expand the 16-bit half floats and keep only the red component.
			c.x.x = Extract(As<Float4>(halfToFloatBits(t0)), 0);
			c.x.y = Extract(As<Float4>(halfToFloatBits(t1)), 0);
			c.x.z = Extract(As<Float4>(halfToFloatBits(t2)), 0);
			c.x.w = Extract(As<Float4>(halfToFloatBits(t3)), 0);
			break;
		case VK_FORMAT_R16G16_SFLOAT:
			t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 4));
			t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 4));
			t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 4));
			t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 4));

			// TODO: shuffles
			c.x = As<Float4>(halfToFloatBits(t0));
			c.y = As<Float4>(halfToFloatBits(t1));
			c.z = As<Float4>(halfToFloatBits(t2));
			c.w = As<Float4>(halfToFloatBits(t3));
			transpose4x4(c.x, c.y, c.z, c.w);
			break;
		case VK_FORMAT_R16G16B16A16_SFLOAT:
			t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 8));
			t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 8));
			t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 8));
			t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 8));

			c.x = As<Float4>(halfToFloatBits(t0));
			c.y = As<Float4>(halfToFloatBits(t1));
			c.z = As<Float4>(halfToFloatBits(t2));
			c.w = As<Float4>(halfToFloatBits(t3));
			transpose4x4(c.x, c.y, c.z, c.w);
			break;
		case VK_FORMAT_R32_SFLOAT:
		case VK_FORMAT_R32_SINT:
		case VK_FORMAT_R32_UINT:
		case VK_FORMAT_D32_SFLOAT:
			// TODO: Optimal shuffling?
			c.x.x = *Pointer<Float>(buffer + index[0] * 4);
			c.x.y = *Pointer<Float>(buffer + index[1] * 4);
			c.x.z = *Pointer<Float>(buffer + index[2] * 4);
			c.x.w = *Pointer<Float>(buffer + index[3] * 4);
			break;
		case VK_FORMAT_R32G32_SFLOAT:
		case VK_FORMAT_R32G32_SINT:
		case VK_FORMAT_R32G32_UINT:
			// TODO: Optimal shuffling?
			c.x.xy = *Pointer<Float4>(buffer + index[0] * 8);
			c.x.zw = *Pointer<Float4>(buffer + index[1] * 8 - 8);
			c.z.xy = *Pointer<Float4>(buffer + index[2] * 8);
			c.z.zw = *Pointer<Float4>(buffer + index[3] * 8 - 8);
			c.y = c.x;
			c.x = Float4(c.x.xz, c.z.xz);
			c.y = Float4(c.y.yw, c.z.yw);
			break;
		case VK_FORMAT_R32G32B32A32_SFLOAT:
		case VK_FORMAT_R32G32B32A32_SINT:
		case VK_FORMAT_R32G32B32A32_UINT:
			c.x = *Pointer<Float4>(buffer + index[0] * 16, 16);
			c.y = *Pointer<Float4>(buffer + index[1] * 16, 16);
			c.z = *Pointer<Float4>(buffer + index[2] * 16, 16);
			c.w = *Pointer<Float4>(buffer + index[3] * 16, 16);
			transpose4x4(c.x, c.y, c.z, c.w);
			break;
		case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
			{
				Float4 t;  // TODO: add Insert(UInt4, RValue<UInt>)
				t.x = *Pointer<Float>(buffer + index[0] * 4);
				t.y = *Pointer<Float>(buffer + index[1] * 4);
				t.z = *Pointer<Float>(buffer + index[2] * 4);
				t.w = *Pointer<Float>(buffer + index[3] * 4);
				t0 = As<UInt4>(t);
				// 5-bit shared exponent in the top bits; the scale 2^(exp - 24)
				// applies to all three 9-bit mantissas.
				c.w = Float4(UInt4(1) << ((t0 >> 27) & UInt4(0x1F))) * Float4(1.0f / (1 << 24));
				c.x = Float4(t0 & UInt4(0x1FF)) * c.w;
				c.y = Float4((t0 >> 9) & UInt4(0x1FF)) * c.w;
				c.z = Float4((t0 >> 18) & UInt4(0x1FF)) * c.w;
			}
			break;
		case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
			{
				Float4 t;  // TODO: add Insert(UInt4, RValue<UInt>)
				t.x = *Pointer<Float>(buffer + index[0] * 4);
				t.y = *Pointer<Float>(buffer + index[1] * 4);
				t.z = *Pointer<Float>(buffer + index[2] * 4);
				t.w = *Pointer<Float>(buffer + index[3] * 4);
				t0 = As<UInt4>(t);
				// Shift the packed 11- and 10-bit floats into half-float bit
				// positions, then expand to 32-bit float.
				c.x = As<Float4>(halfToFloatBits((t0 << 4) & UInt4(0x7FF0)));
				c.y = As<Float4>(halfToFloatBits((t0 >> 7) & UInt4(0x7FF0)));
				c.z = As<Float4>(halfToFloatBits((t0 >> 17) & UInt4(0x7FE0)));
			}
			break;
		default:
			UNSUPPORTED("Format %d", VkFormat(state.textureFormat));
		}
	}
	else
	{
		ASSERT(!isYcbcrFormat());

		// Fetch as 16-bit fixed point and widen each component to float
		// (or to 32-bit integer bits for unnormalized-integer formats).
		Vector4s cs = sampleTexel(index, buffer);

		bool isInteger = state.textureFormat.isUnnormalizedInteger();
		int componentCount = textureComponentCount();
		for(int n = 0; n < componentCount; n++)
		{
			if(hasUnsignedTextureComponent(n))
			{
				if(isInteger)
				{
					c[n] = As<Float4>(Int4(As<UShort4>(cs[n])));
				}
				else
				{
					c[n] = Float4(As<UShort4>(cs[n]));
				}
			}
			else
			{
				if(isInteger)
				{
					c[n] = As<Float4>(Int4(cs[n]));
				}
				else
				{
					c[n] = Float4(cs[n]);
				}
			}
		}
	}

	if(borderModeActive())
	{
		c = replaceBorderTexel(c, valid);
	}

	if(state.compareEnable)
	{
		// Depth comparison (shadow samplers): replace the texel with the
		// per-lane boolean result (1.0 or 0.0) in the red channel.
		Float4 ref = dRef;

		if(!hasFloatTexture())
		{
			// D16_UNORM: clamp reference, normalize texel value
			ref = Min(Max(ref, Float4(0.0f)), Float4(1.0f));
			c.x = c.x * Float4(1.0f / 0xFFFF);
		}

		Int4 boolean;

		switch(state.compareOp)
		{
		case VK_COMPARE_OP_LESS_OR_EQUAL: boolean = CmpLE(ref, c.x); break;
		case VK_COMPARE_OP_GREATER_OR_EQUAL: boolean = CmpNLT(ref, c.x); break;
		case VK_COMPARE_OP_LESS: boolean = CmpLT(ref, c.x); break;
		case VK_COMPARE_OP_GREATER: boolean = CmpNLE(ref, c.x); break;
		case VK_COMPARE_OP_EQUAL: boolean = CmpEQ(ref, c.x); break;
		case VK_COMPARE_OP_NOT_EQUAL: boolean = CmpNEQ(ref, c.x); break;
		case VK_COMPARE_OP_ALWAYS: boolean = Int4(-1); break;
		case VK_COMPARE_OP_NEVER: boolean = Int4(0); break;
		default: ASSERT(false);
		}

		c.x = As<Float4>(boolean & As<Int4>(Float4(1.0f)));
		c.y = Float4(0.0f);
		c.z = Float4(0.0f);
		c.w = Float4(1.0f);
	}

	return c;
}
2139 
replaceBorderTexel(const Vector4f & c,Int4 valid)2140 Vector4f SamplerCore::replaceBorderTexel(const Vector4f &c, Int4 valid)
2141 {
2142 	Vector4i border;
2143 
2144 	const bool scaled = hasNormalizedFormat() && !state.compareEnable;
2145 	const sw::float4 scaleComp = scaled ? getComponentScale() : sw::float4(1.0f, 1.0f, 1.0f, 1.0f);
2146 
2147 	switch(state.border)
2148 	{
2149 	case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
2150 	case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
2151 		border.x = Int4(0);
2152 		border.y = Int4(0);
2153 		border.z = Int4(0);
2154 		border.w = Int4(0);
2155 		break;
2156 	case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
2157 		border.x = Int4(0);
2158 		border.y = Int4(0);
2159 		border.z = Int4(0);
2160 		border.w = Int4(bit_cast<int>(scaleComp.w));
2161 		break;
2162 	case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
2163 		border.x = Int4(0);
2164 		border.y = Int4(0);
2165 		border.z = Int4(0);
2166 		border.w = Int4(1);
2167 		break;
2168 	case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
2169 		border.x = Int4(bit_cast<int>(scaleComp.x));
2170 		border.y = Int4(bit_cast<int>(scaleComp.y));
2171 		border.z = Int4(bit_cast<int>(scaleComp.z));
2172 		border.w = Int4(bit_cast<int>(scaleComp.w));
2173 		break;
2174 	case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
2175 		border.x = Int4(1);
2176 		border.y = Int4(1);
2177 		border.z = Int4(1);
2178 		border.w = Int4(1);
2179 		break;
2180 	case VK_BORDER_COLOR_FLOAT_CUSTOM_EXT:
2181 		// This bit-casts from float to int in C++ code instead of Reactor code
2182 		// because Reactor does not guarantee preserving infinity (b/140302841).
2183 		border.x = Int4(bit_cast<int>(scaleComp.x * state.customBorder.float32[0]));
2184 		border.y = Int4(bit_cast<int>(scaleComp.y * state.customBorder.float32[1]));
2185 		border.z = Int4(bit_cast<int>(scaleComp.z * state.customBorder.float32[2]));
2186 		border.w = Int4(bit_cast<int>(scaleComp.w * state.customBorder.float32[3]));
2187 		break;
2188 	case VK_BORDER_COLOR_INT_CUSTOM_EXT:
2189 		border.x = Int4(state.customBorder.int32[0]);
2190 		border.y = Int4(state.customBorder.int32[1]);
2191 		border.z = Int4(state.customBorder.int32[2]);
2192 		border.w = Int4(state.customBorder.int32[3]);
2193 		break;
2194 	default:
2195 		UNSUPPORTED("sint/uint/sfloat border: %u", state.border);
2196 	}
2197 
2198 	Vector4f out;
2199 	out.x = As<Float4>((valid & As<Int4>(c.x)) | (~valid & border.x));  // TODO: IfThenElse()
2200 	out.y = As<Float4>((valid & As<Int4>(c.y)) | (~valid & border.y));
2201 	out.z = As<Float4>((valid & As<Int4>(c.z)) | (~valid & border.z));
2202 	out.w = As<Float4>((valid & As<Int4>(c.w)) | (~valid & border.w));
2203 
2204 	return out;
2205 }
2206 
// Returns a pointer to the Mipmap descriptor selected by the level-of-detail.
// 'secondLOD' requests the next level, used by trilinear filtering.
Pointer<Byte> SamplerCore::selectMipmap(const Pointer<Byte> &texture, const Float &lod, bool secondLOD)
{
	// Address of the first Mipmap descriptor in the Texture structure.
	Pointer<Byte> base = texture + OFFSET(Texture, mipmap[0]);

	// Without mipmapping every lookup uses the base level.
	if(state.mipmapFilter == MIPMAP_NONE)
	{
		return base;
	}

	Int level;

	if(state.mipmapFilter == MIPMAP_POINT)
	{
		// Nearest level.  TODO: Preferred formula is ceil(lod + 0.5) - 1
		level = RoundInt(lod);
	}
	else  // MIPMAP_LINEAR
	{
		// Truncate; the adjacent level is requested via secondLOD.
		level = Int(lod);
	}

	return base + level * sizeof(Mipmap) + (secondLOD ? sizeof(Mipmap) : 0);
}
2230 
computeFilterOffset(Float & lod)2231 Int4 SamplerCore::computeFilterOffset(Float &lod)
2232 {
2233 	if(state.textureFilter == FILTER_POINT)
2234 	{
2235 		return Int4(0);
2236 	}
2237 	else if(state.textureFilter == FILTER_MIN_LINEAR_MAG_POINT)
2238 	{
2239 		return CmpNLE(Float4(lod), Float4(0.0f));
2240 	}
2241 	else if(state.textureFilter == FILTER_MIN_POINT_MAG_LINEAR)
2242 	{
2243 		return CmpLE(Float4(lod), Float4(0.0f));
2244 	}
2245 
2246 	return Int4(~0);
2247 }
2248 
address(const Float4 & uw,AddressingMode addressingMode,Pointer<Byte> & mipmap)2249 Short4 SamplerCore::address(const Float4 &uw, AddressingMode addressingMode, Pointer<Byte> &mipmap)
2250 {
2251 	if(addressingMode == ADDRESSING_UNUSED)
2252 	{
2253 		return Short4(0);  // TODO(b/134669567): Optimize for 1D filtering
2254 	}
2255 	else if(addressingMode == ADDRESSING_CLAMP || addressingMode == ADDRESSING_BORDER)
2256 	{
2257 		Float4 clamp = Min(Max(uw, Float4(0.0f)), Float4(65535.0f / 65536.0f));
2258 
2259 		return Short4(Int4(clamp * Float4(1 << 16)));
2260 	}
2261 	else if(addressingMode == ADDRESSING_MIRROR)
2262 	{
2263 		Int4 convert = Int4(uw * Float4(1 << 16));
2264 		Int4 mirror = (convert << 15) >> 31;
2265 
2266 		convert ^= mirror;
2267 
2268 		return Short4(convert);
2269 	}
2270 	else if(addressingMode == ADDRESSING_MIRRORONCE)
2271 	{
2272 		// Absolute value
2273 		Int4 convert = Int4(Abs(uw * Float4(1 << 16)));
2274 
2275 		// Clamp
2276 		convert -= Int4(0x00008000, 0x00008000, 0x00008000, 0x00008000);
2277 		convert = As<Int4>(PackSigned(convert, convert));
2278 
2279 		return As<Short4>(Int2(convert)) + Short4(0x8000u);
2280 	}
2281 	else  // Wrap
2282 	{
2283 		return Short4(Int4(uw * Float4(1 << 16)));
2284 	}
2285 }
2286 
computeLayerIndex16(const Float4 & a,Pointer<Byte> & mipmap)2287 Short4 SamplerCore::computeLayerIndex16(const Float4 &a, Pointer<Byte> &mipmap)
2288 {
2289 	if(!state.isArrayed())
2290 	{
2291 		return {};
2292 	}
2293 
2294 	Int4 layers = *Pointer<Int4>(mipmap + OFFSET(Mipmap, depth));
2295 
2296 	return Short4(Min(Max(RoundInt(a), Int4(0)), layers - Int4(1)));
2297 }
2298 
2299 // TODO: Eliminate when the gather + mirror addressing case is handled by mirroring the footprint.
mirror(Int4 n)2300 static Int4 mirror(Int4 n)
2301 {
2302 	auto positive = CmpNLT(n, Int4(0));
2303 	return (positive & n) | (~positive & (-(Int4(1) + n)));
2304 }
2305 
mod(Int4 n,Int4 d)2306 static Int4 mod(Int4 n, Int4 d)
2307 {
2308 	auto x = n % d;
2309 	auto positive = CmpNLT(x, Int4(0));
2310 	return (positive & x) | (~positive & (x + d));
2311 }
2312 
// Converts one coordinate channel to the integer texel coordinates of the
// filter footprint (xyz0 and, for linear filtering, xyz1) plus the fraction f
// used for interpolation, applying this channel's addressing mode.
//
// 'whd' is the byte offset within the Mipmap structure of this channel's
// dimension (width, height or depth). 'filter' is a per-lane all-ones mask
// where the neighboring texel is needed (see computeFilterOffset()); the
// increment xyz1 = xyz0 - filter adds one in exactly those lanes.
void SamplerCore::address(const Float4 &uvw, Int4 &xyz0, Int4 &xyz1, Float4 &f, Pointer<Byte> &mipmap, Int4 &offset, Int4 &filter, int whd, AddressingMode addressingMode)
{
	if(addressingMode == ADDRESSING_UNUSED)
	{
		f = Float4(0.0f);  // TODO(b/134669567): Optimize for 1D filtering
		return;
	}

	// Size of this channel's dimension at the selected mip level.
	Int4 dim = As<Int4>(*Pointer<UInt4>(mipmap + whd, 16));
	Int4 maxXYZ = dim - Int4(1);

	if(function == Fetch)  // Unnormalized coordinates
	{
		Int4 xyz = function.offset ? As<Int4>(uvw) + offset : As<Int4>(uvw);
		xyz0 = Min(Max(xyz, Int4(0)), maxXYZ);

		// VK_EXT_image_robustness requires checking for out-of-bounds accesses.
		// TODO(b/162327166): Only perform bounds checks when VK_EXT_image_robustness is enabled.
		// If the above clamping altered the result, the access is out-of-bounds.
		// In that case set the coordinate to -1 to perform texel replacement later.
		Int4 outOfBounds = CmpNEQ(xyz, xyz0);
		xyz0 |= outOfBounds;
	}
	else if(addressingMode == ADDRESSING_CUBEFACE)
	{
		// Cube face coordinates are passed through as integers unchanged.
		xyz0 = As<Int4>(uvw);
	}
	else
	{
		const int oneBits = 0x3F7FFFFF;  // Value just under 1.0f

		Float4 coord = uvw;

		if(state.unnormalizedCoordinates)
		{
			switch(addressingMode)
			{
			case ADDRESSING_CLAMP:
				coord = Min(Max(coord, Float4(0.0f)), Float4(dim) * As<Float4>(Int4(oneBits)));
				break;
			case ADDRESSING_BORDER:
				// Don't map to a valid range here.
				break;
			default:
				// "If unnormalizedCoordinates is VK_TRUE, addressModeU and addressModeV must each be
				//  either VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE or VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER"
				UNREACHABLE("addressingMode %d", int(addressingMode));
				break;
			}
		}
		else if(state.textureFilter == FILTER_GATHER && addressingMode == ADDRESSING_MIRROR)
		{
			// Gather requires the 'footprint' of the texels from which a component is taken, to also mirror around.
			// Therefore we can't just compute one texel's location and find the other ones at +1 offsets from it.
			// Here we handle that case separately by doing the mirroring per texel coordinate.
			// TODO: Mirror the footprint by adjusting the sign of the 0.5f and 1 offsets.

			coord = coord * Float4(dim);
			coord -= Float4(0.5f);
			Float4 floor = Floor(coord);
			xyz0 = Int4(floor);

			if(function.offset)
			{
				xyz0 += offset;
			}

			xyz1 = xyz0 + Int4(1);

			// Mirror each footprint coordinate independently.
			xyz0 = (maxXYZ)-mirror(mod(xyz0, Int4(2) * dim) - dim);
			xyz1 = (maxXYZ)-mirror(mod(xyz1, Int4(2) * dim) - dim);

			return;
		}
		else
		{
			if(!function.offset)
			{
				switch(addressingMode)
				{
				case ADDRESSING_CLAMP:
				case ADDRESSING_SEAMLESS:
					// While cube face coordinates are nominally already in the [0.0, 1.0] range
					// due to the projection, and numerical imprecision is tolerated due to the
					// border of pixels for seamless filtering, the projection doesn't cause
					// range normalization for Inf and NaN values. So we always clamp.
					{
						Float4 one = As<Float4>(Int4(oneBits));
						coord = Min(Max(coord, Float4(0.0f)), one);
					}
					break;
				case ADDRESSING_MIRROR:
					{
						Float4 one = As<Float4>(Int4(oneBits));
						coord = coord * Float4(0.5f);
						coord = Float4(2.0f) * Abs(coord - Round(coord));
						coord = Min(coord, one);
					}
					break;
				case ADDRESSING_MIRRORONCE:
					{
						Float4 one = As<Float4>(Int4(oneBits));
						coord = Min(Abs(coord), one);
					}
					break;
				case ADDRESSING_BORDER:
					// Don't map to a valid range here.
					break;
				default:  // Wrap
					coord = Frac(coord);
					break;
				}
			}

			// Scale from normalized coordinates to texel space.
			coord = coord * Float4(dim);
		}

		if(state.textureFilter == FILTER_POINT)
		{
			if(addressingMode == ADDRESSING_BORDER || function.offset)
			{
				xyz0 = Int4(Floor(coord));
			}
			else  // Can't have negative coordinates, so floor() is redundant when casting to int.
			{
				xyz0 = Int4(coord);
			}
		}
		else
		{
			// Linear filtering: center the footprint and compute the fraction.
			if(state.textureFilter == FILTER_MIN_POINT_MAG_LINEAR ||
			   state.textureFilter == FILTER_MIN_LINEAR_MAG_POINT)
			{
				// Only subtract 0.5 in the lanes where linear filtering applies.
				coord -= As<Float4>(As<Int4>(Float4(0.5f)) & filter);
			}
			else
			{
				coord -= Float4(0.5f);
			}

			Float4 floor = Floor(coord);
			xyz0 = Int4(floor);
			f = coord - floor;
		}

		if(function.offset)
		{
			xyz0 += offset;
		}

		if(addressingMode == ADDRESSING_SEAMLESS)  // Adjust for border.
		{
			xyz0 += Int4(1);
		}

		xyz1 = xyz0 - filter;  // Increment

		if(addressingMode == ADDRESSING_BORDER)
		{
			// Replace the coordinates with -1 if they're out of range.
			Int4 border0 = CmpLT(xyz0, Int4(0)) | CmpNLT(xyz0, dim);
			Int4 border1 = CmpLT(xyz1, Int4(0)) | CmpNLT(xyz1, dim);
			xyz0 |= border0;
			xyz1 |= border1;
		}
		else if(function.offset)
		{
			// With offsets both footprint texels can land out of range, so
			// both must be remapped per the addressing mode.
			switch(addressingMode)
			{
			case ADDRESSING_SEAMLESS:
				UNREACHABLE("addressingMode %d", int(addressingMode));  // Cube sampling doesn't support offset.
			case ADDRESSING_MIRROR:
			case ADDRESSING_MIRRORONCE:
				// TODO(b/29069044): Implement ADDRESSING_MIRROR and ADDRESSING_MIRRORONCE.
				// Fall through to Clamp.
			case ADDRESSING_CLAMP:
				xyz0 = Min(Max(xyz0, Int4(0)), maxXYZ);
				xyz1 = Min(Max(xyz1, Int4(0)), maxXYZ);
				break;
			default:  // Wrap
				xyz0 = mod(xyz0, dim);
				xyz1 = mod(xyz1, dim);
				break;
			}
		}
		else if(state.textureFilter != FILTER_POINT)
		{
			// Without offsets only the footprint edges can stray out of range.
			switch(addressingMode)
			{
			case ADDRESSING_SEAMLESS:
				break;
			case ADDRESSING_MIRROR:
			case ADDRESSING_MIRRORONCE:
			case ADDRESSING_CLAMP:
				xyz0 = Max(xyz0, Int4(0));
				xyz1 = Min(xyz1, maxXYZ);
				break;
			default:  // Wrap
				{
					Int4 under = CmpLT(xyz0, Int4(0));
					xyz0 = (under & maxXYZ) | (~under & xyz0);  // xyz < 0 ? dim - 1 : xyz   // TODO: IfThenElse()

					Int4 nover = CmpLT(xyz1, dim);
					xyz1 = nover & xyz1;  // xyz >= dim ? 0 : xyz
				}
				break;
			}
		}
	}
}
2523 
computeLayerIndex(const Float4 & a,Pointer<Byte> & mipmap)2524 Int4 SamplerCore::computeLayerIndex(const Float4 &a, Pointer<Byte> &mipmap)
2525 {
2526 	if(!state.isArrayed())
2527 	{
2528 		return {};
2529 	}
2530 
2531 	Int4 layers = *Pointer<Int4>(mipmap + OFFSET(Mipmap, depth), 16);
2532 	Int4 maxLayer = layers - Int4(1);
2533 
2534 	if(function == Fetch)  // Unnormalized coordinates
2535 	{
2536 		Int4 xyz = As<Int4>(a);
2537 		Int4 xyz0 = Min(Max(xyz, Int4(0)), maxLayer);
2538 
2539 		// VK_EXT_image_robustness requires checking for out-of-bounds accesses.
2540 		// TODO(b/162327166): Only perform bounds checks when VK_EXT_image_robustness is enabled.
2541 		// If the above clamping altered the result, the access is out-of-bounds.
2542 		// In that case set the coordinate to -1 to perform texel replacement later.
2543 		Int4 outOfBounds = CmpNEQ(xyz, xyz0);
2544 		xyz0 |= outOfBounds;
2545 
2546 		return xyz0;
2547 	}
2548 	else
2549 	{
2550 		return Min(Max(RoundInt(a), Int4(0)), maxLayer);
2551 	}
2552 }
2553 
sRGBtoLinearFF00(Short4 & c)2554 void SamplerCore::sRGBtoLinearFF00(Short4 &c)
2555 {
2556 	c = As<UShort4>(c) >> 8;
2557 
2558 	Pointer<Byte> LUT = Pointer<Byte>(constants + OFFSET(Constants, sRGBtoLinearFF_FF00));
2559 
2560 	c = Insert(c, *Pointer<Short>(LUT + 2 * Int(Extract(c, 0))), 0);
2561 	c = Insert(c, *Pointer<Short>(LUT + 2 * Int(Extract(c, 1))), 1);
2562 	c = Insert(c, *Pointer<Short>(LUT + 2 * Int(Extract(c, 2))), 2);
2563 	c = Insert(c, *Pointer<Short>(LUT + 2 * Int(Extract(c, 3))), 3);
2564 }
2565 
// True for normalized fixed-point (SNORM or UNORM) texture formats.
bool SamplerCore::hasNormalizedFormat() const
{
	return state.textureFormat.isSignedNormalized() || state.textureFormat.isUnsignedNormalized();
}
2570 
// True for floating-point texture formats.
bool SamplerCore::hasFloatTexture() const
{
	return state.textureFormat.isFloatFormat();
}
2575 
// True for unnormalized integer (UINT/SINT) texture formats.
bool SamplerCore::hasUnnormalizedIntegerTexture() const
{
	return state.textureFormat.isUnnormalizedInteger();
}
2580 
// True if the given component (0..3) of the texture format is unsigned.
bool SamplerCore::hasUnsignedTextureComponent(int component) const
{
	return state.textureFormat.isUnsignedComponent(component);
}
2585 
// Number of components in the texture format.
int SamplerCore::textureComponentCount() const
{
	return state.textureFormat.componentCount();
}
2590 
// True for formats packed into a single 16-bit word (e.g. R5G6B5).
bool SamplerCore::has16bitPackedTextureFormat() const
{
	return state.textureFormat.has16bitPackedTextureFormat();
}
2595 
// True for formats whose components are 8 bits wide.
bool SamplerCore::has8bitTextureComponents() const
{
	return state.textureFormat.has8bitTextureComponents();
}
2600 
// True for formats whose components are 16 bits wide.
bool SamplerCore::has16bitTextureComponents() const
{
	return state.textureFormat.has16bitTextureComponents();
}
2605 
// True for formats with 32-bit integer components.
bool SamplerCore::has32bitIntegerTextureComponents() const
{
	return state.textureFormat.has32bitIntegerTextureComponents();
}
2610 
// True for YCbCr (multi-planar) texture formats.
bool SamplerCore::isYcbcrFormat() const
{
	return state.textureFormat.isYcbcrFormat();
}
2615 
// True if the given component (0..3) is a color (R/G/B) component, as opposed
// to alpha or absent.
bool SamplerCore::isRGBComponent(int component) const
{
	return state.textureFormat.isRGBComponent(component);
}
2620 
borderModeActive() const2621 bool SamplerCore::borderModeActive() const
2622 {
2623 	return state.addressingModeU == ADDRESSING_BORDER ||
2624 	       state.addressingModeV == ADDRESSING_BORDER ||
2625 	       state.addressingModeW == ADDRESSING_BORDER;
2626 }
2627 
gatherSwizzle() const2628 VkComponentSwizzle SamplerCore::gatherSwizzle() const
2629 {
2630 	switch(state.gatherComponent)
2631 	{
2632 	case 0: return state.swizzle.r;
2633 	case 1: return state.swizzle.g;
2634 	case 2: return state.swizzle.b;
2635 	case 3: return state.swizzle.a;
2636 	default:
2637 		UNREACHABLE("Invalid component");
2638 		return VK_COMPONENT_SWIZZLE_R;
2639 	}
2640 }
2641 
getComponentScale() const2642 sw::float4 SamplerCore::getComponentScale() const
2643 {
2644 	// TODO(b/204709464): Unlike other formats, the fixed-point representation of the formats below are handled with bit extension.
2645 	// This special handling of such formats should be removed later.
2646 	switch(state.textureFormat)
2647 	{
2648 	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
2649 	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
2650 	case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
2651 		return sw::float4(0x7FFF, 0x7FFF, 0x7FFF, 0x7FFF);
2652 	default:
2653 		break;
2654 	};
2655 
2656 	const sw::int4 bits = state.textureFormat.bitsPerComponent();
2657 	const sw::int4 shift = sw::int4(16 - bits.x, 16 - bits.y, 16 - bits.z, 16 - bits.w);
2658 	const uint16_t sign = state.textureFormat.isUnsigned() ? 0xFFFF : 0x7FFF;
2659 
2660 	return sw::float4(static_cast<uint16_t>(0xFFFF << shift.x) & sign,
2661 	                  static_cast<uint16_t>(0xFFFF << shift.y) & sign,
2662 	                  static_cast<uint16_t>(0xFFFF << shift.z) & sign,
2663 	                  static_cast<uint16_t>(0xFFFF << shift.w) & sign);
2664 }
2665 
getGatherComponent() const2666 int SamplerCore::getGatherComponent() const
2667 {
2668 	VkComponentSwizzle swizzle = gatherSwizzle();
2669 
2670 	switch(swizzle)
2671 	{
2672 	default: UNSUPPORTED("VkComponentSwizzle %d", (int)swizzle); return 0;
2673 	case VK_COMPONENT_SWIZZLE_R:
2674 	case VK_COMPONENT_SWIZZLE_G:
2675 	case VK_COMPONENT_SWIZZLE_B:
2676 	case VK_COMPONENT_SWIZZLE_A:
2677 		// Normalize all components using the gather component scale.
2678 		return swizzle - VK_COMPONENT_SWIZZLE_R;
2679 	case VK_COMPONENT_SWIZZLE_ZERO:
2680 	case VK_COMPONENT_SWIZZLE_ONE:
2681 		// These cases are handled later.
2682 		return 0;
2683 	}
2684 
2685 	return 0;
2686 }
2687 
2688 }  // namespace sw
2689