// HLSL intrinsic-coverage test for the compute stage.
// Exercises the groupshared Interlocked* atomic intrinsics in every
// width (scalar uint, uint2, uint3, uint4), each in both the 2-argument
// form and the 3-argument form that returns the pre-operation value.
// NOTE(review): this reads as a compiler front-end test fixture (e.g.
// glslang/DXC); return values and most parameters are deliberately dummy.

// Scalar and vector groupshared operands for the atomic calls below:
// *_ua* is the destination, *_ub* the operand, *_uc* the exchange value
// used by InterlockedCompareExchange.
groupshared uint gs_ua;
groupshared uint gs_ub;
groupshared uint gs_uc;
groupshared uint2 gs_ua2;
groupshared uint2 gs_ub2;
groupshared uint2 gs_uc2;
groupshared uint3 gs_ua3;
groupshared uint3 gs_ub3;
groupshared uint3 gs_uc3;
groupshared uint4 gs_ua4;
groupshared uint4 gs_ub4;
groupshared uint4 gs_uc4;

// Scalar-uint overloads of every Interlocked* intrinsic.
// All parameters except inF0 are unused; they only shape the signature.
float ComputeShaderFunctionS(float inF0, float inF1, float inF2, uint inU0, uint inU1)
{
    uint out_u1; // receives the original (pre-op) value from the 3-arg overloads

    // Don't repeat all the pixel/vertex fns - just one for sanity.
    all(inF0);

    // Test atomics
    InterlockedAdd(gs_ua, gs_ub);
    InterlockedAdd(gs_ua, gs_ub, out_u1);
    InterlockedAnd(gs_ua, gs_ub);
    InterlockedAnd(gs_ua, gs_ub, out_u1);
    InterlockedCompareExchange(gs_ua, gs_ub, gs_uc, out_u1);
    InterlockedExchange(gs_ua, gs_ub, out_u1);
    InterlockedMax(gs_ua, gs_ub);
    InterlockedMax(gs_ua, gs_ub, out_u1);
    InterlockedMin(gs_ua, gs_ub);
    InterlockedMin(gs_ua, gs_ub, out_u1);
    InterlockedOr(gs_ua, gs_ub);
    InterlockedOr(gs_ua, gs_ub, out_u1);
    InterlockedXor(gs_ua, gs_ub);
    InterlockedXor(gs_ua, gs_ub, out_u1);

    // CheckAccessFullyMapped(3); // TODO: ...

    return 0.0;
}

// Placeholder for single-component-vector (float1) coverage.
float1 ComputeShaderFunction1(float1 inF0, float1 inF1, float1 inF2)
{
    // TODO: ... add when float1 prototypes are generated
    return 0.0;
}

// uint2 overloads of every Interlocked* intrinsic (mirrors the scalar fn).
float2 ComputeShaderFunction2(float2 inF0, float2 inF1, float2 inF2, uint2 inU0, uint2 inU1)
{
    uint2 out_u2; // receives the original (pre-op) value from the 3-arg overloads

    // Don't repeat all the pixel/vertex fns - just one for sanity.
    all(inF0);

    // Test atomics
    InterlockedAdd(gs_ua2, gs_ub2);
    InterlockedAdd(gs_ua2, gs_ub2, out_u2);
    InterlockedAnd(gs_ua2, gs_ub2);
    InterlockedAnd(gs_ua2, gs_ub2, out_u2);
    InterlockedCompareExchange(gs_ua2, gs_ub2, gs_uc2, out_u2);
    InterlockedExchange(gs_ua2, gs_ub2, out_u2);
    InterlockedMax(gs_ua2, gs_ub2);
    InterlockedMax(gs_ua2, gs_ub2, out_u2);
    InterlockedMin(gs_ua2, gs_ub2);
    InterlockedMin(gs_ua2, gs_ub2, out_u2);
    InterlockedOr(gs_ua2, gs_ub2);
    InterlockedOr(gs_ua2, gs_ub2, out_u2);
    InterlockedXor(gs_ua2, gs_ub2);
    InterlockedXor(gs_ua2, gs_ub2, out_u2);

    // TODO: ... add when float1 prototypes are generated
    return float2(1,2);
}

// uint3 overloads of every Interlocked* intrinsic (mirrors the scalar fn).
float3 ComputeShaderFunction3(float3 inF0, float3 inF1, float3 inF2, uint3 inU0, uint3 inU1)
{
    uint3 out_u3; // receives the original (pre-op) value from the 3-arg overloads

    // Don't repeat all the pixel/vertex fns - just one for sanity.
    all(inF0);

    // Test atomics
    InterlockedAdd(gs_ua3, gs_ub3);
    InterlockedAdd(gs_ua3, gs_ub3, out_u3);
    InterlockedAnd(gs_ua3, gs_ub3);
    InterlockedAnd(gs_ua3, gs_ub3, out_u3);
    InterlockedCompareExchange(gs_ua3, gs_ub3, gs_uc3, out_u3);
    InterlockedExchange(gs_ua3, gs_ub3, out_u3);
    InterlockedMax(gs_ua3, gs_ub3);
    InterlockedMax(gs_ua3, gs_ub3, out_u3);
    InterlockedMin(gs_ua3, gs_ub3);
    InterlockedMin(gs_ua3, gs_ub3, out_u3);
    InterlockedOr(gs_ua3, gs_ub3);
    InterlockedOr(gs_ua3, gs_ub3, out_u3);
    InterlockedXor(gs_ua3, gs_ub3);
    InterlockedXor(gs_ua3, gs_ub3, out_u3);

    // TODO: ... add when float1 prototypes are generated
    return float3(1,2,3);
}

// uint4 overloads of every Interlocked* intrinsic (mirrors the scalar fn).
// Presumably the shader entry point for this fixture — TODO confirm against
// the test harness's declared entry name.
float4 ComputeShaderFunction(float4 inF0, float4 inF1, float4 inF2, uint4 inU0, uint4 inU1)
{
    uint4 out_u4; // receives the original (pre-op) value from the 3-arg overloads

    // Don't repeat all the pixel/vertex fns - just one for sanity.
    all(inF0);

    // Test atomics
    InterlockedAdd(gs_ua4, gs_ub4);
    InterlockedAdd(gs_ua4, gs_ub4, out_u4);
    InterlockedAnd(gs_ua4, gs_ub4);
    InterlockedAnd(gs_ua4, gs_ub4, out_u4);
    InterlockedCompareExchange(gs_ua4, gs_ub4, gs_uc4, out_u4);
    InterlockedExchange(gs_ua4, gs_ub4, out_u4);
    InterlockedMax(gs_ua4, gs_ub4);
    InterlockedMax(gs_ua4, gs_ub4, out_u4);
    InterlockedMin(gs_ua4, gs_ub4);
    InterlockedMin(gs_ua4, gs_ub4, out_u4);
    InterlockedOr(gs_ua4, gs_ub4);
    InterlockedOr(gs_ua4, gs_ub4, out_u4);
    InterlockedXor(gs_ua4, gs_ub4);
    InterlockedXor(gs_ua4, gs_ub4, out_u4);

    // TODO: ... add when float1 prototypes are generated
    return float4(1,2,3,4);
}