// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

#ifndef SkVM_opts_DEFINED
#define SkVM_opts_DEFINED

#include "include/private/SkVx.h"
#include "src/core/SkVM.h"

template <int N>
static inline skvx::Vec<N,int> gather32(const int* ptr, const skvx::Vec<N,int>& ix) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    if constexpr (N == 8) {
        return skvx::bit_pun<skvx::Vec<N,int>>(
                _mm256_i32gather_epi32(ptr, skvx::bit_pun<__m256i>(ix), 4));
    }
#endif
    // Split wider vectors in half and recurse until we hit a specialization above,
    // falling back on the standard scalar map()-based implementation.
    if constexpr (N > 8) {
        return join(gather32(ptr, ix.lo),
                    gather32(ptr, ix.hi));
    }
    return map([&](int i) { return ptr[i]; }, ix);
}
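
// For instance (a hypothetical caller, not code in this file):
//     skvx::Vec<16,int> v = gather32(ptr, ix);   // ix is a skvx::Vec<16,int>
// recurses as join(gather32(ptr, ix.lo), gather32(ptr, ix.hi)), so on AVX2 each
// 8-lane half takes the single-instruction _mm256_i32gather_epi32 path above.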

namespace SK_OPTS_NS {

namespace SkVMInterpreterTypes {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
    constexpr inline int K = 32;  // 1024-bit: 4 ymm or 2 zmm at a time
#else
    constexpr inline int K = 8;   // 256-bit: 2 xmm, 2 v-registers, etc.
#endif
    using I32 = skvx::Vec<K, int>;
    using I16 = skvx::Vec<K, int16_t>;
    using F32 = skvx::Vec<K, float>;
    using U64 = skvx::Vec<K, uint64_t>;
    using U32 = skvx::Vec<K, uint32_t>;
    using U16 = skvx::Vec<K, uint16_t>;
    using  U8 = skvx::Vec<K, uint8_t>;
    union Slot {
        F32   f32;
        I32   i32;
        U32   u32;
        I16   i16;
        U16   u16;
    };
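    // Every member is a K-lane vector, so a Slot is as big as its widest member:
    // with K == 32, the 32-bit members span 128 bytes and the 16-bit members 64,
    // making sizeof(Slot) 128.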
}  // namespace SkVMInterpreterTypes

    inline void interpret_skvm(const skvm::InterpreterInstruction insts[], const int ninsts,
                               const int nregs, const int loop,
                               const int strides[], const int nargs,
                               int n, void* args[]) {
        using namespace skvm;

        using SkVMInterpreterTypes::K;
        using SkVMInterpreterTypes::I32;
        using SkVMInterpreterTypes::I16;
        using SkVMInterpreterTypes::F32;
        using SkVMInterpreterTypes::U64;
        using SkVMInterpreterTypes::U32;
        using SkVMInterpreterTypes::U16;
        using SkVMInterpreterTypes::U8;
        using SkVMInterpreterTypes::Slot;

        // We'll operate in SIMT style, knocking off K-size chunks from n while possible.

        Slot                    few_regs[16];
        std::unique_ptr<char[]> many_regs;

        Slot* r = few_regs;

        if (nregs > (int)SK_ARRAY_COUNT(few_regs)) {
            // Annoyingly we can't trust that malloc() or new will work with Slot because
            // the skvx::Vec types may have alignment greater than what they provide.
            // We'll overallocate one extra register so we can align manually.
            many_regs.reset(new char[ sizeof(Slot) * (nregs + 1) ]);

            uintptr_t addr = (uintptr_t)many_regs.get();
            addr += alignof(Slot) - (addr & (alignof(Slot) - 1));
            SkASSERT((addr & (alignof(Slot) - 1)) == 0);
            r = (Slot*)addr;
        }
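        // E.g. (illustrative numbers): if alignof(Slot) were 32 and the allocation
        // ended in ...0x08, we'd add 32 - 8 = 24 bytes to reach the next 32-byte
        // boundary; an already-aligned pointer advances by a full 32 bytes, which
        // the extra Slot of slack absorbs either way.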

        // Step each argument pointer ahead by its stride a number of times.
        auto step_args = [&](int times) {
            for (int i = 0; i < nargs; i++) {
                args[i] = (void*)( (char*)args[i] + times * strides[i] );
            }
        };

        int start = 0,
            stride;
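        // The first pass through runs every instruction from 0; each later pass
        // restarts at `loop`, re-running only the loop body and skipping the
        // (presumably loop-invariant) instructions hoisted in front of it.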
        for ( ; n > 0; start = loop, n -= stride, step_args(stride)) {
            stride = n >= K ? K : 1;

            for (int instIdx = start; instIdx < ninsts; instIdx++) {
                InterpreterInstruction inst = insts[instIdx];

                // d = op(x,y,z,w, immA,immB,immC)
                Reg   d = inst.d,
                      x = inst.x,
                      y = inst.y,
                      z = inst.z,
                      w = inst.w;
                int immA = inst.immA,
                    immB = inst.immB,
                    immC = inst.immC;

                // Ops that interact with memory need to know whether we're stride=1 or K,
                // but all non-memory ops can run the same code no matter the stride.
                switch (2*(int)inst.op + (stride == K ? 1 : 0)) {
                    default: SkUNREACHABLE;

                #define STRIDE_1(op) case 2*(int)op
                #define STRIDE_K(op) case 2*(int)op + 1
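                    // For example, Op::store32 dispatches to case 2*(int)Op::store32
                    // when stride == 1, and to case 2*(int)Op::store32 + 1 when stride == K.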
                    STRIDE_1(Op::store8 ): memcpy(args[immA], &r[x].i32, 1); break;
                    STRIDE_1(Op::store16): memcpy(args[immA], &r[x].i32, 2); break;
                    STRIDE_1(Op::store32): memcpy(args[immA], &r[x].i32, 4); break;
                    STRIDE_1(Op::store64): memcpy((char*)args[immA]+0, &r[x].i32, 4);
                                           memcpy((char*)args[immA]+4, &r[y].i32, 4); break;

                    STRIDE_K(Op::store8 ): skvx::cast<uint8_t> (r[x].i32).store(args[immA]); break;
                    STRIDE_K(Op::store16): skvx::cast<uint16_t>(r[x].i32).store(args[immA]); break;
                    STRIDE_K(Op::store32):                     (r[x].i32).store(args[immA]); break;
                    STRIDE_K(Op::store64): (skvx::cast<uint64_t>(r[x].u32) << 0 |
                                            skvx::cast<uint64_t>(r[y].u32) << 32).store(args[immA]);
                                           break;
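                    // (store64 packs the i-th lanes of r[x] and r[y] into one 64-bit
                    // value per lane: r[x] supplies the low 32 bits, r[y] the high 32.)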

                    STRIDE_1(Op::load8 ): r[d].i32 = 0; memcpy(&r[d].i32, args[immA], 1); break;
                    STRIDE_1(Op::load16): r[d].i32 = 0; memcpy(&r[d].i32, args[immA], 2); break;
                    STRIDE_1(Op::load32): r[d].i32 = 0; memcpy(&r[d].i32, args[immA], 4); break;
                    STRIDE_1(Op::load64):
                        r[d].i32 = 0; memcpy(&r[d].i32, (char*)args[immA] + 4*immB, 4); break;

                    STRIDE_K(Op::load8 ): r[d].i32 = skvx::cast<int>(U8 ::Load(args[immA])); break;
                    STRIDE_K(Op::load16): r[d].i32 = skvx::cast<int>(U16::Load(args[immA])); break;
                    STRIDE_K(Op::load32): r[d].i32 =                 I32::Load(args[immA]) ; break;
                    STRIDE_K(Op::load64):
                        // Low 32 bits if immB=0, or high 32 bits if immB=1.
                        r[d].i32 = skvx::cast<int>(U64::Load(args[immA]) >> (32*immB)); break;

                    // The pointer we base our gather on is loaded indirectly from a uniform:
                    //     - args[immA] is the uniform holding our gather base pointer somewhere;
                    //     - (const uint8_t*)args[immA] + immB points to the gather base pointer;
                    //     - memcpy() loads the gather base into a pointer of the right type.
                    // After all that we have an ordinary (uniform) pointer `ptr` to load from,
                    // and we then gather from it using the varying indices in r[x].
                    STRIDE_1(Op::gather8): {
                        const uint8_t* ptr;
                        memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
                        r[d].i32 = ptr[ r[x].i32[0] ];
                    } break;
                    STRIDE_1(Op::gather16): {
                        const uint16_t* ptr;
                        memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
                        r[d].i32 = ptr[ r[x].i32[0] ];
                    } break;
                    STRIDE_1(Op::gather32): {
                        const int* ptr;
                        memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
                        r[d].i32 = ptr[ r[x].i32[0] ];
                    } break;

                    STRIDE_K(Op::gather8): {
                        const uint8_t* ptr;
                        memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
                        r[d].i32 = map([&](int ix) { return (int)ptr[ix]; }, r[x].i32);
                    } break;
                    STRIDE_K(Op::gather16): {
                        const uint16_t* ptr;
                        memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
                        r[d].i32 = map([&](int ix) { return (int)ptr[ix]; }, r[x].i32);
                    } break;
                    STRIDE_K(Op::gather32): {
                        const int* ptr;
                        memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
                        r[d].i32 = gather32(ptr, r[x].i32);
                    } break;

                #undef STRIDE_1
                #undef STRIDE_K

                    // Ops that don't interact with memory should never care about the stride.
                #define CASE(op) case 2*(int)op: /*fallthrough*/ case 2*(int)op+1

                    // These 128-bit ops are implemented serially for simplicity.
                    CASE(Op::store128): {
                        U64 lo = (skvx::cast<uint64_t>(r[x].u32) << 0 |
                                  skvx::cast<uint64_t>(r[y].u32) << 32),
                            hi = (skvx::cast<uint64_t>(r[z].u32) << 0 |
                                  skvx::cast<uint64_t>(r[w].u32) << 32);
                        for (int i = 0; i < stride; i++) {
                            memcpy((char*)args[immA] + 16*i + 0, &lo[i], 8);
                            memcpy((char*)args[immA] + 16*i + 8, &hi[i], 8);
                        }
                    } break;
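                    // (Each logical 128-bit value lives across the i-th lanes of four
                    // 32-bit registers: x holds bits 0-31, y bits 32-63, z bits 64-95,
                    // and w bits 96-127.)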

                    CASE(Op::load128):
                        r[d].i32 = 0;
                        for (int i = 0; i < stride; i++) {
                            memcpy(&r[d].i32[i], (const char*)args[immA] + 16*i + 4*immB, 4);
                        } break;

                    CASE(Op::assert_true):
                    #ifdef SK_DEBUG
                        if (!all(r[x].i32)) {
                            SkDebugf("inst %d, register %d\n", instIdx, y);
                            for (int i = 0; i < K; i++) {
                                SkDebugf("\t%2d: %08x (%g)\n",
                                         i, r[y].i32[i], r[y].f32[i]);
                            }
                            SkASSERT(false);
                        }
                    #endif
                    break;

                    CASE(Op::trace_line):
                    #ifdef SK_DEBUG
                    // TODO(skia:12614): this opcode will check the mask; if it's set, we write the
                    // line number from immA into the trace buffer.
                    #endif
                    break;

                    CASE(Op::trace_var):
                    #ifdef SK_DEBUG
                    // TODO(skia:12614): this opcode will check the mask; if it's set, we write the
                    // variable-assignment slot and value to the trace buffer.
                    #endif
                    break;

                    CASE(Op::trace_call):
                    #ifdef SK_DEBUG
                    // TODO(skia:12614): this opcode will be used to keep track of function entrances
                    // and exits, enabling step-over of function calls.
                    #endif
                    break;

                    CASE(Op::index): {
                        const int iota[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,
                                            16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
                                            32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
                                            48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63 };
                        static_assert(K <= SK_ARRAY_COUNT(iota), "");

                        r[d].i32 = n - I32::Load(iota);
                    } break;
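                    // (So lane i of Op::index reads n - i: lanes count down from n, the
                    // number of values still left to process at the top of this pass.)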

                    CASE(Op::uniform32):
                        r[d].i32 = *(const int*)( (const char*)args[immA] + immB );
                        break;

                    CASE(Op::array32): {
                        const int* ptr;
                        memcpy(&ptr, (const uint8_t*)args[immA] + immB, sizeof(ptr));
                        r[d].i32 = ptr[immC/sizeof(int)];
                    } break;
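                    // (uniform32 reads a 32-bit value directly at args[immA] + immB, while
                    // array32 reads a pointer from there and then indexes immC bytes into it.)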

                    CASE(Op::splat): r[d].i32 = immA; break;

                    CASE(Op::add_f32): r[d].f32 = r[x].f32 + r[y].f32; break;
                    CASE(Op::sub_f32): r[d].f32 = r[x].f32 - r[y].f32; break;
                    CASE(Op::mul_f32): r[d].f32 = r[x].f32 * r[y].f32; break;
                    CASE(Op::div_f32): r[d].f32 = r[x].f32 / r[y].f32; break;
                    CASE(Op::min_f32): r[d].f32 = min(r[x].f32, r[y].f32); break;
                    CASE(Op::max_f32): r[d].f32 = max(r[x].f32, r[y].f32); break;

                    CASE(Op::fma_f32):  r[d].f32 = fma( r[x].f32, r[y].f32,  r[z].f32); break;
                    CASE(Op::fms_f32):  r[d].f32 = fma( r[x].f32, r[y].f32, -r[z].f32); break;
                    CASE(Op::fnma_f32): r[d].f32 = fma(-r[x].f32, r[y].f32,  r[z].f32); break;
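                    // (All three are fused multiply-adds with one sign flipped:
                    //  fma = x*y + z,  fms = x*y - z,  fnma = -x*y + z.)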

                    CASE(Op::sqrt_f32): r[d].f32 = sqrt(r[x].f32); break;

                    CASE(Op::add_i32): r[d].i32 = r[x].i32 + r[y].i32; break;
                    CASE(Op::sub_i32): r[d].i32 = r[x].i32 - r[y].i32; break;
                    CASE(Op::mul_i32): r[d].i32 = r[x].i32 * r[y].i32; break;

                    CASE(Op::shl_i32): r[d].i32 = r[x].i32 << immA; break;
                    CASE(Op::sra_i32): r[d].i32 = r[x].i32 >> immA; break;
                    CASE(Op::shr_i32): r[d].u32 = r[x].u32 >> immA; break;

                    CASE(Op:: eq_f32): r[d].i32 = r[x].f32 == r[y].f32; break;
                    CASE(Op::neq_f32): r[d].i32 = r[x].f32 != r[y].f32; break;
                    CASE(Op:: gt_f32): r[d].i32 = r[x].f32 >  r[y].f32; break;
                    CASE(Op::gte_f32): r[d].i32 = r[x].f32 >= r[y].f32; break;

                    CASE(Op:: eq_i32): r[d].i32 = r[x].i32 == r[y].i32; break;
                    CASE(Op:: gt_i32): r[d].i32 = r[x].i32 >  r[y].i32; break;
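                    // (These comparisons rely on skvx returning full-width lane masks,
                    //  all-ones for true and all-zeros for false, which is the form
                    //  Op::select's if_then_else consumes.)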

                    CASE(Op::bit_and  ): r[d].i32 = r[x].i32 &  r[y].i32; break;
                    CASE(Op::bit_or   ): r[d].i32 = r[x].i32 |  r[y].i32; break;
                    CASE(Op::bit_xor  ): r[d].i32 = r[x].i32 ^  r[y].i32; break;
                    CASE(Op::bit_clear): r[d].i32 = r[x].i32 & ~r[y].i32; break;

                    CASE(Op::select): r[d].i32 = skvx::if_then_else(r[x].i32, r[y].i32, r[z].i32);
                                      break;

                    CASE(Op::ceil):   r[d].f32 =                    skvx::ceil(r[x].f32) ; break;
                    CASE(Op::floor):  r[d].f32 =                   skvx::floor(r[x].f32) ; break;
                    CASE(Op::to_f32): r[d].f32 = skvx::cast<float>(            r[x].i32 ); break;
                    CASE(Op::trunc):  r[d].i32 = skvx::cast<int>  (            r[x].f32 ); break;
                    CASE(Op::round):  r[d].i32 = skvx::cast<int>  (skvx::lrint(r[x].f32)); break;

                    CASE(Op::to_fp16):
                        r[d].i32 = skvx::cast<int>(skvx::to_half(r[x].f32));
                        break;
                    CASE(Op::from_fp16):
                        r[d].f32 = skvx::from_half(skvx::cast<uint16_t>(r[x].i32));
                        break;

                #undef CASE
                }
            }
        }
    }

}  // namespace SK_OPTS_NS

#endif  // SkVM_opts_DEFINED