//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "common.h"
#include "function_list.h"
#include "test_functions.h"
#include "utility.h"

#include <cinttypes>
#include <cstring>

namespace {

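// Generate the OpenCL C source for the kernel under test and build one kernel
// per worker thread for the given vector size. Vector size 3 needs its own
// kernel source because it uses vload3/vstore3 and handles the leftover tail
// elements explicitly.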
int BuildKernel(const char *name, int vectorSize, cl_uint kernel_count,
                cl_kernel *k, cl_program *p, bool relaxedMode)
{
    const char *c[] = { "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
                        "__kernel void math_kernel",
                        sizeNames[vectorSize],
                        "( __global long",
                        sizeNames[vectorSize],
                        "* out, __global double",
                        sizeNames[vectorSize],
                        "* in )\n"
                        "{\n"
                        "   size_t i = get_global_id(0);\n"
                        "   out[i] = ",
                        name,
                        "( in[i] );\n"
                        "}\n" };

    const char *c3[] = {
        "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
        "__kernel void math_kernel",
        sizeNames[vectorSize],
        "( __global long* out, __global double* in)\n"
        "{\n"
        "   size_t i = get_global_id(0);\n"
        "   if( i + 1 < get_global_size(0) )\n"
        "   {\n"
        "       double3 d0 = vload3( 0, in + 3 * i );\n"
        "       long3 l0 = ",
        name,
        "( d0 );\n"
        "       vstore3( l0, 0, out + 3*i );\n"
        "   }\n"
        "   else\n"
        "   {\n"
62 " size_t parity = i & 1; // Figure out how many elements are "
63 "left over after BUFFER_SIZE % (3*sizeof(float)). Assume power of two "
64 "buffer size \n"
65 " double3 d0;\n"
66 " switch( parity )\n"
67 " {\n"
68 " case 1:\n"
69 " d0 = (double3)( in[3*i], NAN, NAN ); \n"
70 " break;\n"
71 " case 0:\n"
72 " d0 = (double3)( in[3*i], in[3*i+1], NAN ); \n"
73 " break;\n"
74 " }\n"
75 " long3 l0 = ",
76 name,
77 "( d0 );\n"
78 " switch( parity )\n"
79 " {\n"
80 " case 0:\n"
81 " out[3*i+1] = l0.y; \n"
82 " // fall through\n"
83 " case 1:\n"
84 " out[3*i] = l0.x; \n"
85 " break;\n"
86 " }\n"
87 " }\n"
88 "}\n"
89 };
90
91 const char **kern = c;
92 size_t kernSize = sizeof(c) / sizeof(c[0]);
93
94 if (sizeValues[vectorSize] == 3)
95 {
96 kern = c3;
97 kernSize = sizeof(c3) / sizeof(c3[0]);
98 }
99
100 char testName[32];
101 snprintf(testName, sizeof(testName) - 1, "math_kernel%s",
102 sizeNames[vectorSize]);
103
104 return MakeKernels(kern, (cl_uint)kernSize, testName, kernel_count, k, p,
105 relaxedMode);
106 }
107
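// ThreadPool callback: each job builds the per-thread kernels for one vector
// size, starting at gMinVectorSizeIndex.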
cl_int BuildKernelFn(cl_uint job_id, cl_uint thread_id UNUSED, void *p)
{
    BuildKernelInfo *info = (BuildKernelInfo *)p;
    cl_uint vectorSize = gMinVectorSizeIndex + job_id;
    return BuildKernel(info->nameInCode, vectorSize, info->threadCount,
                       info->kernels[vectorSize].data(),
                       &(info->programs[vectorSize]), info->relaxedMode);
}

// Thread specific data for a worker thread
struct ThreadInfo
{
    // Input and output buffers for the thread
    clMemWrapper inBuf;
    Buffers outBuf;

    // Per thread command queue to improve performance
    clCommandQueueWrapper tQueue;
};

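// Test configuration shared by all worker threads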
struct TestInfo
{
    size_t subBufferSize; // Size of the sub-buffer in elements
    const Func *f; // A pointer to the function info

    // Programs for various vector sizes.
    Programs programs;

    // Thread-specific kernels for each vector size:
    // k[vector_size][thread_id]
    KernelMatrix k;

    // Array of thread specific information
    std::vector<ThreadInfo> tinfo;

    cl_uint threadCount; // Number of worker threads
    cl_uint jobCount; // Number of jobs
    cl_uint step; // step between each chunk and the next.
    cl_uint scale; // stride between individual test values
    int ftz; // non-zero if running in flush to zero mode
    bool relaxedMode; // True if test is running in relaxed mode, false
                      // otherwise.
};

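// ThreadPool callback: each job tests one chunk of the 32-bit input space.
// The inputs for the chunk are generated from job_id, run through both the
// reference function and the OpenCL kernels for every vector size, and the
// results are compared.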
cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
{
    TestInfo *job = (TestInfo *)data;
    size_t buffer_elements = job->subBufferSize;
    size_t buffer_size = buffer_elements * sizeof(cl_double);
    cl_uint scale = job->scale;
    cl_uint base = job_id * (cl_uint)job->step;
    ThreadInfo *tinfo = &(job->tinfo[thread_id]);
    dptr dfunc = job->f->dfunc;
    int ftz = job->ftz;
    bool relaxedMode = job->relaxedMode;
    cl_int error;
    const char *name = job->f->name;

    Force64BitFPUPrecision();

    // start the map of the output arrays
    cl_event e[VECTOR_SIZE_COUNT];
    cl_long *out[VECTOR_SIZE_COUNT];
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        out[j] = (cl_long *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], CL_FALSE, CL_MAP_WRITE, 0,
            buffer_size, 0, NULL, e + j, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush failed\n");

    // Write the new values to the input array
    cl_double *p = (cl_double *)gIn + thread_id * buffer_elements;
    for (size_t j = 0; j < buffer_elements; j++)
        p[j] = DoubleFromUInt32(base + j * scale);

    if ((error = clEnqueueWriteBuffer(tinfo->tQueue, tinfo->inBuf, CL_FALSE, 0,
                                      buffer_size, p, 0, NULL, NULL)))
    {
        vlog_error("Error: clEnqueueWriteBuffer failed! err: %d\n", error);
        return error;
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        // Wait for the map to finish
        if ((error = clWaitForEvents(1, e + j)))
        {
            vlog_error("Error: clWaitForEvents failed! err: %d\n", error);
            return error;
        }
        if ((error = clReleaseEvent(e[j])))
        {
            vlog_error("Error: clReleaseEvent failed! err: %d\n", error);
            return error;
        }

        // Fill the result buffer with garbage, so that old results don't carry
        // over
        uint32_t pattern = 0xffffdead;
        memset_pattern4(out[j], &pattern, buffer_size);
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject failed! err: %d\n",
                       error);
            return error;
        }

        // run the kernel
        size_t vectorCount =
            (buffer_elements + sizeValues[j] - 1) / sizeValues[j];
        cl_kernel kernel = job->k[j][thread_id]; // each worker thread has its
                                                 // own copy of the cl_kernel
        cl_program program = job->programs[j];

        if ((error = clSetKernelArg(kernel, 0, sizeof(tinfo->outBuf[j]),
                                    &tinfo->outBuf[j])))
        {
            LogBuildError(program);
            return error;
        }
        if ((error = clSetKernelArg(kernel, 1, sizeof(tinfo->inBuf),
                                    &tinfo->inBuf)))
        {
            LogBuildError(program);
            return error;
        }

        if ((error = clEnqueueNDRangeKernel(tinfo->tQueue, kernel, 1, NULL,
                                            &vectorCount, NULL, 0, NULL, NULL)))
        {
            vlog_error("FAILED -- could not execute kernel\n");
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 2 failed\n");

    if (gSkipCorrectnessTesting) return CL_SUCCESS;

    // Calculate the correctly rounded reference result
    cl_long *r = (cl_long *)gOut_Ref + thread_id * buffer_elements;
    cl_double *s = (cl_double *)p;
    for (size_t j = 0; j < buffer_elements; j++) r[j] = dfunc.i_f(s[j]);

    // Read the data back -- no need to wait for the first N-1 buffers but wait
    // for the last buffer. This is an in order queue.
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        cl_bool blocking = (j + 1 < gMaxVectorSizeIndex) ? CL_FALSE : CL_TRUE;
        out[j] = (cl_long *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], blocking, CL_MAP_READ, 0,
            buffer_size, 0, NULL, NULL, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Verify data
    cl_long *t = (cl_long *)r;
    for (size_t j = 0; j < buffer_elements; j++)
    {
        cl_long *q = out[0];

        // If we aren't getting the correctly rounded result
        if (gMinVectorSizeIndex == 0 && t[j] != q[j])
        {
            // In FTZ or relaxed mode, a subnormal input may legitimately have
            // been flushed to zero, so also accept the results for +0.0 / -0.0
            if (ftz || relaxedMode)
            {
                if (IsDoubleSubnormal(s[j]))
                {
                    cl_long correct = dfunc.i_f(+0.0f);
                    cl_long correct2 = dfunc.i_f(-0.0f);
                    if (correct == q[j] || correct2 == q[j]) continue;
                }
            }

            cl_ulong err = t[j] - q[j];
            if (q[j] > t[j]) err = q[j] - t[j];
            vlog_error("\nERROR: %sD: %" PRId64
                       " ulp error at %.13la: *%" PRId64 " vs. %" PRId64 "\n",
                       name, err, ((double *)gIn)[j], t[j], q[j]);
            return -1;
        }

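        // The scalar reference macro returns 0 or 1, while the vector forms
        // of the classification macros return 0 or -1 (all bits set) per
        // element, hence the comparison against -t[j] below.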
        for (auto k = std::max(1U, gMinVectorSizeIndex);
             k < gMaxVectorSizeIndex; k++)
        {
            q = out[k];
            // If we aren't getting the correctly rounded result
            if (-t[j] != q[j])
            {
                if (ftz || relaxedMode)
                {
                    if (IsDoubleSubnormal(s[j]))
                    {
                        int64_t correct = -dfunc.i_f(+0.0f);
                        int64_t correct2 = -dfunc.i_f(-0.0f);
                        if (correct == q[j] || correct2 == q[j]) continue;
                    }
                }

                cl_ulong err = -t[j] - q[j];
                if (q[j] > -t[j]) err = q[j] + t[j];
                vlog_error(
                    "\nERROR: %sD%s: %" PRId64 " ulp error at %.13la: *%" PRId64
                    " vs. %" PRId64 "\n",
                    name, sizeNames[k], err, ((double *)gIn)[j], -t[j], q[j]);
                return -1;
            }
        }
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject %d failed 2! err: %d\n",
                       j, error);
            return error;
        }
    }

    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 3 failed\n");

    if (0 == (base & 0x0fffffff))
    {
        if (gVerboseBruteForce)
        {
            vlog("base:%14u step:%10u scale:%10u buf_elements:%10zd "
                 "ThreadCount:%2u\n",
                 base, job->step, job->scale, buffer_elements,
                 job->threadCount);
        }
        else
        {
            vlog(".");
        }
        fflush(stdout);
    }

    return CL_SUCCESS;
}

} // anonymous namespace

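// Entry point: sets up per-thread sub-buffers, command queues and kernels,
// then runs the Test jobs across the thread pool.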
int TestMacro_Int_Double(const Func *f, MTdata d, bool relaxedMode)
{
    TestInfo test_info{};
    cl_int error;

    logFunctionInfo(f->name, sizeof(cl_double), relaxedMode);

    // Init test_info
    test_info.threadCount = GetThreadCount();
    test_info.subBufferSize = BUFFER_SIZE
        / (sizeof(cl_double) * RoundUpToNextPowerOfTwo(test_info.threadCount));
    test_info.scale = getTestScale(sizeof(cl_double));

    test_info.step = (cl_uint)test_info.subBufferSize * test_info.scale;
    if (test_info.step / test_info.subBufferSize != test_info.scale)
    {
        // there was overflow
        test_info.jobCount = 1;
    }
    else
    {
        test_info.jobCount = (cl_uint)((1ULL << 32) / test_info.step);
    }

    test_info.f = f;
    test_info.ftz = f->ftz || gForceFTZ;
    test_info.relaxedMode = relaxedMode;

    // cl_kernels aren't thread safe, so we make one for each vector size for
    // every thread
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        test_info.k[i].resize(test_info.threadCount, nullptr);
    }

    test_info.tinfo.resize(test_info.threadCount);
    for (cl_uint i = 0; i < test_info.threadCount; i++)
    {
        cl_buffer_region region = {
            i * test_info.subBufferSize * sizeof(cl_double),
            test_info.subBufferSize * sizeof(cl_double)
        };
        test_info.tinfo[i].inBuf =
            clCreateSubBuffer(gInBuffer, CL_MEM_READ_ONLY,
                              CL_BUFFER_CREATE_TYPE_REGION, &region, &error);
        if (error || NULL == test_info.tinfo[i].inBuf)
        {
            vlog_error("Error: Unable to create sub-buffer of gInBuffer for "
                       "region {%zd, %zd}\n",
                       region.origin, region.size);
            goto exit;
        }

        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            test_info.tinfo[i].outBuf[j] = clCreateSubBuffer(
                gOutBuffer[j], CL_MEM_WRITE_ONLY, CL_BUFFER_CREATE_TYPE_REGION,
                &region, &error);
            if (error || NULL == test_info.tinfo[i].outBuf[j])
            {
                vlog_error("Error: Unable to create sub-buffer of "
                           "gOutBuffer[%d] for region {%zd, %zd}\n",
                           (int)j, region.origin, region.size);
                goto exit;
            }
        }
        test_info.tinfo[i].tQueue =
            clCreateCommandQueue(gContext, gDevice, 0, &error);
        if (NULL == test_info.tinfo[i].tQueue || error)
        {
            vlog_error("clCreateCommandQueue failed. (%d)\n", error);
            goto exit;
        }
    }

    // Init the kernels
    {
        BuildKernelInfo build_info{ test_info.threadCount, test_info.k,
                                    test_info.programs, f->nameInCode,
                                    relaxedMode };
        if ((error = ThreadPool_Do(BuildKernelFn,
                                   gMaxVectorSizeIndex - gMinVectorSizeIndex,
                                   &build_info)))
            goto exit;
    }

    // Run the kernels
    if (!gSkipCorrectnessTesting)
    {
        error = ThreadPool_Do(Test, test_info.jobCount, &test_info);

        if (error) goto exit;

        if (gWimpyMode)
            vlog("Wimp pass");
        else
            vlog("passed");
    }

    vlog("\n");

exit:
    // Release
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        for (auto &kernel : test_info.k[i])
        {
            clReleaseKernel(kernel);
        }
    }

    return error;
}