//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "common.h"
#include "function_list.h"
#include "test_functions.h"
#include "utility.h"

#include <cinttypes>
#include <cstring>

namespace {

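// Build the kernel that applies the function under test element-wise to a
// buffer of doubles, for the given vector size.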
int BuildKernel(const char *name, int vectorSize, cl_uint kernel_count,
                cl_kernel *k, cl_program *p, bool relaxedMode)
{
    const char *c[] = { "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
                        "__kernel void math_kernel",
                        sizeNames[vectorSize],
                        "( __global double",
                        sizeNames[vectorSize],
                        "* out, __global double",
                        sizeNames[vectorSize],
                        "* in )\n"
                        "{\n"
                        "   size_t i = get_global_id(0);\n"
                        "   out[i] = ",
                        name,
                        "( in[i] );\n"
                        "}\n" };

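    // For the size-3 vector case a special kernel is used: double3 data is
    // loaded and stored with vload3/vstore3, and the last work-item handles
    // any partial vector at the end of the buffer explicitly.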
    const char *c3[] = {
        "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
        "__kernel void math_kernel",
        sizeNames[vectorSize],
        "( __global double* out, __global double* in)\n"
        "{\n"
        "   size_t i = get_global_id(0);\n"
        "   if( i + 1 < get_global_size(0) )\n"
        "   {\n"
        "       double3 f0 = vload3( 0, in + 3 * i );\n"
        "       f0 = ",
        name,
        "( f0 );\n"
        "       vstore3( f0, 0, out + 3*i );\n"
        "   }\n"
        "   else\n"
        "   {\n"
        "       size_t parity = i & 1;   // Figure out how many elements are "
        "left over after BUFFER_SIZE % (3*sizeof(double)). Assume power of two "
        "buffer size \n"
        "       double3 f0;\n"
        "       switch( parity )\n"
        "       {\n"
        "           case 1:\n"
        "               f0 = (double3)( in[3*i], NAN, NAN ); \n"
        "               break;\n"
        "           case 0:\n"
        "               f0 = (double3)( in[3*i], in[3*i+1], NAN ); \n"
        "               break;\n"
        "       }\n"
        "       f0 = ",
        name,
        "( f0 );\n"
        "       switch( parity )\n"
        "       {\n"
        "           case 0:\n"
        "               out[3*i+1] = f0.y; \n"
        "               // fall through\n"
        "           case 1:\n"
        "               out[3*i] = f0.x; \n"
        "               break;\n"
        "       }\n"
        "   }\n"
        "}\n"
    };

    const char **kern = c;
    size_t kernSize = sizeof(c) / sizeof(c[0]);

    if (sizeValues[vectorSize] == 3)
    {
        kern = c3;
        kernSize = sizeof(c3) / sizeof(c3[0]);
    }

    char testName[32];
    snprintf(testName, sizeof(testName) - 1, "math_kernel%s",
             sizeNames[vectorSize]);

    return MakeKernels(kern, (cl_uint)kernSize, testName, kernel_count, k, p,
                       relaxedMode);
}

cl_int BuildKernelFn(cl_uint job_id, cl_uint thread_id UNUSED, void *p)
{
    BuildKernelInfo *info = (BuildKernelInfo *)p;
    cl_uint vectorSize = gMinVectorSizeIndex + job_id;
    return BuildKernel(info->nameInCode, vectorSize, info->threadCount,
                       info->kernels[vectorSize].data(),
                       &(info->programs[vectorSize]), info->relaxedMode);
}

// Thread specific data for a worker thread
struct ThreadInfo
{
    // Input and output buffers for the thread
    clMemWrapper inBuf;
    Buffers outBuf;

    float maxError = 0.0f; // max error value
    double maxErrorValue = 0.0; // input value at which the max error occurred

    // Per thread command queue to improve performance
    clCommandQueueWrapper tQueue;
};

struct TestInfo
{
    size_t subBufferSize; // Size of the sub-buffer in elements
    const Func *f; // A pointer to the function info

    // Programs for various vector sizes.
    Programs programs;

    // Thread-specific kernels for each vector size:
    // k[vector_size][thread_id]
    KernelMatrix k;

    // Array of thread specific information
    std::vector<ThreadInfo> tinfo;

    cl_uint threadCount; // Number of worker threads
    cl_uint jobCount; // Number of jobs
    cl_uint step; // step between each chunk and the next.
    cl_uint scale; // stride between individual test values
    float ulps; // max_allowed ulps
    int ftz; // non-zero if running in flush to zero mode

    int isRangeLimited; // 1 if the function is only to be evaluated over a
                        // range
    float half_sin_cos_tan_limit;
    bool relaxedMode; // True if test is running in relaxed mode, false
                      // otherwise.
};

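// Per-job worker: generate a chunk of input values, run the kernels for every
// vector size on this thread's sub-buffers, compute the correctly rounded
// reference result with the host math function, and compare the two within
// the allowed ulp bound.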
cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
{
    TestInfo *job = (TestInfo *)data;
    size_t buffer_elements = job->subBufferSize;
    size_t buffer_size = buffer_elements * sizeof(cl_double);
    cl_uint scale = job->scale;
    cl_uint base = job_id * (cl_uint)job->step;
    ThreadInfo *tinfo = &(job->tinfo[thread_id]);
    float ulps = job->ulps;
    dptr func = job->f->dfunc;
    cl_int error;
    int ftz = job->ftz;
    bool relaxedMode = job->relaxedMode;

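    // Make sure the host FPU computes the long double reference results at
    // full precision (relevant on x87, where the default precision control
    // may be set lower).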
    Force64BitFPUPrecision();

    // start the map of the output arrays
    cl_event e[VECTOR_SIZE_COUNT];
    cl_ulong *out[VECTOR_SIZE_COUNT];
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        out[j] = (cl_ulong *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], CL_FALSE, CL_MAP_WRITE, 0,
            buffer_size, 0, NULL, e + j, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush failed\n");

    // Write the new values to the input array
    cl_double *p = (cl_double *)gIn + thread_id * buffer_elements;
    for (size_t j = 0; j < buffer_elements; j++)
        p[j] = DoubleFromUInt32(base + j * scale);

    if ((error = clEnqueueWriteBuffer(tinfo->tQueue, tinfo->inBuf, CL_FALSE, 0,
                                      buffer_size, p, 0, NULL, NULL)))
    {
        vlog_error("Error: clEnqueueWriteBuffer failed! err: %d\n", error);
        return error;
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        // Wait for the map to finish
        if ((error = clWaitForEvents(1, e + j)))
        {
            vlog_error("Error: clWaitForEvents failed! err: %d\n", error);
            return error;
        }
        if ((error = clReleaseEvent(e[j])))
        {
            vlog_error("Error: clReleaseEvent failed! err: %d\n", error);
            return error;
        }

        // Fill the result buffer with garbage, so that old results don't carry
        // over
        uint32_t pattern = 0xffffdead;
        memset_pattern4(out[j], &pattern, buffer_size);
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject failed! err: %d\n",
                       error);
            return error;
        }

        // run the kernel
        size_t vectorCount =
            (buffer_elements + sizeValues[j] - 1) / sizeValues[j];
        cl_kernel kernel = job->k[j][thread_id]; // each worker thread has its
                                                 // own copy of the cl_kernel
        cl_program program = job->programs[j];

        if ((error = clSetKernelArg(kernel, 0, sizeof(tinfo->outBuf[j]),
                                    &tinfo->outBuf[j])))
        {
            LogBuildError(program);
            return error;
        }
        if ((error = clSetKernelArg(kernel, 1, sizeof(tinfo->inBuf),
                                    &tinfo->inBuf)))
        {
            LogBuildError(program);
            return error;
        }

        if ((error = clEnqueueNDRangeKernel(tinfo->tQueue, kernel, 1, NULL,
                                            &vectorCount, NULL, 0, NULL, NULL)))
        {
            vlog_error("FAILED -- could not execute kernel\n");
            return error;
        }
    }


    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 2 failed\n");

    if (gSkipCorrectnessTesting) return CL_SUCCESS;

    // Calculate the correctly rounded reference result
    cl_double *r = (cl_double *)gOut_Ref + thread_id * buffer_elements;
    cl_double *s = (cl_double *)p;
    for (size_t j = 0; j < buffer_elements; j++)
        r[j] = (cl_double)func.f_f(s[j]);

    // Read the data back -- no need to wait for the first N-1 buffers but wait
    // for the last buffer. This is an in order queue.
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        cl_bool blocking = (j + 1 < gMaxVectorSizeIndex) ? CL_FALSE : CL_TRUE;
        out[j] = (cl_ulong *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], blocking, CL_MAP_READ, 0,
            buffer_size, 0, NULL, NULL, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Verify data
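    // The reference bits are compared against each vector size's output; only
    // on a bitwise mismatch is the ulp error computed, with the subnormal
    // latitude allowed in FTZ/relaxed mode applied below.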
    cl_ulong *t = (cl_ulong *)r;
    for (size_t j = 0; j < buffer_elements; j++)
    {
        for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
        {
            cl_ulong *q = out[k];

            // If we aren't getting the correctly rounded result
            if (t[j] != q[j])
            {
                cl_double test = ((cl_double *)q)[j];
                long double correct = func.f_f(s[j]);
                float err = Bruteforce_Ulp_Error_Double(test, correct);
                int fail = !(fabsf(err) <= ulps);

                if (fail)
                {
                    if (ftz || relaxedMode)
                    {
                        // retry per section 6.5.3.2
                        if (IsDoubleResultSubnormal(correct, ulps))
                        {
                            fail = fail && (test != 0.0f);
                            if (!fail) err = 0.0f;
                        }

                        // retry per section 6.5.3.3
                        if (IsDoubleSubnormal(s[j]))
                        {
                            long double correct2 = func.f_f(0.0L);
                            long double correct3 = func.f_f(-0.0L);
                            float err2 =
                                Bruteforce_Ulp_Error_Double(test, correct2);
                            float err3 =
                                Bruteforce_Ulp_Error_Double(test, correct3);
                            fail = fail
                                && ((!(fabsf(err2) <= ulps))
                                    && (!(fabsf(err3) <= ulps)));
                            if (fabsf(err2) < fabsf(err)) err = err2;
                            if (fabsf(err3) < fabsf(err)) err = err3;

                            // retry per section 6.5.3.4
                            if (IsDoubleResultSubnormal(correct2, ulps)
                                || IsDoubleResultSubnormal(correct3, ulps))
                            {
                                fail = fail && (test != 0.0f);
                                if (!fail) err = 0.0f;
                            }
                        }
                    }
                }
                if (fabsf(err) > tinfo->maxError)
                {
                    tinfo->maxError = fabsf(err);
                    tinfo->maxErrorValue = s[j];
                }
                if (fail)
                {
                    vlog_error("\nERROR: %s%s: %f ulp error at %.13la "
                               "(0x%16.16" PRIx64 "): *%.13la vs. %.13la\n",
                               job->f->name, sizeNames[k], err, s[j],
                               ((cl_ulong *)s)[j], r[j], test);
                    return -1;
                }
            }
        }
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject %d failed 2! err: %d\n",
                       j, error);
            return error;
        }
    }

    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 3 failed\n");


    if (0 == (base & 0x0fffffff))
    {
        if (gVerboseBruteForce)
        {
            vlog("base:%14u step:%10u buf_elements:%10zu scale:%10u "
                 "ulps:%5.3f ThreadCount:%2u\n",
                 base, job->step, buffer_elements, job->scale, job->ulps,
                 job->threadCount);
        }
        else
        {
            vlog(".");
        }
        fflush(stdout);
    }

    return CL_SUCCESS;
}

} // anonymous namespace

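// Test entry point: partition the test buffers across worker threads, build
// the kernels for every vector size, then run the Test jobs on a thread pool
// and report the worst ulp error seen.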
int TestFunc_Double_Double(const Func *f, MTdata d, bool relaxedMode)
{
    TestInfo test_info{};
    cl_int error;
    float maxError = 0.0f;
    double maxErrorVal = 0.0;

    logFunctionInfo(f->name, sizeof(cl_double), relaxedMode);
    // Init test_info
    test_info.threadCount = GetThreadCount();
    test_info.subBufferSize = BUFFER_SIZE
        / (sizeof(cl_double) * RoundUpToNextPowerOfTwo(test_info.threadCount));
    test_info.scale = getTestScale(sizeof(cl_double));

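    // Each job covers subBufferSize values strided by scale through the 32-bit
    // input patterns; jobCount is sized so that the jobs together sweep the
    // whole 2^32 pattern space.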
    test_info.step = (cl_uint)test_info.subBufferSize * test_info.scale;
    if (test_info.step / test_info.subBufferSize != test_info.scale)
    {
        // there was overflow
        test_info.jobCount = 1;
    }
    else
    {
        test_info.jobCount = (cl_uint)((1ULL << 32) / test_info.step);
    }

    test_info.f = f;
    test_info.ulps = f->double_ulps;
    test_info.ftz = f->ftz || gForceFTZ;
    test_info.relaxedMode = relaxedMode;

    // cl_kernels aren't thread safe, so we make one for each vector size for
    // every thread
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        test_info.k[i].resize(test_info.threadCount, nullptr);
    }

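    // Each worker thread gets its own sub-buffer region of the global
    // input/output buffers and its own command queue, so threads can enqueue
    // work independently of one another.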
    test_info.tinfo.resize(test_info.threadCount);
    for (cl_uint i = 0; i < test_info.threadCount; i++)
    {
        cl_buffer_region region = {
            i * test_info.subBufferSize * sizeof(cl_double),
            test_info.subBufferSize * sizeof(cl_double)
        };
        test_info.tinfo[i].inBuf =
            clCreateSubBuffer(gInBuffer, CL_MEM_READ_ONLY,
                              CL_BUFFER_CREATE_TYPE_REGION, &region, &error);
        if (error || NULL == test_info.tinfo[i].inBuf)
        {
            vlog_error("Error: Unable to create sub-buffer of gInBuffer for "
                       "region {%zd, %zd}\n",
                       region.origin, region.size);
            goto exit;
        }

        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            test_info.tinfo[i].outBuf[j] = clCreateSubBuffer(
                gOutBuffer[j], CL_MEM_WRITE_ONLY, CL_BUFFER_CREATE_TYPE_REGION,
                &region, &error);
            if (error || NULL == test_info.tinfo[i].outBuf[j])
            {
                vlog_error("Error: Unable to create sub-buffer of "
                           "gOutBuffer[%d] for region {%zd, %zd}\n",
                           (int)j, region.origin, region.size);
                goto exit;
            }
        }
        test_info.tinfo[i].tQueue =
            clCreateCommandQueue(gContext, gDevice, 0, &error);
        if (NULL == test_info.tinfo[i].tQueue || error)
        {
            vlog_error("clCreateCommandQueue failed. (%d)\n", error);
            goto exit;
        }
    }

    // Init the kernels
    {
        BuildKernelInfo build_info{ test_info.threadCount, test_info.k,
                                    test_info.programs, f->nameInCode,
                                    relaxedMode };
        if ((error = ThreadPool_Do(BuildKernelFn,
                                   gMaxVectorSizeIndex - gMinVectorSizeIndex,
                                   &build_info)))
            goto exit;
    }

    // Run the kernels
    if (!gSkipCorrectnessTesting)
    {
        error = ThreadPool_Do(Test, test_info.jobCount, &test_info);

        // Accumulate the arithmetic errors
        for (cl_uint i = 0; i < test_info.threadCount; i++)
        {
            if (test_info.tinfo[i].maxError > maxError)
            {
                maxError = test_info.tinfo[i].maxError;
                maxErrorVal = test_info.tinfo[i].maxErrorValue;
            }
        }

        if (error) goto exit;

        if (gWimpyMode)
            vlog("Wimp pass");
        else
            vlog("passed");

        vlog("\t%8.2f @ %a", maxError, maxErrorVal);
    }

    vlog("\n");

exit:
    // Release
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        for (auto &kernel : test_info.k[i])
        {
            clReleaseKernel(kernel);
        }
    }

    return error;
}