//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "common.h"
#include "function_list.h"
#include "test_functions.h"
#include "utility.h"

#include <cstring>

namespace {

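// Build the program and kernel that evaluate the function under test for one
// vector size, using a generated ternary (float, float, float) -> float kernel.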
int BuildKernel(const char *name, int vectorSize, cl_kernel *k, cl_program *p,
                bool relaxedMode)
{
    auto kernel_name = GetKernelName(vectorSize);
    auto source = GetTernaryKernel(kernel_name, name, ParameterType::Float,
                                   ParameterType::Float, ParameterType::Float,
                                   ParameterType::Float, vectorSize);
    std::array<const char *, 1> sources{ source.c_str() };
    return MakeKernel(sources.data(), sources.size(), kernel_name.c_str(), k, p,
                      relaxedMode);
}

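// Data shared with the per-thread kernel build jobs.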
struct BuildKernelInfo2
{
    cl_kernel *kernels;
    Programs &programs;
    const char *nameInCode;
    bool relaxedMode; // Whether to build with -cl-fast-relaxed-math.
};

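// ThreadPool callback: each job builds the kernel for one vector size.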
cl_int BuildKernelFn(cl_uint job_id, cl_uint thread_id UNUSED, void *p)
{
    BuildKernelInfo2 *info = (BuildKernelInfo2 *)p;
    cl_uint vectorSize = gMinVectorSizeIndex + job_id;
    return BuildKernel(info->nameInCode, vectorSize, info->kernels + vectorSize,
                       &(info->programs[vectorSize]), info->relaxedMode);
}

} // anonymous namespace

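// Entry point for the single-precision mad test: for each vector size, run the
// mad kernel over random inputs and compute the host reference result. The
// outputs are read back but not verified (see the note in the main loop).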
int TestFunc_mad_Float(const Func *f, MTdata d, bool relaxedMode)
{
    int error;

    logFunctionInfo(f->name, sizeof(cl_float), relaxedMode);

    Programs programs;
    cl_kernel kernels[VECTOR_SIZE_COUNT];
    float maxError = 0.0f;
    float maxErrorVal = 0.0f;
    float maxErrorVal2 = 0.0f;
    float maxErrorVal3 = 0.0f;
    uint64_t step = getTestStep(sizeof(float), BUFFER_SIZE);

    // Init the kernels
    {
        BuildKernelInfo2 build_info{ kernels, programs, f->nameInCode,
                                     relaxedMode };
        if ((error = ThreadPool_Do(BuildKernelFn,
                                   gMaxVectorSizeIndex - gMinVectorSizeIndex,
                                   &build_info)))
            return error;
    }

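    // Each pass fills the input buffers with fresh random bit patterns, runs
    // every kernel vector size, and computes the reference result on the host.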
    for (uint64_t i = 0; i < (1ULL << 32); i += step)
    {
        // Init input array
        cl_uint *p = (cl_uint *)gIn;
        cl_uint *p2 = (cl_uint *)gIn2;
        cl_uint *p3 = (cl_uint *)gIn3;
        for (size_t j = 0; j < BUFFER_SIZE / sizeof(float); j++)
        {
            p[j] = genrand_int32(d);
            p2[j] = genrand_int32(d);
            p3[j] = genrand_int32(d);
        }

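        // Copy the three input buffers to the device (non-blocking writes).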
        if ((error = clEnqueueWriteBuffer(gQueue, gInBuffer, CL_FALSE, 0,
                                          BUFFER_SIZE, gIn, 0, NULL, NULL)))
        {
            vlog_error("\n*** Error %d in clEnqueueWriteBuffer ***\n", error);
            return error;
        }

        if ((error = clEnqueueWriteBuffer(gQueue, gInBuffer2, CL_FALSE, 0,
                                          BUFFER_SIZE, gIn2, 0, NULL, NULL)))
        {
            vlog_error("\n*** Error %d in clEnqueueWriteBuffer2 ***\n", error);
            return error;
        }

        if ((error = clEnqueueWriteBuffer(gQueue, gInBuffer3, CL_FALSE, 0,
                                          BUFFER_SIZE, gIn3, 0, NULL, NULL)))
        {
            vlog_error("\n*** Error %d in clEnqueueWriteBuffer3 ***\n", error);
            return error;
        }

        // write garbage into output arrays
        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            uint32_t pattern = 0xffffdead;
            memset_pattern4(gOut[j], &pattern, BUFFER_SIZE);
            if ((error =
                     clEnqueueWriteBuffer(gQueue, gOutBuffer[j], CL_FALSE, 0,
                                          BUFFER_SIZE, gOut[j], 0, NULL, NULL)))
            {
                vlog_error(
                    "\n*** Error %d in clEnqueueWriteBuffer(gOutBuffer[%d]) ***\n",
                    error, j);
                goto exit;
            }
        }

        // Run the kernels
        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            size_t vectorSize = sizeof(cl_float) * sizeValues[j];
            size_t localCount = (BUFFER_SIZE + vectorSize - 1)
                / vectorSize; // BUFFER_SIZE / vectorSize rounded up
            if ((error = clSetKernelArg(kernels[j], 0, sizeof(gOutBuffer[j]),
                                        &gOutBuffer[j])))
            {
                LogBuildError(programs[j]);
                goto exit;
            }
            if ((error = clSetKernelArg(kernels[j], 1, sizeof(gInBuffer),
                                        &gInBuffer)))
            {
                LogBuildError(programs[j]);
                goto exit;
            }
            if ((error = clSetKernelArg(kernels[j], 2, sizeof(gInBuffer2),
                                        &gInBuffer2)))
            {
                LogBuildError(programs[j]);
                goto exit;
            }
            if ((error = clSetKernelArg(kernels[j], 3, sizeof(gInBuffer3),
                                        &gInBuffer3)))
            {
                LogBuildError(programs[j]);
                goto exit;
            }

            if ((error =
                     clEnqueueNDRangeKernel(gQueue, kernels[j], 1, NULL,
                                            &localCount, NULL, 0, NULL, NULL)))
            {
                vlog_error("FAILED -- could not execute kernel\n");
                goto exit;
            }
        }

        // Get that moving
        if ((error = clFlush(gQueue))) vlog("clFlush failed\n");

        // Calculate the correctly rounded reference result
        float *r = (float *)gOut_Ref;
        float *s = (float *)gIn;
        float *s2 = (float *)gIn2;
        float *s3 = (float *)gIn3;
        for (size_t j = 0; j < BUFFER_SIZE / sizeof(float); j++)
            r[j] = (float)f->func.f_fff(s[j], s2[j], s3[j]);

        // Read the data back
        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            if ((error =
                     clEnqueueReadBuffer(gQueue, gOutBuffer[j], CL_TRUE, 0,
                                         BUFFER_SIZE, gOut[j], 0, NULL, NULL)))
            {
                vlog_error("ReadArray failed %d\n", error);
                goto exit;
            }
        }

        if (gSkipCorrectnessTesting) break;

        // Verify data -- No verification possible.
        // MAD is a random number generator.
        if (0 == (i & 0x0fffffff))
        {
            vlog(".");
            fflush(stdout);
        }
    }

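    // Report results. The max-error statistics stay at zero because mad()
    // outputs are not compared against the reference.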
    if (!gSkipCorrectnessTesting)
    {
        if (gWimpyMode)
            vlog("Wimp pass");
        else
            vlog("passed");

        vlog("\t%8.2f @ {%a, %a, %a}", maxError, maxErrorVal, maxErrorVal2,
             maxErrorVal3);
    }

    vlog("\n");

exit:
    // Release
    for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
    {
        clReleaseKernel(kernels[k]);
    }

    return error;
}