//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "procs.h"
#include "subhelpers.h"
#include "subgroup_common_kernels.h"
#include "subgroup_common_templates.h"
#include "harness/conversions.h"
#include "harness/typeWrappers.h"

namespace {
// Any/All test functions
template <NonUniformVoteOp operation> struct AA
{
    static void gen(cl_int *x, cl_int *t, cl_int *m,
                    const WorkGroupParams &test_params)
    {
        int i, ii, j, k, n;
        int ng = test_params.global_workgroup_size;
        int nw = test_params.local_workgroup_size;
        int ns = test_params.subgroup_size;
        int nj = (nw + ns - 1) / ns;
        int e;
        ng = ng / nw;
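        // ng now counts work-groups rather than work-items; nj is the number
        // of sub-groups per work-group (the last sub-group may be partial).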
        ii = 0;
        log_info("  sub_group_%s...\n", operation_names(operation));
        for (k = 0; k < ng; ++k)
        {
            for (j = 0; j < nj; ++j)
            {
                ii = j * ns;
                n = ii + ns > nw ? nw - ii : ns;
                e = (int)(genrand_int32(gMTdata) % 3);

                // Initialize data matrix indexed by local id and sub group id
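                // e selects the input pattern for this sub-group:
                //   0: all zeros, 1: exactly one non-zero element,
                //   2: all bits set (every element non-zero)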
                switch (e)
                {
                    case 0: memset(&t[ii], 0, n * sizeof(cl_int)); break;
                    case 1:
                        memset(&t[ii], 0, n * sizeof(cl_int));
                        i = (int)(genrand_int32(gMTdata) % (cl_uint)n);
                        t[ii + i] = 41;
                        break;
                    case 2: memset(&t[ii], 0xff, n * sizeof(cl_int)); break;
                }
            }

            // Now map into work group using map from device
            for (j = 0; j < nw; ++j)
            {
                x[j] = t[j];
            }

            x += nw;
            m += 4 * nw;
        }
    }

    static int chk(cl_int *x, cl_int *y, cl_int *mx, cl_int *my, cl_int *m,
                   const WorkGroupParams &test_params)
    {
        int ii, i, j, k, n;
        int ng = test_params.global_workgroup_size;
        int nw = test_params.local_workgroup_size;
        int ns = test_params.subgroup_size;
        int nj = (nw + ns - 1) / ns;
        cl_int taa, raa;
        ng = ng / nw;

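        // taa is the expected (host-computed) any/all result for a sub-group;
        // raa is the result the device kernel actually produced.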
        for (k = 0; k < ng; ++k)
        {
            // Map to arrays indexed by local ID and sub-group ID
            for (j = 0; j < nw; ++j)
            {
                mx[j] = x[j];
                my[j] = y[j];
            }

            for (j = 0; j < nj; ++j)
            {
                ii = j * ns;
                n = ii + ns > nw ? nw - ii : ns;

                // Compute target
                if (operation == NonUniformVoteOp::any)
                {
                    taa = 0;
                    for (i = 0; i < n; ++i) taa |= mx[ii + i] != 0;
                }

                if (operation == NonUniformVoteOp::all)
                {
                    taa = 1;
                    for (i = 0; i < n; ++i) taa &= mx[ii + i] != 0;
                }

                // Check result
                for (i = 0; i < n; ++i)
                {
                    raa = my[ii + i] != 0;
                    if (raa != taa)
                    {
                        log_error("ERROR: sub_group_%s mismatch for local id "
                                  "%d in sub group %d in group %d\n",
                                  operation_names(operation), i, j, k);
                        return TEST_FAIL;
                    }
                }
            }

            x += nw;
            y += nw;
            m += 4 * nw;
        }
        log_info("  sub_group_%s... passed\n", operation_names(operation));
        return TEST_PASS;
    }
};

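// Note: Type and the XY macro are not defined in this file; the test harness
// supplies them when the kernel sources below are built (see subhelpers.h).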
static const char *any_source = "__kernel void test_any(const __global Type "
                                "*in, __global int4 *xy, __global Type *out)\n"
                                "{\n"
                                "    int gid = get_global_id(0);\n"
                                "    XY(xy,gid);\n"
                                "    out[gid] = sub_group_any(in[gid]);\n"
                                "}\n";

static const char *all_source = "__kernel void test_all(const __global Type "
                                "*in, __global int4 *xy, __global Type *out)\n"
                                "{\n"
                                "    int gid = get_global_id(0);\n"
                                "    XY(xy,gid);\n"
                                "    out[gid] = sub_group_all(in[gid]);\n"
                                "}\n";

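// Run the broadcast, reduction (add/max/min), and scan (inclusive and
// exclusive) tests for element type T.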
template <typename T>
int run_broadcast_scan_reduction_for_type(RunTestForType rft)
{
    int error = rft.run_impl<T, BC<T, SubgroupsBroadcastOp::broadcast>>(
        "test_bcast", bcast_source);
    error |= rft.run_impl<T, RED_NU<T, ArithmeticOp::add_>>("test_redadd",
                                                            redadd_source);
    error |= rft.run_impl<T, RED_NU<T, ArithmeticOp::max_>>("test_redmax",
                                                            redmax_source);
    error |= rft.run_impl<T, RED_NU<T, ArithmeticOp::min_>>("test_redmin",
                                                            redmin_source);
    error |= rft.run_impl<T, SCIN_NU<T, ArithmeticOp::add_>>("test_scinadd",
                                                             scinadd_source);
    error |= rft.run_impl<T, SCIN_NU<T, ArithmeticOp::max_>>("test_scinmax",
                                                             scinmax_source);
    error |= rft.run_impl<T, SCIN_NU<T, ArithmeticOp::min_>>("test_scinmin",
                                                             scinmin_source);
    error |= rft.run_impl<T, SCEX_NU<T, ArithmeticOp::add_>>("test_scexadd",
                                                             scexadd_source);
    error |= rft.run_impl<T, SCEX_NU<T, ArithmeticOp::max_>>("test_scexmax",
                                                             scexmax_source);
    error |= rft.run_impl<T, SCEX_NU<T, ArithmeticOp::min_>>("test_scexmin",
                                                             scexmin_source);
    return error;
}

}
// Entry point from main
int test_subgroup_functions(cl_device_id device, cl_context context,
                            cl_command_queue queue, int num_elements,
                            bool useCoreSubgroups)
{
    constexpr size_t global_work_size = 2000;
    constexpr size_t local_work_size = 200;
    WorkGroupParams test_params(global_work_size, local_work_size);
    RunTestForType rft(device, context, queue, num_elements, test_params);
    int error =
        rft.run_impl<cl_int, AA<NonUniformVoteOp::any>>("test_any", any_source);
    error |=
        rft.run_impl<cl_int, AA<NonUniformVoteOp::all>>("test_all", all_source);
    error |= run_broadcast_scan_reduction_for_type<cl_int>(rft);
    error |= run_broadcast_scan_reduction_for_type<cl_uint>(rft);
    error |= run_broadcast_scan_reduction_for_type<cl_long>(rft);
    error |= run_broadcast_scan_reduction_for_type<cl_ulong>(rft);
    error |= run_broadcast_scan_reduction_for_type<cl_float>(rft);
    error |= run_broadcast_scan_reduction_for_type<cl_double>(rft);
    error |= run_broadcast_scan_reduction_for_type<subgroups::cl_half>(rft);
    return error;
}

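// Entry point when sub-groups are supported as a core feature.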
int test_subgroup_functions_core(cl_device_id device, cl_context context,
                                 cl_command_queue queue, int num_elements)
{
    return test_subgroup_functions(device, context, queue, num_elements, true);
}

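// Entry point when sub-groups are only available via the cl_khr_subgroups
// extension; skips if the device does not report the extension.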
int test_subgroup_functions_ext(cl_device_id device, cl_context context,
                                cl_command_queue queue, int num_elements)
{
    bool hasExtension = is_extension_available(device, "cl_khr_subgroups");

    if (!hasExtension)
    {
        log_info(
            "Device does not support 'cl_khr_subgroups'. Skipping the test.\n");
        return TEST_SKIPPED_ITSELF;
    }
    return test_subgroup_functions(device, context, queue, num_elements, false);
}