• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
16 #include "harness/compat.h"
17 
18 #include <stdio.h>
19 #include <string.h>
20 #include <sys/types.h>
21 #include <sys/stat.h>
22 
23 #include "procs.h"
24 
25 #include "checker_mem_host_read_only.hpp"
26 #include "checker_mem_host_write_only.hpp"
27 #include "checker_mem_host_no_access.hpp"
28 
test_mem_host_read_only_buffer_RW(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)29 static int test_mem_host_read_only_buffer_RW(cl_device_id deviceID, cl_context context,
30                                              cl_command_queue queue, cl_bool blocking,
31                                              cl_mem_flags buffer_mem_flag,
32                                              cl_mem_flags parent_buffer_flag,
33                                              enum BUFFER_TYPE buffer_type)
34 {
35   log_info("%s\n", __FUNCTION__);
36   cBuffer_check_mem_host_read_only< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
37   checker.m_blocking = blocking;
38   checker.buffer_mem_flag = buffer_mem_flag;
39   cl_int err;
40   switch (buffer_type) {
41     case _BUFFER:
42       err = checker.SetupBuffer();
43       break;
44     case _Sub_BUFFER:
45       err = checker.SetupASSubBuffer(parent_buffer_flag);
46       break;
47   }
48 
49   test_error(err, __FUNCTION__);
50   checker.Setup_Test_Environment();
51   err= checker.verify_RW_Buffer();
52   test_error(err, __FUNCTION__);
53   clFinish(queue);
54 
55   return err;
56 }
57 
test_mem_host_read_only_buffer_RW_Rect(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)58 static int test_mem_host_read_only_buffer_RW_Rect(cl_device_id deviceID, cl_context context,
59                                                   cl_command_queue queue, cl_bool blocking,
60                                                   cl_mem_flags buffer_mem_flag,
61                                                   cl_mem_flags parent_buffer_flag,
62                                                   enum BUFFER_TYPE buffer_type)
63 {
64   log_info("%s\n", __FUNCTION__);
65 
66   cBuffer_check_mem_host_read_only< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
67   checker.m_blocking = blocking;
68   checker.buffer_mem_flag = buffer_mem_flag;
69   cl_int err;
70   switch (buffer_type) {
71     case _BUFFER:
72       err= checker.SetupBuffer();
73       break;
74     case _Sub_BUFFER:
75       err= checker.SetupASSubBuffer(parent_buffer_flag);
76       break;
77   }
78 
79   test_error(err, __FUNCTION__);
80   checker.Setup_Test_Environment();
81   err = checker.verify_RW_Buffer_rect();
82   test_error(err,  __FUNCTION__);
83   clFinish(queue);
84 
85   return err;
86 }
87 
test_mem_host_read_only_buffer_RW_Mapping(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)88 static int test_mem_host_read_only_buffer_RW_Mapping(cl_device_id deviceID, cl_context context,
89                                                      cl_command_queue queue, cl_bool blocking,
90                                                      cl_mem_flags buffer_mem_flag,
91                                                      cl_mem_flags parent_buffer_flag,
92                                                      enum BUFFER_TYPE buffer_type)
93 {
94   log_info("%s\n", __FUNCTION__);
95 
96   cBuffer_check_mem_host_read_only< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
97   checker.m_blocking = blocking;
98   checker.buffer_mem_flag = buffer_mem_flag;
99   cl_int err;
100   switch (buffer_type) {
101     case _BUFFER:
102       err= checker.SetupBuffer();
103       break;
104     case _Sub_BUFFER:
105       err= checker.SetupASSubBuffer(parent_buffer_flag);
106       break;
107   }
108 
109   test_error(err, __FUNCTION__);
110   checker.Setup_Test_Environment();
111   err = checker.verify_RW_Buffer_mapping();
112   test_error(err, __FUNCTION__);
113   clFinish(queue);
114 
115   return err;
116 }
117 
test_mem_host_read_only_buffer(cl_device_id deviceID,cl_context context,cl_command_queue queue,int num_elements)118 int test_mem_host_read_only_buffer(cl_device_id deviceID, cl_context context,
119                                    cl_command_queue queue, int num_elements)
120 {
121   cl_mem_flags buffer_mem_flags[2] = {CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_HOST_READ_ONLY,
122     CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR | CL_MEM_HOST_READ_ONLY};
123 
124   cl_int err = CL_SUCCESS;
125 
126   cl_bool blocking[2] = {CL_TRUE, CL_FALSE};
127   for (int k=0; k<2; k++)
128     for (int i=0; i< 2; i++)
129     {
130 
131       err = test_mem_host_read_only_buffer_RW(deviceID, context, queue, blocking[i],
132                                               buffer_mem_flags[k], 0, _BUFFER);
133       test_error(err, __FUNCTION__);
134 
135       err = test_mem_host_read_only_buffer_RW_Rect(deviceID, context, queue, blocking[i],
136                                                    buffer_mem_flags[k],0, _BUFFER);
137       test_error(err, __FUNCTION__);
138 
139       err = test_mem_host_read_only_buffer_RW_Mapping(deviceID, context, queue, blocking[i],
140                                                       buffer_mem_flags[k],0, _BUFFER);
141       test_error(err, __FUNCTION__);
142     }
143 
144   return err;
145 }
146 
test_mem_host_read_only_subbuffer(cl_device_id deviceID,cl_context context,cl_command_queue queue,int num_elements)147 int test_mem_host_read_only_subbuffer(cl_device_id deviceID, cl_context context,
148                                       cl_command_queue queue, int num_elements)
149 {
150   cl_mem_flags parent_buffer_mem_flags[1] = {CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_HOST_READ_ONLY};
151 
152   cl_mem_flags buffer_mem_flags[4] = {0, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR,
153     CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR,
154     CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR};
155 
156   cl_int err = CL_SUCCESS;
157 
158   cl_bool blocking[2] = {CL_TRUE, CL_FALSE};
159 
160   for (int p=0; p<1; p++) {
161     for (int k=0; k<4; k++)
162       for (int i=0; i<2; i++)
163       {
164         err = test_mem_host_read_only_buffer_RW(deviceID, context, queue, blocking[i],
165                                                 buffer_mem_flags[k], parent_buffer_mem_flags[p], _Sub_BUFFER);
166         test_error(err, __FUNCTION__);
167 
168         err = test_mem_host_read_only_buffer_RW_Rect(deviceID, context, queue, blocking[i],
169                                                      buffer_mem_flags[k], parent_buffer_mem_flags[p], _Sub_BUFFER);
170         test_error(err, __FUNCTION__);
171 
172         err = test_mem_host_read_only_buffer_RW_Mapping(deviceID, context, queue, blocking[i],
173                                                         buffer_mem_flags[k], parent_buffer_mem_flags[p], _Sub_BUFFER);
174         test_error(err, __FUNCTION__);
175       }
176   }
177 
178   return err;
179 }
180 
//=============================== Write only
182 
test_mem_host_write_only_buffer_RW(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)183 static cl_int test_mem_host_write_only_buffer_RW(cl_device_id deviceID, cl_context context,
184                                                  cl_command_queue queue, cl_bool blocking,
185                                                  cl_mem_flags buffer_mem_flag,
186                                                  cl_mem_flags parent_buffer_flag,
187                                                  enum BUFFER_TYPE buffer_type)
188 {
189   log_info("%s\n", __FUNCTION__);
190 
191   cBuffer_check_mem_host_write_only< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
192 
193   checker.m_blocking = blocking;
194   checker.buffer_mem_flag = buffer_mem_flag;
195   cl_int err;
196   switch (buffer_type) {
197     case _BUFFER:
198       err = checker.SetupBuffer();
199       break;
200     case _Sub_BUFFER:
201       err = checker.SetupASSubBuffer( parent_buffer_flag );
202       break;
203   }
204 
205   test_error(err, __FUNCTION__);
206   checker.Setup_Test_Environment();
207   err= checker.verify_RW_Buffer();
208   test_error(err, __FUNCTION__);
209   clFinish(queue);
210 
211   return err;
212 }
213 
test_mem_host_write_only_buffer_RW_Rect(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)214 static cl_int test_mem_host_write_only_buffer_RW_Rect(cl_device_id deviceID, cl_context context,
215                                                       cl_command_queue queue, cl_bool blocking,
216                                                       cl_mem_flags buffer_mem_flag,
217                                                       cl_mem_flags parent_buffer_flag,
218                                                       enum BUFFER_TYPE buffer_type)
219 {
220   log_info("%s\n", __FUNCTION__);
221 
222   cBuffer_check_mem_host_write_only< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
223   checker.m_blocking = blocking;
224   checker.buffer_mem_flag = buffer_mem_flag;
225   cl_int err;
226   switch (buffer_type) {
227     case _BUFFER:
228       err= checker.SetupBuffer();
229       break;
230     case _Sub_BUFFER:
231       err= checker.SetupASSubBuffer(parent_buffer_flag);
232       break;
233   }
234 
235   test_error(err, __FUNCTION__);
236   checker.Setup_Test_Environment();
237   err= checker.verify_RW_Buffer_rect();
238   test_error(err, __FUNCTION__);
239   clFinish(queue);
240 
241   return err;
242 }
243 
test_mem_host_write_only_buffer_RW_Mapping(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)244 static cl_int test_mem_host_write_only_buffer_RW_Mapping(cl_device_id deviceID, cl_context context,
245                                                          cl_command_queue queue, cl_bool blocking,
246                                                          cl_mem_flags buffer_mem_flag,
247                                                          cl_mem_flags parent_buffer_flag,
248                                                          enum BUFFER_TYPE buffer_type)
249 {
250   log_info("%s\n", __FUNCTION__);
251 
252   cBuffer_check_mem_host_write_only< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
253   checker.m_blocking = blocking;
254   checker.buffer_mem_flag = buffer_mem_flag;
255   cl_int err;
256   switch (buffer_type) {
257     case _BUFFER:
258       err= checker.SetupBuffer();
259       break;
260     case _Sub_BUFFER:
261       err= checker.SetupASSubBuffer(parent_buffer_flag);
262       break;
263   }
264 
265   test_error(err, __FUNCTION__);
266   checker.Setup_Test_Environment();
267   err= checker.verify_RW_Buffer_mapping();
268   test_error(err, __FUNCTION__);
269   clFinish(queue);
270 
271   return err;
272 }
273 
test_mem_host_write_only_buffer(cl_device_id deviceID,cl_context context,cl_command_queue queue,int num_elements)274 int test_mem_host_write_only_buffer(cl_device_id deviceID, cl_context context,
275                                     cl_command_queue queue, int num_elements)
276 {
277   cl_mem_flags buffer_mem_flags[2] = {CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_HOST_WRITE_ONLY,
278     CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR | CL_MEM_HOST_WRITE_ONLY};
279 
280   cl_int err = CL_SUCCESS;
281 
282   cl_bool blocking[2] = {CL_TRUE, CL_FALSE};
283   for (int k=0; k<2; k++)
284     for (int i=0; i<2; i++)
285     {
286       err = test_mem_host_write_only_buffer_RW(deviceID, context, queue, blocking[i],
287                                                buffer_mem_flags[k], 0, _BUFFER);
288       test_error(err, __FUNCTION__);
289 
290       err = test_mem_host_write_only_buffer_RW_Rect(deviceID, context, queue, blocking[i],
291                                                     buffer_mem_flags[k], 0, _BUFFER);
292       test_error(err, __FUNCTION__);
293 
294       err = test_mem_host_write_only_buffer_RW_Mapping(deviceID, context, queue, blocking[i],
295                                                        buffer_mem_flags[k], 0, _BUFFER);
296       test_error(err, __FUNCTION__);
297     }
298 
299   return err;
300 }
301 
test_mem_host_write_only_subbuffer(cl_device_id deviceID,cl_context context,cl_command_queue queue,int num_elements)302 int test_mem_host_write_only_subbuffer(cl_device_id deviceID, cl_context context,
303                                        cl_command_queue queue, int num_elements)
304 {
305   cl_mem_flags parent_buffer_mem_flags[1] = {CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_HOST_WRITE_ONLY};
306 
307   cl_mem_flags buffer_mem_flags[4] = {0, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR,
308     CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR,
309     CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR};
310 
311   cl_int err = CL_SUCCESS;
312 
313   cl_bool blocking[2] = {CL_TRUE, CL_FALSE};
314 
315   for (int p=0; p<1; p++) {
316     for (int m=0; m<4; m++) {
317       for (int i=0; i< 2; i++)
318       {
319         err = test_mem_host_write_only_buffer_RW(deviceID, context, queue, blocking[i],
320                                                  buffer_mem_flags[m], parent_buffer_mem_flags[p], _Sub_BUFFER);
321         test_error(err, __FUNCTION__);
322 
323         err = test_mem_host_write_only_buffer_RW_Rect(deviceID, context, queue, blocking[i],
324                                                       buffer_mem_flags[m], parent_buffer_mem_flags[p], _Sub_BUFFER);
325         test_error(err, __FUNCTION__);
326 
327         err = test_mem_host_write_only_buffer_RW_Mapping(deviceID, context, queue, blocking[i],
328                                                          buffer_mem_flags[m] , parent_buffer_mem_flags[p], _Sub_BUFFER);
329         test_error(err, __FUNCTION__);
330       }
331     }
332   }
333 
334   return err;
335 }
336 
//=====================  NO ACCESS
338 
test_mem_host_no_access_buffer_RW(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)339 static cl_int test_mem_host_no_access_buffer_RW(cl_device_id deviceID, cl_context context,
340                                                 cl_command_queue queue, cl_bool blocking,
341                                                 cl_mem_flags buffer_mem_flag,
342                                                 cl_mem_flags parent_buffer_flag,
343                                                 enum BUFFER_TYPE buffer_type)
344 {
345   log_info("%s\n", __FUNCTION__);
346 
347   cBuffer_check_mem_host_no_access< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
348   checker.m_blocking = blocking;
349   checker.buffer_mem_flag = buffer_mem_flag;
350 
351   cl_int err = CL_SUCCESS;
352   switch (buffer_type) {
353     case _BUFFER:
354       err= checker.SetupBuffer();
355       break;
356     case _Sub_BUFFER:
357       err= checker.SetupASSubBuffer(parent_buffer_flag);
358       break;
359   }
360 
361   test_error(err, __FUNCTION__);
362   checker.Setup_Test_Environment();
363   err= checker.verify_RW_Buffer_mapping();
364   test_error(err, __FUNCTION__);
365   clFinish(queue);
366 
367   return err;
368 }
369 
test_mem_host_no_access_buffer_RW_Rect(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)370 static cl_int test_mem_host_no_access_buffer_RW_Rect(cl_device_id deviceID, cl_context context,
371                                                      cl_command_queue queue, cl_bool blocking,
372                                                      cl_mem_flags buffer_mem_flag,
373                                                      cl_mem_flags parent_buffer_flag,
374                                                      enum BUFFER_TYPE buffer_type)
375 {
376   log_info( "%s\n", __FUNCTION__);
377 
378   cBuffer_check_mem_host_no_access< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
379   checker.m_blocking = blocking;
380   checker.buffer_mem_flag = buffer_mem_flag;
381   cl_int err;
382   switch (buffer_type) {
383     case _BUFFER:
384       err= checker.SetupBuffer();
385       break;
386     case _Sub_BUFFER:
387       err= checker.SetupASSubBuffer(parent_buffer_flag);
388       break;
389   }
390 
391   test_error(err, __FUNCTION__);
392   checker.Setup_Test_Environment();
393   err= checker.verify_RW_Buffer_mapping();
394   test_error(err, __FUNCTION__);
395   clFinish(queue);
396 
397   return err;
398 }
399 
test_mem_host_no_access_buffer_RW_Mapping(cl_device_id deviceID,cl_context context,cl_command_queue queue,cl_bool blocking,cl_mem_flags buffer_mem_flag,cl_mem_flags parent_buffer_flag,enum BUFFER_TYPE buffer_type)400 static cl_int test_mem_host_no_access_buffer_RW_Mapping(cl_device_id deviceID, cl_context context,
401                                                         cl_command_queue queue, cl_bool blocking,
402                                                         cl_mem_flags buffer_mem_flag,
403                                                         cl_mem_flags parent_buffer_flag,
404                                                         enum BUFFER_TYPE buffer_type)
405 {
406   log_info("%s\n", __FUNCTION__);
407 
408   cBuffer_check_mem_host_no_access< TEST_ELEMENT_TYPE > checker(deviceID, context, queue);
409 
410   checker.m_blocking = blocking;
411   checker.buffer_mem_flag = buffer_mem_flag;
412   cl_int err;
413   switch (buffer_type) {
414     case _BUFFER:
415       err= checker.SetupBuffer();
416       break;
417     case _Sub_BUFFER:
418       err= checker.SetupASSubBuffer(parent_buffer_flag);
419       break;
420   }
421 
422   test_error(err, __FUNCTION__);
423   checker.Setup_Test_Environment();
424   err= checker.verify_RW_Buffer_mapping();
425   test_error(err, __FUNCTION__);
426   clFinish(queue);
427 
428   return err;
429 }
430 
test_mem_host_no_access_buffer(cl_device_id deviceID,cl_context context,cl_command_queue queue,int num_elements)431 int test_mem_host_no_access_buffer(cl_device_id deviceID, cl_context context,
432                                    cl_command_queue queue, int num_elements)
433 {
434   cl_mem_flags buffer_mem_flag[2] = {CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_HOST_NO_ACCESS,
435     CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR | CL_MEM_HOST_NO_ACCESS};
436 
437   cl_int err = CL_SUCCESS;
438 
439   cl_bool blocking[2] = {CL_TRUE, CL_FALSE};
440   for (int k=0; k<2; k++)
441     for (int i=0; i<2; i++) {
442       err = test_mem_host_no_access_buffer_RW(deviceID, context, queue, blocking[i],
443                                               buffer_mem_flag[k], 0, _BUFFER);
444       test_error(err, __FUNCTION__);
445 
446       err = test_mem_host_no_access_buffer_RW_Rect(deviceID, context, queue, blocking[i],
447                                                    buffer_mem_flag[k], 0, _BUFFER);
448       test_error(err, __FUNCTION__);
449 
450       err = test_mem_host_no_access_buffer_RW_Mapping(deviceID, context, queue, blocking[i],
451                                                       buffer_mem_flag[k], 0, _BUFFER);
452       test_error(err, __FUNCTION__);
453     }
454 
455   return err;
456 }
457 
test_mem_host_no_access_subbuffer(cl_device_id deviceID,cl_context context,cl_command_queue queue,int num_elements)458 int test_mem_host_no_access_subbuffer(cl_device_id deviceID, cl_context context,
459                                       cl_command_queue queue, int num_elements)
460 {
461   cl_mem_flags parent_buffer_mem_flags[3] = { CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_HOST_NO_ACCESS,
462     CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_HOST_NO_ACCESS,
463     CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR | CL_MEM_HOST_NO_ACCESS};
464 
465   cl_mem_flags buffer_mem_flags[4] = {0, CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR,
466     CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR,
467     CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR};
468 
469   cl_int err = CL_SUCCESS;
470 
471   cl_bool blocking[2] = {CL_TRUE, CL_FALSE};
472   for (int p=0; p<3; p++) {
473     for (int k=0; k<4; k++) {
474       for (int i=0; i<2; i++) {
475         err += test_mem_host_no_access_buffer_RW(deviceID, context, queue, blocking[i],
476                                                 buffer_mem_flags[k], parent_buffer_mem_flags[p], _Sub_BUFFER);
477 
478         err += test_mem_host_no_access_buffer_RW_Rect(deviceID, context, queue, blocking[i],
479                                                      buffer_mem_flags[k], parent_buffer_mem_flags[p], _Sub_BUFFER);
480 
481         err += test_mem_host_no_access_buffer_RW_Mapping( deviceID, context, queue, blocking[i],
482                                                         buffer_mem_flags[k], parent_buffer_mem_flags[p], _Sub_BUFFER);
483       }
484     }
485   }
486 
487   return err;
488 }
489