/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can
 * be found in the LICENSE file.
 *
 */

//
//
//

#include <stdlib.h>

#include "common/cl/assert_cl.h"
#include "extent_cl_12.h"
#include "runtime_cl_12.h"

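//
// Suffix decode, inferred from the section banners below:
//
//   p/t : durable (permanent) / ephemeral (temporary)
//   h/d : host / device
//   r/w : read / write (w1 = written once, rN = read many)
//   g/s : ring / snapshot
//
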
//
// DURABLE R/W HOST EXTENT -- STANDARD CACHED MEMORY
//

void
skc_extent_phrw_alloc(struct skc_runtime     * const runtime,
                      struct skc_extent_phrw * const extent,
                      size_t                   const size)
{
  extent->hrw = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,size);
}

void
skc_extent_phrw_free(struct skc_runtime     * const runtime,
                     struct skc_extent_phrw * const extent)
{
  skc_runtime_host_perm_free(runtime,extent->hrw);
}

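//
// A minimal usage sketch (hypothetical caller and size) -- the same
// alloc/use/free pattern applies to the other extent types below:
//
//   struct skc_extent_phrw extent;
//
//   skc_extent_phrw_alloc(runtime,&extent,4096);
//   ... host code reads and writes extent.hrw ...
//   skc_extent_phrw_free(runtime,&extent);
//
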
//
// DURABLE R/W DEVICE EXTENT -- ALLOCATED FROM DEVICE HEAP
//

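// CL_MEM_HOST_NO_ACCESS (OpenCL 1.2) tells the driver the host will
// never map or enqueue reads/writes of this buffer, allowing it to be
// placed in device-local memory.
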
void
skc_extent_pdrw_alloc(struct skc_runtime     * const runtime,
                      struct skc_extent_pdrw * const extent,
                      size_t                   const size)
{
  extent->drw = skc_runtime_device_perm_alloc(runtime,
                                              CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS,
                                              size);
}

void
skc_extent_pdrw_free(struct skc_runtime     * const runtime,
                     struct skc_extent_pdrw * const extent)
{
  skc_runtime_device_perm_free(runtime,extent->drw);
}

//
// EPHEMERAL DEVICE R/W EXTENT -- ALLOCATED QUICKLY FROM A MANAGED RING
//

void
skc_extent_tdrw_alloc(struct skc_runtime     * const runtime,
                      struct skc_extent_tdrw * const extent,
                      size_t                   const size)
{
  extent->size = size;
  extent->drw  = skc_runtime_device_temp_alloc(runtime,
                                               CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS,
                                               size,&extent->id,NULL);
}

void
skc_extent_tdrw_free(struct skc_runtime     * const runtime,
                     struct skc_extent_tdrw * const extent)
{
  skc_runtime_device_temp_free(runtime,extent->drw,extent->id);
}

void
skc_extent_tdrw_zero(struct skc_extent_tdrw * const extent,
                     cl_command_queue         const cq,
                     cl_event               * const event)
{
  if (extent->size == 0)
    return;

  skc_uint const zero = 0;

  cl(EnqueueFillBuffer(cq,
                       extent->drw,
                       &zero,
                       sizeof(zero),
                       0,
                       extent->size,
                       0,NULL,event));
}

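//
// A minimal round-trip sketch (hypothetical queue and size):
//
//   struct skc_extent_tdrw extent;
//
//   skc_extent_tdrw_alloc(runtime,&extent,4096);
//   skc_extent_tdrw_zero(&extent,cq,NULL);   // asynchronous fill on cq
//   ... enqueue kernels that use extent.drw on cq ...
//   ... once the queue has drained ...
//   skc_extent_tdrw_free(runtime,&extent);
//
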
//
// DURABLE SMALL EXTENTS BACKING ATOMICS
//

void
skc_extent_phr_pdrw_alloc(struct skc_runtime         * const runtime,
                          struct skc_extent_phr_pdrw * const extent,
                          size_t                       const size)
{
  extent->size = size;
  extent->hr   = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_ONLY,size);
  extent->drw  = skc_runtime_device_perm_alloc(runtime,CL_MEM_READ_WRITE,size);
}

void
skc_extent_phr_pdrw_free(struct skc_runtime         * const runtime,
                         struct skc_extent_phr_pdrw * const extent)
{
  skc_runtime_host_perm_free(runtime,extent->hr);
  skc_runtime_device_perm_free(runtime,extent->drw);
}

void
skc_extent_phr_pdrw_read(struct skc_extent_phr_pdrw * const extent,
                         cl_command_queue             const cq,
                         cl_event                   * const event)
{
  if (extent->size == 0)
    return;

  cl(EnqueueReadBuffer(cq,
                       extent->drw,
                       CL_FALSE,
                       0,
                       extent->size,
                       extent->hr,
                       0,NULL,event));
}

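//
// Note: the read above is enqueued with CL_FALSE (non-blocking), so
// extent->hr is not valid until the returned event has completed or
// the command queue has otherwise been synchronized.
//
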
void
skc_extent_phr_pdrw_zero(struct skc_extent_phr_pdrw * const extent,
                         cl_command_queue             const cq,
                         cl_event                   * const event)
{
  if (extent->size == 0)
    return;

  skc_uint const zero = 0;

  cl(EnqueueFillBuffer(cq,
                       extent->drw,
                       &zero,
                       sizeof(zero),
                       0,
                       extent->size,
                       0,NULL,event));
}

//
// EPHEMERAL SMALL EXTENTS BACKING ATOMICS
//

void
skc_extent_thr_tdrw_alloc(struct skc_runtime         * const runtime,
                          struct skc_extent_thr_tdrw * const extent,
                          size_t                       const size)
{
  extent->size = size;
  extent->hr   = skc_runtime_host_temp_alloc(runtime,
                                             SKC_MEM_FLAGS_READ_ONLY,
                                             size,&extent->id.hr,NULL);
  extent->drw  = skc_runtime_device_temp_alloc(runtime,
                                               CL_MEM_READ_WRITE,
                                               size,
                                               &extent->id.drw,
                                               NULL);
}

void
skc_extent_thr_tdrw_free(struct skc_runtime         * const runtime,
                         struct skc_extent_thr_tdrw * const extent)
{
  skc_runtime_host_temp_free(runtime,extent->hr,extent->id.hr);
  skc_runtime_device_temp_free(runtime,extent->drw,extent->id.drw);
}

void
skc_extent_thr_tdrw_read(struct skc_extent_thr_tdrw * const extent,
                         cl_command_queue             const cq,
                         cl_event                   * const event)
{
  if (extent->size == 0)
    return;

  cl(EnqueueReadBuffer(cq,
                       extent->drw,
                       CL_FALSE,
                       0,
                       extent->size,
                       extent->hr,
                       0,NULL,event));
}

void
skc_extent_thr_tdrw_zero(struct skc_extent_thr_tdrw * const extent,
                         cl_command_queue             const cq,
                         cl_event                   * const event)
{
  if (extent->size == 0)
    return;

  skc_uint const zero = 0;

  cl(EnqueueFillBuffer(cq,
                       extent->drw,
                       &zero,
                       sizeof(zero),
                       0,
                       extent->size,
                       0,NULL,event));
}

//
// DURABLE W/1 HOST RING WITH AN EPHEMERAL R/N DEVICE SNAPSHOT
//

void
skc_extent_phw1g_tdrNs_alloc(struct skc_runtime            * const runtime,
                             struct skc_extent_phw1g_tdrNs * const extent,
                             size_t                          const size)
{
  extent->hw1 = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_WRITE_ONLY,size);
}

void
skc_extent_phw1g_tdrNs_free(struct skc_runtime            * const runtime,
                            struct skc_extent_phw1g_tdrNs * const extent)
{
  skc_runtime_host_perm_free(runtime,extent->hw1);
}

void
skc_extent_phw1g_tdrNs_snap_init(struct skc_runtime                 * const runtime,
                                 struct skc_extent_ring             * const ring,
                                 struct skc_extent_phw1g_tdrNs_snap * const snap)
{
  snap->snap = skc_extent_ring_snap_alloc(runtime,ring);
}

void
skc_extent_phw1g_tdrNs_snap_alloc(struct skc_runtime                 * const runtime,
                                  struct skc_extent_phw1g_tdrNs      * const extent,
                                  struct skc_extent_phw1g_tdrNs_snap * const snap,
                                  cl_command_queue                     const cq,
                                  cl_event                           * const event)
{
  struct skc_extent_ring const * const ring = snap->snap->ring;

  skc_uint const count = skc_extent_ring_snap_count(snap->snap);
  size_t   const size  = count * ring->size.elem;

  snap->drN = skc_runtime_device_temp_alloc(runtime,
                                            CL_MEM_READ_ONLY | CL_MEM_HOST_WRITE_ONLY,
                                            size,&snap->id,NULL);

  if (count == 0)
    return;

  // possibly two copies
  skc_uint const index_lo  = snap->snap->reads & ring->size.mask;
  skc_uint const count_max = ring->size.pow2 - index_lo;
  skc_uint const count_lo  = min(count_max,count);
  size_t   const bytes_lo  = count_lo * ring->size.elem;

  if (count > count_max)
    {
      skc_uint const bytes_hi = (count - count_max) * ring->size.elem;

      cl(EnqueueWriteBuffer(cq,
                            snap->drN,
                            CL_FALSE,
                            bytes_lo,
                            bytes_hi,
                            extent->hw1, // offset_hi = 0
                            0,NULL,NULL));
    }

  size_t const offset_lo = index_lo * ring->size.elem;

  cl(EnqueueWriteBuffer(cq,
                        snap->drN,
                        CL_FALSE,
                        0,
                        bytes_lo,
                        (skc_uchar*)extent->hw1 + offset_lo,
                        0,NULL,event));
}

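//
// Worked example of the wrap-around copy above, assuming a
// hypothetical ring with pow2=8, mask=7, elem=4 and a snapshot with
// reads=6, count=5:
//
//   index_lo  = 6 & 7    = 6
//   count_max = 8 - 6    = 2
//   count_lo  = min(2,5) = 2   -> bytes_lo = 8
//   count_hi  = 5 - 2    = 3   -> bytes_hi = 12
//
// The hi copy lands ring elements [0..2] at byte offset 8 of drN and
// the lo copy lands ring elements [6..7] at byte offset 0, so the
// snapshot is contiguous on the device: elements 6,7,0,1,2.
//
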
void
skc_extent_phw1g_tdrNs_snap_free(struct skc_runtime                 * const runtime,
                                 struct skc_extent_phw1g_tdrNs_snap * const snap)
{
  skc_runtime_device_temp_free(runtime,snap->drN,snap->id);
  skc_extent_ring_snap_free(runtime,snap->snap);
}

//
// DURABLE R/W HOST RING WITH AN EPHEMERAL R/N DEVICE SNAPSHOT
//

void
skc_extent_phrwg_tdrNs_alloc(struct skc_runtime            * const runtime,
                             struct skc_extent_phrwg_tdrNs * const extent,
                             size_t                          const size)
{
  extent->hrw = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,size); // WRITE-ONCE
}

void
skc_extent_phrwg_tdrNs_free(struct skc_runtime            * const runtime,
                            struct skc_extent_phrwg_tdrNs * const extent)
{
  skc_runtime_host_perm_free(runtime,extent->hrw);
}

void
skc_extent_phrwg_tdrNs_snap_init(struct skc_runtime                 * const runtime,
                                 struct skc_extent_ring             * const ring,
                                 struct skc_extent_phrwg_tdrNs_snap * const snap)
{
  snap->snap = skc_extent_ring_snap_alloc(runtime,ring);
}

void
skc_extent_phrwg_tdrNs_snap_alloc(struct skc_runtime                 * const runtime,
                                  struct skc_extent_phrwg_tdrNs      * const extent,
                                  struct skc_extent_phrwg_tdrNs_snap * const snap,
                                  cl_command_queue                     const cq,
                                  cl_event                           * const event)
{
  struct skc_extent_ring const * const ring = snap->snap->ring;

  skc_uint const count = skc_extent_ring_snap_count(snap->snap);
  size_t   const size  = count * ring->size.elem;

  snap->drN = skc_runtime_device_temp_alloc(runtime,
                                            CL_MEM_READ_ONLY | CL_MEM_HOST_WRITE_ONLY,
                                            size,&snap->id,NULL);

  if (count == 0)
    return;

  // possibly two copies
  skc_uint const index_lo  = snap->snap->reads & ring->size.mask;
  skc_uint const count_max = ring->size.pow2 - index_lo;
  skc_uint const count_lo  = min(count_max,count);
  size_t   const bytes_lo  = count_lo * ring->size.elem;

  if (count > count_max)
    {
      skc_uint const count_hi = count - count_max;
      skc_uint const bytes_hi = count_hi * ring->size.elem;

      cl(EnqueueWriteBuffer(cq,
                            snap->drN,
                            CL_FALSE,
                            bytes_lo,
                            bytes_hi,
                            extent->hrw, // offset_hi = 0
                            0,NULL,NULL));
    }

  size_t const offset_lo = index_lo * ring->size.elem;

  cl(EnqueueWriteBuffer(cq,
                        snap->drN,
                        CL_FALSE,
                        0,
                        bytes_lo,
                        (skc_uchar*)extent->hrw + offset_lo,
                        0,NULL,event));
}

void
skc_extent_phrwg_tdrNs_snap_free(struct skc_runtime                 * const runtime,
                                 struct skc_extent_phrwg_tdrNs_snap * const snap)
{
  skc_runtime_device_temp_free(runtime,snap->drN,snap->id);
  skc_extent_ring_snap_free(runtime,snap->snap);
}

//
// DURABLE HOST R/W RING WITH AN EPHEMERAL HOST R/1 SNAPSHOT
//
// Note that because the ring and snapshot are both in host memory and
// the snapshot blocks progress until freed, we can simply point the
// fake ephemeral snapshot at the ring's durable extent.
//

void
skc_extent_phrwg_thr1s_alloc(struct skc_runtime            * const runtime,
                             struct skc_extent_phrwg_thr1s * const extent,
                             size_t                          const size)
{
  extent->hrw = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,size); // WRITE-ONCE
}

void
skc_extent_phrwg_thr1s_free(struct skc_runtime            * const runtime,
                            struct skc_extent_phrwg_thr1s * const extent)
{
  skc_runtime_host_perm_free(runtime,extent->hrw);
}

void
skc_extent_phrwg_thr1s_snap_init(struct skc_runtime                 * const runtime,
                                 struct skc_extent_ring             * const ring,
                                 struct skc_extent_phrwg_thr1s_snap * const snap)
{
  snap->snap = skc_extent_ring_snap_alloc(runtime,ring);
}

void
skc_extent_phrwg_thr1s_snap_alloc(struct skc_runtime                 * const runtime,
                                  struct skc_extent_phrwg_thr1s      * const extent,
                                  struct skc_extent_phrwg_thr1s_snap * const snap)
{
  struct skc_extent_ring const * const ring = snap->snap->ring;

  skc_uint const count     = skc_extent_ring_snap_count(snap->snap);
  skc_uint const index_lo  = snap->snap->reads & ring->size.mask;
  skc_uint const count_max = ring->size.pow2 - index_lo;

  snap->count.lo = min(count_max,count);
  snap->hr1.lo   = (skc_uchar*)extent->hrw + (index_lo * ring->size.elem);

  if (count > count_max)
    {
      snap->count.hi = count - count_max;
      snap->hr1.hi   = extent->hrw;
    }
  else
    {
      snap->count.hi = 0;
      snap->hr1.hi   = NULL;
    }
}

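//
// A hypothetical consumer walks the at-most-two spans in ring order:
//
//   process(snap->count.lo,snap->hr1.lo);
//
//   if (snap->count.hi > 0)
//     process(snap->count.hi,snap->hr1.hi);
//
// No copy is enqueued here -- the snapshot aliases the durable host
// ring until skc_extent_phrwg_thr1s_snap_free() releases it.
//
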
void
skc_extent_phrwg_thr1s_snap_free(struct skc_runtime                 * const runtime,
                                 struct skc_extent_phrwg_thr1s_snap * const snap)
{
  skc_extent_ring_snap_free(runtime,snap->snap);
}

//
//
//