/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can
 * be found in the LICENSE file.
 *
 */

//
//
//

#include <stdio.h>
#include <assert.h>

//
//
//

#include "common/cl/assert_cl.h"

#include "block.h"
#include "grid.h"
#include "config_cl.h"
#include "runtime_cl_12.h"

//
// FIXME -- these comments are now quite stale
//
//
// HANDLE/ACQUIRE RELEASE
//
// The runtime vends handles just in case we decide to exploit shared
// virtual memory. But for most platforms and devices we will have a
// pool of host-managed handles and on the device there will be a
// table that maps the host handle to a device-managed memory block.
//
// HANDLE READINESS
//
// A host handle may reference a path or a raster which is not ready
// for use further down the pipeline because it hasn't yet been
// processed by the device.
//
// The simplest scheme for providing every handle a readiness state is
// to build a map that marks a new handle as not-ready while it is
// being processed by a particular grid id. When the final
// sub-pipeline grid responsible for the path or raster is complete,
// mark the handle as ready and eventually return the grid id to the
// pool. This can be performed on a separate thread.
//
// The side benefit of this approach is that a handle's reference
// count integral type can spare some bits for its associated grid id.
//
// A more memory-intensive approach uses a 64-bit epoch+grid key and
// relies on the ~56 bits of epoch space to avoid any post-sub-pipeline
// status update by assuming that a handle and grid will match or
// mismatch when queried.
//
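//
// For orientation only: a hedged sketch of the layout this file
// assumes for the reference count union. The authoritative
// definition lives in the runtime headers; the member names below
// match their uses in this file but the field widths are an
// illustrative assumption, not the actual declaration.
//
#if 0
union skc_handle_refcnt_sketch // hypothetical stand-in for union skc_handle_refcnt
{
  skc_ushort hd;  // combined view -- zero iff both counts are zero

  struct {
    skc_uchar h;  // host-held reference count
    skc_uchar d;  // device-held reference count
  };
};
#endif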

#define SKC_HANDLE_REFCNT_HOST_BITS   (SKC_MEMBER_SIZE(union skc_handle_refcnt,h) * 8)
#define SKC_HANDLE_REFCNT_DEVICE_BITS (SKC_MEMBER_SIZE(union skc_handle_refcnt,d) * 8)

#define SKC_HANDLE_REFCNT_HOST_MAX    SKC_BITS_TO_MASK(SKC_HANDLE_REFCNT_HOST_BITS)
#define SKC_HANDLE_REFCNT_DEVICE_MAX  SKC_BITS_TO_MASK(SKC_HANDLE_REFCNT_DEVICE_BITS)
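
//
// Worked example of the macros above, under the same assumption of
// 8-bit `h` and `d` members: the *_BITS macros evaluate to 1 * 8 = 8
// and the *_MAX masks to SKC_BITS_TO_MASK(8) = 0xFF -- the saturation
// values the retain paths below test against.
//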

//
//
//

static
void
skc_handle_reclaim_create(struct skc_runtime     * const runtime,
                          struct skc_handle_pool * const handle_pool,
                          skc_handle_reclaim_type_e const reclaim_type,
                          skc_device_kernel_id      const kernel_id)
{
  struct skc_handle_reclaim * const reclaim = handle_pool->reclaim + reclaim_type;

  // init counters
  reclaim->bih.rem = 0;

  // acquire kernel
  reclaim->kernel    = skc_device_acquire_kernel(runtime->device,kernel_id);
  reclaim->kernel_id = kernel_id;

  // set default args
  cl(SetKernelArg(reclaim->kernel,0,SKC_CL_ARG(runtime->block_pool.ids.drw)));
  cl(SetKernelArg(reclaim->kernel,1,SKC_CL_ARG(runtime->block_pool.blocks.drw)));
  cl(SetKernelArg(reclaim->kernel,2,SKC_CL_ARG(runtime->block_pool.atomics.drw)));
  cl(SetKernelArg(reclaim->kernel,3,SKC_CL_ARG(runtime->config->block_pool.ring_mask)));
  cl(SetKernelArg(reclaim->kernel,4,SKC_CL_ARG(runtime->handle_pool.map.drw)));
}

static
void
skc_handle_reclaim_dispose(struct skc_runtime      * const runtime,
                           skc_handle_reclaim_type_e const reclaim_type)
{
  struct skc_handle_reclaim * const reclaim = runtime->handle_pool.reclaim + reclaim_type;

  cl(ReleaseKernel(reclaim->kernel));
}

//
//
//

#define SKC_HANDLE_POOL_BLOCKS_PAD  8

void
skc_handle_pool_create(struct skc_runtime     * const runtime,
                       struct skc_handle_pool * const handle_pool,
                       skc_uint                 const size,
                       skc_uint                 const width,
                       skc_uint                 const recs)
{
  skc_uint const blocks         = (size + width - 1) / width;
  skc_uint const blocks_padded  = blocks + SKC_HANDLE_POOL_BLOCKS_PAD;
  skc_uint const handles        = blocks        * width;
  skc_uint const handles_padded = blocks_padded * width;
  skc_uint const recs_padded    = recs + 2; // one for the runtime pointer and one for the list head node

  skc_extent_pdrw_alloc(runtime,&handle_pool->map,handles * sizeof(skc_block_id_t));

  handle_pool->handle.indices = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,handles_padded * sizeof(*handle_pool->handle.indices));
  handle_pool->handle.refcnts = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,handles        * sizeof(*handle_pool->handle.refcnts));
  handle_pool->block.indices  = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,blocks_padded  * sizeof(*handle_pool->block.indices));
  handle_pool->recs           = skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,recs_padded    * sizeof(*handle_pool->recs));

  // initialize handles and refcnts
  for (skc_uint ii=0; ii<handles; ii++)
    handle_pool->handle.indices[ii] = ii;

  for (skc_uint ii=0; ii<handles; ii++)
    handle_pool->handle.refcnts[ii].hd = 0;

  handle_pool->handle.count = handles;

  // initialize block accounting
  for (skc_uint ii=0; ii<blocks_padded; ii++)
    handle_pool->block.indices[ii] = ii;

  handle_pool->block.count = blocks_padded;
  handle_pool->block.width = width;

  handle_pool->block.tos = blocks; // pop = pre-decrement  / push = post-increment
  handle_pool->block.bos = blocks; // pop = post-increment / push = pre-decrement
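
  //
  // The two stacks share block.indices[]: entries [0,tos) are blocks
  // whose handles are available to acquire ("readable") and entries
  // [bos,count) are empty scratch blocks for collecting handles to
  // reclaim ("writable"). The SKC_HANDLE_POOL_BLOCKS_PAD extra
  // blocks seed the writable side.
  //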

  // initialize recs -- first two elements are interpreted differently
  handle_pool->recs[0].runtime = runtime;
  handle_pool->recs[1]         = (union skc_handle_reclaim_rec){ .rem = recs, .head = 2 };

  for (skc_uint ii=2; ii<recs_padded; ii++)
    handle_pool->recs[ii] = (union skc_handle_reclaim_rec){ .index = ii, .next = ii+1 };

  handle_pool->recs[recs_padded-1].next = SKC_UINT_MAX;
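
  // Resulting recs[] layout:
  //
  //   recs[0]  : pointer back to the runtime (recovered via recN - recN->index)
  //   recs[1]  : free-list head node -- { .rem = recs, .head = 2 }
  //   recs[2..]: free-list nodes     -- { .index = ii, .next = ii+1 }
  //
  // with the last node's next terminated by SKC_UINT_MAX.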

  // initialize acquire
  handle_pool->acquire.rem = 0;

  // create reclaimers
  skc_handle_reclaim_create(runtime,
                            handle_pool,
                            SKC_HANDLE_RECLAIM_TYPE_PATH,
                            SKC_DEVICE_KERNEL_ID_PATHS_RECLAIM);

  skc_handle_reclaim_create(runtime,
                            handle_pool,
                            SKC_HANDLE_RECLAIM_TYPE_RASTER,
                            SKC_DEVICE_KERNEL_ID_RASTERS_RECLAIM);
}

//
//
//

void
skc_handle_pool_dispose(struct skc_runtime     * const runtime,
                        struct skc_handle_pool * const handle_pool)
{
  skc_handle_reclaim_dispose(runtime,SKC_HANDLE_RECLAIM_TYPE_RASTER);
  skc_handle_reclaim_dispose(runtime,SKC_HANDLE_RECLAIM_TYPE_PATH);

  skc_runtime_host_perm_free(runtime,handle_pool->recs);
  skc_runtime_host_perm_free(runtime,handle_pool->block.indices);
  skc_runtime_host_perm_free(runtime,handle_pool->handle.refcnts);
  skc_runtime_host_perm_free(runtime,handle_pool->handle.indices);

  skc_extent_pdrw_free(runtime,&handle_pool->map);
}

//
//
//

static
skc_uint
skc_handle_pool_block_readable_pop(struct skc_runtime     * const runtime,
                                   struct skc_handle_pool * const handle_pool)
{
  SKC_SCHEDULER_WAIT_WHILE(runtime->scheduler,handle_pool->block.tos == 0);

  skc_uint const index = handle_pool->block.indices[--handle_pool->block.tos];

#if 0
  skc_handle_t * handles = handle_pool->handle.indices + (index + 1) * handle_pool->block.width;
  for (skc_uint ii=0; ii<handle_pool->block.width; ii++)
    printf("R-: %u\n",*--handles);
#endif

  return index;
}

static
void
skc_handle_pool_block_readable_push(struct skc_handle_pool * const handle_pool,
                                    skc_uint                 const index)
{
  handle_pool->block.indices[handle_pool->block.tos++] = index;

#if 0
  skc_handle_t * handles = handle_pool->handle.indices + (index + 1) * handle_pool->block.width;
  for (skc_uint ii=0; ii<handle_pool->block.width; ii++)
    printf("R+: %u\n",*--handles);
#endif
}

static
skc_uint
skc_handle_pool_block_writable_pop(struct skc_runtime     * const runtime,
                                   struct skc_handle_pool * const handle_pool)
{
  SKC_SCHEDULER_WAIT_WHILE(runtime->scheduler,handle_pool->block.bos == handle_pool->block.count);

  return handle_pool->block.indices[handle_pool->block.bos++];
}

static
void
skc_handle_pool_block_writable_push(struct skc_handle_pool * const handle_pool,
                                    skc_uint                 const block_idx)
{
  handle_pool->block.indices[--handle_pool->block.bos] = block_idx;
}
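
//
// Block lifecycle: the acquire path below pops a readable block and
// drains its handles; once empty, the block index is pushed onto the
// writable stack so it can later collect handles queued for
// reclamation; when the reclamation kernel completes, the refilled
// block is pushed back onto the readable stack.
//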

//
// May need to acquire the path or raster handle *early* just to be
// sure one exists
//

skc_handle_t
skc_runtime_handle_device_acquire(struct skc_runtime * const runtime)
{
  struct skc_handle_pool * const handle_pool = &runtime->handle_pool;

  // acquire a block of handles at a time
  if (handle_pool->acquire.rem == 0)
    {
      skc_uint const block_idx = skc_handle_pool_block_readable_pop(runtime,handle_pool);

      handle_pool->acquire.block   = block_idx;
      handle_pool->acquire.rem     = handle_pool->block.width;
      handle_pool->acquire.handles = handle_pool->handle.indices + (block_idx + 1) * handle_pool->block.width;
    }

  // load handle from next block slot
  skc_uint     const rem    = --handle_pool->acquire.rem;
  skc_handle_t const handle = *--handle_pool->acquire.handles;

  // initialize refcnt for handle
  handle_pool->handle.refcnts[handle] = (union skc_handle_refcnt){ .h = 1, .d = 1 };

  // if this was the last handle in the block then move the block id
  // to the reclamation stack to be used as a scratchpad
  if (rem == 0) {
    skc_handle_pool_block_writable_push(handle_pool,handle_pool->acquire.block);
  }

  return handle;
}
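
#if 0
//
// A minimal usage sketch (hypothetical caller): a freshly acquired
// handle starts with refcnt { h:1, d:1 }, so both the host and the
// device pipeline must eventually drop their references before the
// handle is reclaimed.
//
skc_handle_t const handle = skc_runtime_handle_device_acquire(runtime);

// ... hand the handle to the path/raster pipeline ...

skc_runtime_path_device_release(runtime,&handle,1);
#endif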

//
//
//

static
void
skc_handle_reclaim_completion(union skc_handle_reclaim_rec * const recN)
{
  // get the root rec, which contains the pointer to the runtime
  union skc_handle_reclaim_rec * const rec0 = recN - recN->index;
  union skc_handle_reclaim_rec * const rec1 = rec0 + 1;

  // return the block for reading
  skc_handle_pool_block_readable_push(&rec0->runtime->handle_pool,recN->block);

  // recN is the new head of the free list
  recN->next = rec1->head;
  rec1->head = recN->index;
  rec1->rem += 1;
}

static
void
skc_handle_reclaim_cb(cl_event event, cl_int status, union skc_handle_reclaim_rec * const recN)
{
  SKC_CL_CB(status);

  union skc_handle_reclaim_rec * const rec0 = recN - recN->index;

  // as quickly as possible, enqueue the next stage of the pipeline on
  // the context's command scheduler
  SKC_SCHEDULER_SCHEDULE(rec0->runtime->scheduler,skc_handle_reclaim_completion,recN);
}

//
// FIXME -- is there an issue launching on the host thread?
//

static
void
skc_handle_reclaim_launch(struct skc_runtime           * const runtime,
                          struct skc_handle_pool       * const handle_pool,
                          struct skc_handle_reclaim    * const reclaim,
                          union skc_handle_reclaim_rec * const recN)
{
  cl(SetKernelArg(reclaim->kernel,
                  5,
                  handle_pool->block.width * sizeof(skc_handle_t),
                  reclaim->bih.handles));

  // acquire a cq
  cl_command_queue cq = skc_runtime_acquire_cq_in_order(runtime);

  cl_event complete;

  // the kernel grid is shaped by the target device
  skc_device_enqueue_kernel(runtime->device,
                            reclaim->kernel_id,
                            cq,
                            reclaim->kernel,
                            handle_pool->block.width,
                            0,NULL,&complete);

  cl(SetEventCallback(complete,CL_COMPLETE,skc_handle_reclaim_cb,recN));
  cl(ReleaseEvent(complete));

  // kickstart kernel execution
  cl(Flush(cq));

  // release the cq
  skc_runtime_release_cq_in_order(runtime,cq);
}

//
// reclaim a handle
//

static
union skc_handle_reclaim_rec *
skc_handle_acquire_reclaim_rec(struct skc_runtime     * const runtime,
                               struct skc_handle_pool * const handle_pool)
{
  union skc_handle_reclaim_rec * const rec1 = handle_pool->recs + 1;

  SKC_SCHEDULER_WAIT_WHILE(runtime->scheduler,rec1->rem == 0);

  union skc_handle_reclaim_rec * const recN = handle_pool->recs + rec1->head;

  rec1->head = recN->next;
  rec1->rem -= 1;

  // fprintf(stderr,"rec1->rem = %u\n",rec1->rem);

  return recN;
}

static
void
skc_runtime_device_reclaim(struct skc_runtime        * const runtime,
                           struct skc_handle_pool    * const handle_pool,
                           struct skc_handle_reclaim * const reclaim,
                           skc_handle_t                const handle)
{
  // grab a new block?
  if (reclaim->bih.rem == 0)
    {
      skc_uint const block_idx = skc_handle_pool_block_writable_pop(runtime,handle_pool);

      reclaim->bih.block   = block_idx;
      reclaim->bih.rem     = handle_pool->block.width;
      reclaim->bih.handles = handle_pool->handle.indices + (block_idx + 1) * handle_pool->block.width;
    }

  // store handle -- handle's refcnt was already set to {0:0}
  *--reclaim->bih.handles = handle;

  // if the block is full then launch the reclamation kernel
  if (--reclaim->bih.rem == 0)
    {
      union skc_handle_reclaim_rec * recN = skc_handle_acquire_reclaim_rec(runtime,handle_pool);

      recN->block = reclaim->bih.block;

      skc_handle_reclaim_launch(runtime,handle_pool,reclaim,recN);
    }
}
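
//
// Note that a partially filled reclamation block is not flushed here;
// the kernel launches only once block.width handles have accumulated.
//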

//
// Validate host-provided handles before retaining.
//
// Retain validation consists of:
//
// - correct handle type
// - handle is in range of pool
// - host refcnt is not zero
// - host refcnt is not at the maximum value
//
// After validation, retain the handles for the host
//

static
skc_err
skc_runtime_handle_host_validated_retain(struct skc_runtime       * const runtime,
                                         skc_typed_handle_type_e    const handle_type,
                                         skc_typed_handle_t const * const typed_handles,
                                         uint32_t                   const count)
{
  //
  // FIXME -- test to make sure handles aren't completely out-of-range integers
  //

  union skc_handle_refcnt * const refcnts = runtime->handle_pool.handle.refcnts;

  for (skc_uint ii=0; ii<count; ii++)
    {
      skc_typed_handle_t const typed_handle = typed_handles[ii];

      if (!SKC_TYPED_HANDLE_IS_TYPE(typed_handle,handle_type))
        {
          return SKC_ERR_HANDLE_INVALID;
        }
      else
        {
          skc_handle_t const handle = SKC_TYPED_HANDLE_TO_HANDLE(typed_handle);

          if (handle >= runtime->handle_pool.handle.count)
            {
              return SKC_ERR_HANDLE_INVALID;
            }
          else
            {
              union skc_handle_refcnt * const refcnt_ptr = refcnts + handle;
              skc_uint                  const host       = refcnt_ptr->h;

              if (host == 0)
                {
                  return SKC_ERR_HANDLE_INVALID;
                }
              else if (host == SKC_HANDLE_REFCNT_HOST_MAX)
                {
                  return SKC_ERR_HANDLE_OVERFLOW;
                }
            }
        }
    }

  //
  // all the handles validated, so retain them all...
  //
  for (skc_uint ii=0; ii<count; ii++)
    refcnts[SKC_TYPED_HANDLE_TO_HANDLE(typed_handles[ii])].h++;

  return SKC_ERR_SUCCESS;
}

//
//
//

skc_err
skc_runtime_path_host_retain(struct skc_runtime * const runtime,
                             skc_path_t   const *       paths,
                             uint32_t                   count)
{
  return skc_runtime_handle_host_validated_retain(runtime,
                                                  SKC_TYPED_HANDLE_TYPE_IS_PATH,
                                                  paths,
                                                  count);
}

skc_err
skc_runtime_raster_host_retain(struct skc_runtime * const runtime,
                               skc_raster_t const *       rasters,
                               uint32_t                   count)
{
  return skc_runtime_handle_host_validated_retain(runtime,
                                                  SKC_TYPED_HANDLE_TYPE_IS_RASTER,
                                                  rasters,
                                                  count);
}

//
//
//

skc_err
skc_runtime_raster_host_flush(struct skc_runtime * const runtime,
                              skc_raster_t const *       rasters,
                              uint32_t                   count)
{
  skc_grid_deps_force(runtime->deps,rasters,count);

  return SKC_ERR_SUCCESS;
}

skc_err
skc_runtime_path_host_flush(struct skc_runtime * const runtime,
                            skc_path_t   const *       paths,
                            uint32_t                   count)
{
  skc_grid_deps_force(runtime->deps,paths,count);

  return SKC_ERR_SUCCESS;
}

//
// Validate host-provided handles before releasing.
//
// Release validation consists of:
//
// - correct handle type
// - handle is in range of pool
// - host refcnt is not zero
//
// After validation, release the handles for the host
//

static
skc_err
skc_runtime_host_validated_release(struct skc_runtime       * const runtime,
                                   skc_typed_handle_type_e    const type,
                                   skc_handle_reclaim_type_e  const reclaim_type,
                                   skc_typed_handle_t const * const handles,
                                   uint32_t                   const count)
{
  struct skc_handle_pool  * const handle_pool = &runtime->handle_pool;
  union skc_handle_refcnt * const refcnts     = handle_pool->handle.refcnts;

  for (skc_uint ii=0; ii<count; ii++)
    {
      skc_typed_handle_t const typed_handle = handles[ii];

      if (!SKC_TYPED_HANDLE_IS_TYPE(typed_handle,type))
        {
          return SKC_ERR_HANDLE_INVALID;
        }
      else
        {
          skc_handle_t const handle = SKC_TYPED_HANDLE_TO_HANDLE(typed_handle);

          if (handle >= handle_pool->handle.count)
            {
              return SKC_ERR_HANDLE_INVALID;
            }
          else
            {
              union skc_handle_refcnt * const refcnt_ptr = refcnts + handle;
              skc_uint                  const host       = refcnt_ptr->h;

              if (host == 0)
                {
                  return SKC_ERR_HANDLE_INVALID;
                }
            }
        }
    }

  //
  // all the handles validated, so release them all...
  //
  struct skc_handle_reclaim * const reclaim = handle_pool->reclaim + reclaim_type;

  for (skc_uint ii=0; ii<count; ii++)
    {
      skc_handle_t              const handle     = SKC_TYPED_HANDLE_TO_HANDLE(handles[ii]);
      union skc_handle_refcnt * const refcnt_ptr = refcnts + handle;
      union skc_handle_refcnt         refcnt     = *refcnt_ptr;

      refcnt.h -= 1;
      *refcnt_ptr = refcnt;

      if (refcnt.hd == 0) {
        skc_runtime_device_reclaim(runtime,handle_pool,reclaim,handle);
      }
    }

  return SKC_ERR_SUCCESS;
}

//
//
//

skc_err
skc_runtime_path_host_release(struct skc_runtime * const runtime,
                              skc_path_t   const *       paths,
                              uint32_t                   count)
{
  return skc_runtime_host_validated_release(runtime,
                                            SKC_TYPED_HANDLE_TYPE_IS_PATH,
                                            SKC_HANDLE_RECLAIM_TYPE_PATH,
                                            paths,
                                            count);
}

skc_err
skc_runtime_raster_host_release(struct skc_runtime * const runtime,
                                skc_raster_t const *       rasters,
                                uint32_t                   count)
{
  return skc_runtime_host_validated_release(runtime,
                                            SKC_TYPED_HANDLE_TYPE_IS_RASTER,
                                            SKC_HANDLE_RECLAIM_TYPE_RASTER,
                                            rasters,
                                            count);
}

//
// Validate host-provided handles before retaining on the device.
//
// - correct handle type
// - handle is in range of pool
// - host refcnt is not zero
// - device refcnt is not at the maximum value
//

skc_err
skc_runtime_handle_device_validate_retain(struct skc_runtime      * const runtime,
                                          skc_typed_handle_type_e   const type,
                                          skc_typed_handle_t const *      handles,
                                          uint32_t                        count)
{
  union skc_handle_refcnt * const refcnts = runtime->handle_pool.handle.refcnts;

  while (count-- > 0)
    {
      skc_typed_handle_t const typed_handle = *handles++;

      if (!SKC_TYPED_HANDLE_IS_TYPE(typed_handle,type))
        {
          return SKC_ERR_HANDLE_INVALID;
        }
      else
        {
          skc_handle_t const handle = SKC_TYPED_HANDLE_TO_HANDLE(typed_handle);

          if (handle >= runtime->handle_pool.handle.count)
            {
              return SKC_ERR_HANDLE_INVALID;
            }
          else
            {
              union skc_handle_refcnt * const refcnt_ptr = refcnts + handle;
              union skc_handle_refcnt         refcnt     = *refcnt_ptr;

              if (refcnt.h == 0)
                {
                  return SKC_ERR_HANDLE_INVALID;
                }
              else if (refcnt.d == SKC_HANDLE_REFCNT_DEVICE_MAX)
                {
                  return SKC_ERR_HANDLE_OVERFLOW;
                }
            }
        }
    }

  return SKC_ERR_SUCCESS;
}

//
// After validation, retain the handles for the device
//

void
skc_runtime_handle_device_retain(struct skc_runtime * const runtime,
                                 skc_handle_t const *       handles,
                                 uint32_t                   count)
{
  union skc_handle_refcnt * const refcnts = runtime->handle_pool.handle.refcnts;

  while (count-- > 0)
    refcnts[SKC_TYPED_HANDLE_TO_HANDLE(*handles++)].d++;
}

//
// Release the device-held handles -- no validation required!
//

static
void
skc_runtime_handle_device_release(struct skc_runtime      * const runtime,
                                  skc_handle_reclaim_type_e const reclaim_type,
                                  skc_handle_t const *            handles,
                                  skc_uint                        count)
{
  struct skc_handle_pool    * const handle_pool = &runtime->handle_pool;
  union skc_handle_refcnt   * const refcnts     = handle_pool->handle.refcnts;
  struct skc_handle_reclaim * const reclaim     = handle_pool->reclaim + reclaim_type;

  while (count-- > 0) {
    skc_handle_t              const handle     = *handles++;
    union skc_handle_refcnt * const refcnt_ptr = refcnts + handle;
    union skc_handle_refcnt         refcnt     = *refcnt_ptr;

    refcnt.d -= 1;
    *refcnt_ptr = refcnt;

#if 0
    printf("%8u = { %u, %u }\n",handle,refcnt.h,refcnt.d);
#endif

    if (refcnt.hd == 0) {
      skc_runtime_device_reclaim(runtime,handle_pool,reclaim,handle);
    }
  }
}

//
//
//

void
skc_runtime_path_device_release(struct skc_runtime * const runtime,
                                skc_handle_t const *       handles,
                                skc_uint                   count)
{
  skc_runtime_handle_device_release(runtime,SKC_HANDLE_RECLAIM_TYPE_PATH,handles,count);
}

void
skc_runtime_raster_device_release(struct skc_runtime * const runtime,
                                  skc_handle_t const *       handles,
                                  skc_uint                   count)
{
  skc_runtime_handle_device_release(runtime,SKC_HANDLE_RECLAIM_TYPE_RASTER,handles,count);
}

//
//
//