/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can
 * be found in the LICENSE file.
 *
 */

//
// There is either no or limited support for C11 atomics in the C
// compilers we're targeting so, for now, implement this in C++11.
//

//
// FIXME -- get rid of the extent and have supplicants bring their own
// task structure with a known base structure
//

extern "C" {

#include "scheduler.h"
#include "runtime_cl_12.h" // FIXME -- all allocations are extent structures

}

//
// SUPER-IMPORTANT:
//
// THIS LOW-LEVEL SCHEDULER ASSUMES THERE IS A MAXIMUM NUMBER OF
// PIPELINE GRIDS ACTIVE AT ANY TIME.
//
// THIS IS A SAFE INVARIANT BECAUSE WE CONTROL THE VARIOUS POOL SIZE
// KNOBS AT CONTEXT CONFIGURATION TIME.
//
// TRANSLATION: DO NOT LARD UP THIS IMPLEMENTATION WITH WELL-INTENDED
// BUT ENTIRELY UNNECESSARY LOGIC.
//

#include <mutex>
#include <condition_variable>
#include <new>     // placement new in skc_scheduler_create()
#include <cstdio>  // fprintf() in the debug variant of SKC_SCHEDULER_EXECUTE()

//
// GRID STATES
//

typedef enum skc_scheduler_command_state {

  SKC_SCHEDULER_COMMAND_STATE_READY,
  SKC_SCHEDULER_COMMAND_STATE_WAITING,
  SKC_SCHEDULER_COMMAND_STATE_EXECUTING,
  SKC_SCHEDULER_COMMAND_STATE_COMPLETED,

  SKC_SCHEDULER_COMMAND_STATE_COUNT

} skc_scheduler_command_state;

//
//
//

struct skc_scheduler_command
{
  void *                      data;
  skc_scheduler_command_pfn   pfn;
  skc_scheduler_command_state state;
  char const *                name;
};

#if 0
struct skc_scheduler_command
{
  union {
    struct scheduler             * scheduler;
    struct skc_scheduler_command * next;
  };
  skc_scheduler_command_pfn        pfn;
};
#endif

//
//
//

struct skc_scheduler
{
  //
  // FIXME -- consider adding a backpointer to the runtime or other
  // critical state
  //

  struct skc_scheduler_command * extent;

  struct {
    std::mutex                   mutex;

    skc_ushort                 * indices;
    skc_uint                     rem;
  } available;

  struct {
    std::mutex                   mutex;
    std::condition_variable      condvar;

    skc_ushort                 * indices;
    skc_uint                     size;
    skc_uint                     head;
    skc_uint                     tail;
  } waiting;
};
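
//
// NOTE:
//
//   - "available" is a LIFO stack of free command indices guarded by
//     its own mutex.
//
//   - "waiting" is a FIFO ring of scheduled command indices.  The
//     ring is allocated with one extra slot -- size+1 -- so that
//     head == tail unambiguously means "empty" while the ring can
//     still hold all `size` commands.  See skc_scheduler_create().
//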

//
//
//

//
// the debug variant below is wrapped in do { ... } while (0) so the
// macro always expands to a single statement -- e.g. in an unbraced
// if/else
//

#if 1
#define SKC_SCHEDULER_EXECUTE(sc)               \
  sc->pfn(sc->data)
#else
#define SKC_SCHEDULER_EXECUTE(sc)               \
  do {                                          \
    fprintf(stderr,"EXECUTE+ %s\n",sc->name);   \
    sc->pfn(sc->data);                          \
    fprintf(stderr,"EXECUTE- %s\n",sc->name);   \
  } while (0)
#endif

//
//
//

struct skc_scheduler *
skc_scheduler_create(struct skc_runtime * const runtime, skc_uint const size)
{
  // allocate scheduler
  struct skc_scheduler * scheduler = (struct skc_scheduler*)
    skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,sizeof(*scheduler));

  // run the std::mutex and std::condition_variable constructors via placement new
  new (scheduler) skc_scheduler();

  // initialize members
  scheduler->extent            = (struct skc_scheduler_command*)
    skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,sizeof(*scheduler->extent) * size);

  scheduler->available.indices = (skc_ushort*)
    skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,sizeof(*scheduler->available.indices) * size);

  scheduler->available.rem     = size;

  scheduler->waiting.indices   = (skc_ushort*)
    skc_runtime_host_perm_alloc(runtime,SKC_MEM_FLAGS_READ_WRITE,sizeof(*scheduler->waiting.indices) * (size + 1));

  scheduler->waiting.size      = size + 1; // the ring has an extra slot
  scheduler->waiting.head      = 0;
  scheduler->waiting.tail      = 0;

  for (skc_uint ii=0; ii<size; ii++)
    scheduler->available.indices[ii] = ii;

  return scheduler;
}

void
skc_scheduler_dispose(struct skc_runtime   * const runtime,
                      struct skc_scheduler * const scheduler)
{
  // free members
  skc_runtime_host_perm_free(runtime,scheduler->waiting.indices);
  skc_runtime_host_perm_free(runtime,scheduler->available.indices);
  skc_runtime_host_perm_free(runtime,scheduler->extent);

  // run the std::mutex and std::condition_variable destructors
  scheduler->~skc_scheduler();

  // free struct
  skc_runtime_host_perm_free(runtime,scheduler);
}

//
//
//

static
skc_scheduler_command_t
skc_scheduler_acquire(struct skc_scheduler * const scheduler,
                      skc_scheduler_command_pfn    pfn,
                      void                       * data,
                      char const                 * name)
{
  skc_scheduler_command_t command = SKC_SCHEDULER_COMMAND_INVALID;

  {
    // mutex lock
    std::lock_guard<std::mutex> lock(scheduler->available.mutex);

    // get first available index
    if (scheduler->available.rem > 0)
      command = scheduler->available.indices[--scheduler->available.rem];

    // mutex unlock
  }

  if (command != SKC_SCHEDULER_COMMAND_INVALID)
    {
      // initialize command
      struct skc_scheduler_command * const sc = scheduler->extent + command;

      sc->pfn   = pfn;
      sc->data  = data;
      sc->name  = name;
      sc->state = SKC_SCHEDULER_COMMAND_STATE_READY;
    }

  // return command handle
  return command;
}

//
//
//

static
void
skc_scheduler_release(struct skc_scheduler  * const scheduler,
                      skc_scheduler_command_t const command)
{
  // mutex lock
  std::lock_guard<std::mutex> lock(scheduler->available.mutex);

  // return the index to the available pool
  scheduler->available.indices[scheduler->available.rem++] = command;

  // mutex unlock
}

//
//
//

static
void
skc_scheduler_append(struct skc_scheduler  * const scheduler,
                     skc_scheduler_command_t const command)
{
  scheduler->extent[command].state = SKC_SCHEDULER_COMMAND_STATE_WAITING;

  {
    // mutex unique lock (locks on construction)
    std::unique_lock<std::mutex> lock(scheduler->waiting.mutex);

    // note that we guarantee there is always room to store the command

    // append index to ring
    scheduler->waiting.indices[scheduler->waiting.tail] = command;

    // update last
    if (++scheduler->waiting.tail == scheduler->waiting.size)
      scheduler->waiting.tail = 0;

    // mutex unlock
  }

  // signal condvar
  scheduler->waiting.condvar.notify_one();
}

//
//
//

skc_scheduler_command_t
skc_scheduler_schedule(struct skc_scheduler    * const scheduler,
                       skc_scheduler_command_pfn const pfn,
                       void                    *       data,
                       char              const * const name)
{
  while (true)
    {
      skc_scheduler_command_t const command = skc_scheduler_acquire(scheduler,pfn,data,name);

      if (command != SKC_SCHEDULER_COMMAND_INVALID)
        {
          skc_scheduler_append(scheduler,command);

          return command;
        }
      else
        {
          // no command slots available -- wait for one to complete
          skc_scheduler_wait(scheduler);
        }
    }
}
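
//
// Illustrative only -- a minimal usage sketch, not part of this
// translation unit.  The callback and helper below are hypothetical;
// a real pfn receives the `data` pointer it was scheduled with.
//

#if 0

static
void
my_completion(void * data) // hypothetical completion callback
{
  fprintf(stderr,"done: %s\n",(char const *)data);
}

static
void
example(struct skc_scheduler * const scheduler)
{
  // enqueue a command -- blocks only if all command slots are in use
  skc_scheduler_command_t const command =
    skc_scheduler_schedule(scheduler,my_completion,(void*)"hello","my_completion");

  // drain: execute any commands already waiting in the ring
  skc_scheduler_yield(scheduler);

  (void)command;
}

#endif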

//
// try to pop but don't wait
//

static
void
skc_scheduler_pop(struct skc_scheduler    * const scheduler,
                  skc_scheduler_command_t * const command)
{
  *command = SKC_SCHEDULER_COMMAND_INVALID;

  // mutex lock
  std::unique_lock<std::mutex> lock(scheduler->waiting.mutex);

  if (scheduler->waiting.head != scheduler->waiting.tail)
    {
      // get index at first
      *command = scheduler->waiting.indices[scheduler->waiting.head];

      // update first
      if (++scheduler->waiting.head == scheduler->waiting.size)
        scheduler->waiting.head = 0;
    }

  // mutex unlock
}

static
void
skc_scheduler_pop_wait(struct skc_scheduler    * const scheduler,
                       skc_scheduler_command_t * const command)
{
  // mutex unique lock -- locks on construction
  std::unique_lock<std::mutex> lock(scheduler->waiting.mutex);

  // wait for a command -- the predicate form rechecks the ring after
  // every wakeup, so spurious wakeups are handled
  scheduler->waiting.condvar.wait(lock,[scheduler] {
      return scheduler->waiting.head != scheduler->waiting.tail;
    });

  // get index at first
  *command = scheduler->waiting.indices[scheduler->waiting.head];

  // update first
  if (++scheduler->waiting.head == scheduler->waiting.size)
    scheduler->waiting.head = 0;

  // mutex unlock
}

//
//
//

static
void
skc_scheduler_command_execute(struct skc_scheduler_command * const sc)
{
  sc->state = SKC_SCHEDULER_COMMAND_STATE_EXECUTING;

  SKC_SCHEDULER_EXECUTE(sc);

  sc->state = SKC_SCHEDULER_COMMAND_STATE_COMPLETED;
}

static
void
skc_scheduler_execute(struct skc_scheduler  * const scheduler,
                      skc_scheduler_command_t const command)
{
  // execute
  skc_scheduler_command_execute(scheduler->extent + command);

  // release
  skc_scheduler_release(scheduler,command);
}
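
//
// NOTE: a command executes with no scheduler lock held, so its pfn
// may itself schedule new commands
//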

//
// drain the scheduler
//

skc_bool
skc_scheduler_yield(struct skc_scheduler * const scheduler) // wait for 0 or more completed grids
{
  // fprintf(stderr,"YIELD+\n");

  while (true)
    {
      skc_scheduler_command_t command;

      skc_scheduler_pop(scheduler,&command);

      if (command == SKC_SCHEDULER_COMMAND_INVALID) {
        // fprintf(stderr,"YIELD!\n");
        return false; // ring is drained
      }

      // otherwise execute the completion record
      skc_scheduler_execute(scheduler,command);
    }
}

//
// wait for at least one grid to complete
//

void
skc_scheduler_wait(struct skc_scheduler * const scheduler)
{
  // fprintf(stderr,"WAIT+\n");

  skc_scheduler_command_t command;

  // wait for a completion record
  skc_scheduler_pop_wait(scheduler,&command);

  // execute the completion record
  skc_scheduler_execute(scheduler,command);

  // process remaining
  skc_scheduler_yield(scheduler);

  // fprintf(stderr,"WAIT-\n");
}

//
// wait for exactly one grid to complete
//

void
skc_scheduler_wait_one(struct skc_scheduler * const scheduler)
{
  // fprintf(stderr,"WAIT1+\n");

  skc_scheduler_command_t command;

  // wait for a completion record
  skc_scheduler_pop_wait(scheduler,&command);

  // execute the completion record
  skc_scheduler_execute(scheduler,command);

  // fprintf(stderr,"WAIT1-\n");
}
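
//
// NOTE: the three drain entry points differ only in how they block:
//
//   skc_scheduler_yield    -- never blocks; executes whatever is waiting
//   skc_scheduler_wait     -- blocks for one completion, then drains the rest
//   skc_scheduler_wait_one -- blocks for one completion and returns
//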

//
//
//

#if 0

//
// wait for a specific grid to complete
//
//   true  : success
//   false : command wasn't started
//
// FIXME -- get rid of this idiom
//

skc_bool
skc_scheduler_wait_for(struct skc_scheduler  * const scheduler,
                       skc_scheduler_command_t const command)
{
  struct skc_scheduler_command * const sc = scheduler->extent + command;

  // command not started
  if (sc->state == SKC_SCHEDULER_COMMAND_STATE_READY)
    return false; // SKC_ERR_COMMAND_NOT_STARTED;

  // command is already complete
  if (sc->state == SKC_SCHEDULER_COMMAND_STATE_COMPLETED)
    return true; // SKC_ERR_SUCCESS;

  // force wip grids to start
  // skc_grid_force(grid_wait_for);

  // otherwise, wait!
  while (true)
    {
      skc_scheduler_command_t next;

      // wait for a completion record
      skc_scheduler_pop_wait(scheduler,&next);

      // execute the completion record
      skc_scheduler_execute(scheduler,next);

      // return if this was a match
      if (next == command)
        return true; // SKC_ERR_SUCCESS;
    }
}

//
//
//

void
skc_thread_sleep(skc_ulong const msecs) // NOTE: requires <thread> and <chrono> if enabled
{
  std::this_thread::sleep_for(std::chrono::milliseconds(msecs));
}

#endif

//
//
//