/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/* Test of gpr synchronization support. */

#include <grpc/support/sync.h>

#include <stdio.h>
#include <stdlib.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>

#include "src/core/lib/gprpp/thd.h"
#include "test/core/util/test_config.h"

/* ==================Example use of interface===================

   A producer-consumer queue of up to N integers,
   illustrating the use of the calls in this interface.  */

#define N 4

typedef struct queue {
  gpr_cv non_empty; /* Signalled when length becomes non-zero. */
  gpr_cv non_full;  /* Signalled when length becomes non-N. */
  gpr_mu mu;        /* Protects all fields below.
                       (That is, except during initialization or
                       destruction, the fields below should be accessed
                       only by a thread that holds mu.) */
  int head;         /* Index of head of queue 0..N-1. */
  int length;       /* Number of valid elements in queue 0..N. */
  int elem[N];      /* elem[head .. head+length-1] are queue elements. */
} queue;
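
/* Note: element indices wrap modulo N.  For example, with N == 4, head == 3,
   and length == 2, the live elements are elem[3] and elem[0]. */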

/* Initialize *q. */
void queue_init(queue* q) {
  gpr_mu_init(&q->mu);
  gpr_cv_init(&q->non_empty);
  gpr_cv_init(&q->non_full);
  q->head = 0;
  q->length = 0;
}

/* Free storage associated with *q. */
void queue_destroy(queue* q) {
  gpr_mu_destroy(&q->mu);
  gpr_cv_destroy(&q->non_empty);
  gpr_cv_destroy(&q->non_full);
}

/* Wait until there is room in *q, then append x to *q. */
void queue_append(queue* q, int x) {
  gpr_mu_lock(&q->mu);
  /* To wait for a predicate without a deadline, loop on the negation of the
     predicate, and use gpr_cv_wait(..., gpr_inf_future(GPR_CLOCK_MONOTONIC))
     inside the loop to release the lock, wait, and reacquire on each
     iteration.  Code that makes the condition true should use
     gpr_cv_broadcast() on the corresponding condition variable.  The
     predicate must be on state protected by the lock.  */
  while (q->length == N) {
    gpr_cv_wait(&q->non_full, &q->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  if (q->length == 0) { /* Wake threads blocked in queue_remove(). */
    /* It's normal to use gpr_cv_broadcast() or gpr_cv_signal() while
       holding the lock. */
    gpr_cv_broadcast(&q->non_empty);
  }
  q->elem[(q->head + q->length) % N] = x;
  q->length++;
  gpr_mu_unlock(&q->mu);
}

/* If it can be done without blocking, append x to *q and return non-zero.
   Otherwise return 0. */
int queue_try_append(queue* q, int x) {
  int result = 0;
  if (gpr_mu_trylock(&q->mu)) {
    if (q->length != N) {
      if (q->length == 0) { /* Wake threads blocked in queue_remove(). */
        gpr_cv_broadcast(&q->non_empty);
      }
      q->elem[(q->head + q->length) % N] = x;
      q->length++;
      result = 1;
    }
    gpr_mu_unlock(&q->mu);
  }
  return result;
}

/* Wait until *q is non-empty or the deadline abs_deadline passes.  If the
   queue is non-empty, remove its head entry, place it in *head, and return
   non-zero.  Otherwise return 0.  */
int queue_remove(queue* q, int* head, gpr_timespec abs_deadline) {
  int result = 0;
  gpr_mu_lock(&q->mu);
  /* To wait for a predicate with a deadline, loop on the negation of the
     predicate, or until gpr_cv_wait() returns non-zero, indicating that the
     deadline has passed.  Code that makes the condition true should use
     gpr_cv_broadcast() on the corresponding condition variable.  The
     predicate must be on state protected by the lock. */
  while (q->length == 0 && !gpr_cv_wait(&q->non_empty, &q->mu, abs_deadline)) {
  }
  if (q->length != 0) { /* Queue is non-empty. */
    result = 1;
    if (q->length == N) { /* Wake threads blocked in queue_append(). */
      gpr_cv_broadcast(&q->non_full);
    }
    *head = q->elem[q->head];
    q->head = (q->head + 1) % N;
    q->length--;
  } /* else deadline exceeded */
  gpr_mu_unlock(&q->mu);
  return result;
}
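
/* For illustration only (not exercised directly by the tests below): a
   minimal single-threaded use of the queue interface defined above might
   look like this:

     queue q;
     int value;
     queue_init(&q);
     queue_append(&q, 42);  // blocks only while the queue is full
     if (queue_remove(&q, &value, gpr_inf_future(GPR_CLOCK_MONOTONIC))) {
       // value == 42 here
     }
     queue_destroy(&q);

   The concurrent producer/consumer usage appears in many_producers() and
   consumer() below. */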

/* ------------------------------------------------- */
/* Tests for gpr_mu and gpr_cv, and the queue example. */
struct test {
  int nthreads; /* number of threads */
  grpc_core::Thread* threads;

  int64_t iterations; /* number of iterations per thread */
  int64_t counter;
  int thread_count; /* used to allocate thread ids */
  int done;         /* threads not yet completed */
  int incr_step;    /* how much to increment/decrement refcount each time */

  gpr_mu mu; /* protects iterations, counter, thread_count, done */

  gpr_cv cv; /* signalling depends on test */

  gpr_cv done_cv; /* signalled when done == 0 */

  queue q;

  gpr_stats_counter stats_counter;

  gpr_refcount refcount;
  gpr_refcount thread_refcount;
  gpr_event event;
};

/* Return pointer to a new struct test. */
static struct test* test_new(int nthreads, int64_t iterations, int incr_step) {
  struct test* m = static_cast<struct test*>(gpr_malloc(sizeof(*m)));
  m->nthreads = nthreads;
  m->threads = static_cast<grpc_core::Thread*>(
      gpr_malloc(sizeof(*m->threads) * nthreads));
  m->iterations = iterations;
  m->counter = 0;
  m->thread_count = 0;
  m->done = nthreads;
  m->incr_step = incr_step;
  gpr_mu_init(&m->mu);
  gpr_cv_init(&m->cv);
  gpr_cv_init(&m->done_cv);
  queue_init(&m->q);
  gpr_stats_init(&m->stats_counter, 0);
  gpr_ref_init(&m->refcount, 0);
  gpr_ref_init(&m->thread_refcount, nthreads);
  gpr_event_init(&m->event);
  return m;
}

/* Free storage associated with *m. */
static void test_destroy(struct test* m) {
  gpr_mu_destroy(&m->mu);
  gpr_cv_destroy(&m->cv);
  gpr_cv_destroy(&m->done_cv);
  queue_destroy(&m->q);
  gpr_free(m->threads);
  gpr_free(m);
}

/* Create m->nthreads threads, each running (*body)(m) */
static void test_create_threads(struct test* m, void (*body)(void* arg)) {
  int i;
  for (i = 0; i != m->nthreads; i++) {
    m->threads[i] = grpc_core::Thread("grpc_create_threads", body, m);
    m->threads[i].Start();
  }
}

/* Wait until all threads report done. */
static void test_wait(struct test* m) {
  gpr_mu_lock(&m->mu);
  while (m->done != 0) {
    gpr_cv_wait(&m->done_cv, &m->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_unlock(&m->mu);
  for (int i = 0; i != m->nthreads; i++) {
    m->threads[i].Join();
  }
}

/* Get an integer thread id in the range 0..nthreads-1 */
static int thread_id(struct test* m) {
  int id;
  gpr_mu_lock(&m->mu);
  id = m->thread_count++;
  gpr_mu_unlock(&m->mu);
  return id;
}

/* Indicate that a thread is done, by decrementing m->done
   and signalling done_cv if m->done==0. */
static void mark_thread_done(struct test* m) {
  gpr_mu_lock(&m->mu);
  GPR_ASSERT(m->done != 0);
  m->done--;
  if (m->done == 0) {
    gpr_cv_signal(&m->done_cv);
  }
  gpr_mu_unlock(&m->mu);
}

/* Test several threads running (*body)(struct test *m) for increasing settings
   of m->iterations, until about timeout_s to 2*timeout_s seconds have elapsed.
   If extra!=NULL, run (*extra)(m) in an additional thread.
   incr_step controls by how much m->refcount should be incremented/decremented
   (if at all) each time in the tests.
   */
static void test(const char* name, void (*body)(void* m),
                 void (*extra)(void* m), int timeout_s, int incr_step) {
  int64_t iterations = 256;
  struct test* m;
  gpr_timespec start = gpr_now(GPR_CLOCK_REALTIME);
  gpr_timespec time_taken;
  gpr_timespec deadline = gpr_time_add(
      start, gpr_time_from_micros(static_cast<int64_t>(timeout_s) * 1000000,
                                  GPR_TIMESPAN));
  fprintf(stderr, "%s:", name);
  fflush(stderr);
  while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0) {
    fprintf(stderr, " %ld", static_cast<long>(iterations));
    fflush(stderr);
    m = test_new(10, iterations, incr_step);
    grpc_core::Thread extra_thd;
    if (extra != nullptr) {
      extra_thd = grpc_core::Thread(name, extra, m);
      extra_thd.Start();
      m->done++; /* one more thread to wait for */
    }
    test_create_threads(m, body);
    test_wait(m);
    if (extra != nullptr) {
      extra_thd.Join();
    }
    if (m->counter != m->nthreads * m->iterations * m->incr_step) {
      fprintf(stderr, "counter %ld  threads %d  iterations %ld\n",
              static_cast<long>(m->counter), m->nthreads,
              static_cast<long>(m->iterations));
      fflush(stderr);
      GPR_ASSERT(0);
    }
    test_destroy(m);
    iterations <<= 1;
  }
  time_taken = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), start);
  fprintf(stderr, " done %lld.%09d s\n",
          static_cast<long long>(time_taken.tv_sec),
          static_cast<int>(time_taken.tv_nsec));
  fflush(stderr);
}

/* Increment m->counter on each iteration; then mark thread as done.  */
static void inc(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t i;
  for (i = 0; i != m->iterations; i++) {
    gpr_mu_lock(&m->mu);
    m->counter++;
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}

/* Increment m->counter under lock acquired with trylock, m->iterations times;
   then mark thread as done.  */
static void inctry(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t i;
  for (i = 0; i != m->iterations;) {
    if (gpr_mu_trylock(&m->mu)) {
      m->counter++;
      gpr_mu_unlock(&m->mu);
      i++;
    }
  }
  mark_thread_done(m);
}

/* Increment m->counter only when (m->counter % m->nthreads) equals this
   thread's id (as returned by thread_id(m)); then mark thread as done.  */
static void inc_by_turns(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t i;
  int id = thread_id(m);
  for (i = 0; i != m->iterations; i++) {
    gpr_mu_lock(&m->mu);
    while ((m->counter % m->nthreads) != id) {
      gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
    }
    m->counter++;
    gpr_cv_broadcast(&m->cv);
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}

/* Wait a millisecond and increment counter on each iteration;
   then mark thread as done. */
static void inc_with_1ms_delay(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t i;
  for (i = 0; i != m->iterations; i++) {
    gpr_timespec deadline;
    gpr_mu_lock(&m->mu);
    deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                            gpr_time_from_micros(1000, GPR_TIMESPAN));
    while (!gpr_cv_wait(&m->cv, &m->mu, deadline)) {
    }
    m->counter++;
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}

/* Wait a millisecond and increment counter on each iteration, using an event
   for timing; then mark thread as done. */
static void inc_with_1ms_delay_event(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t i;
  for (i = 0; i != m->iterations; i++) {
    gpr_timespec deadline;
    deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                            gpr_time_from_micros(1000, GPR_TIMESPAN));
    GPR_ASSERT(gpr_event_wait(&m->event, deadline) == nullptr);
    gpr_mu_lock(&m->mu);
    m->counter++;
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}

/* Produce m->iterations elements on queue m->q, then mark thread as done.
   Even threads use queue_append(), and odd threads use queue_try_append()
   until it succeeds. */
static void many_producers(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t i;
  int x = thread_id(m);
  if ((x & 1) == 0) {
    for (i = 0; i != m->iterations; i++) {
      queue_append(&m->q, 1);
    }
  } else {
    for (i = 0; i != m->iterations; i++) {
      while (!queue_try_append(&m->q, 1)) {
      }
    }
  }
  mark_thread_done(m);
}

/* Consume elements from m->q until m->nthreads*m->iterations are seen,
   wait an extra second to confirm that no more elements are arriving,
   then mark thread as done. */
static void consumer(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t n = m->iterations * m->nthreads;
  int64_t i;
  int value;
  for (i = 0; i != n; i++) {
    queue_remove(&m->q, &value, gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_lock(&m->mu);
  m->counter = n;
  gpr_mu_unlock(&m->mu);
  GPR_ASSERT(
      !queue_remove(&m->q, &value,
                    gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                 gpr_time_from_micros(1000000, GPR_TIMESPAN))));
  mark_thread_done(m);
}

/* Increment m->stats_counter m->iterations times, transfer counter value to
   m->counter, then mark thread as done.  */
static void statsinc(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t i;
  for (i = 0; i != m->iterations; i++) {
    gpr_stats_inc(&m->stats_counter, 1);
  }
  gpr_mu_lock(&m->mu);
  m->counter = gpr_stats_read(&m->stats_counter);
  gpr_mu_unlock(&m->mu);
  mark_thread_done(m);
}

/* Increment m->refcount by m->incr_step, m->iterations times. Decrement
   m->thread_refcount once, and if it reaches zero, set m->event to (void*)1;
   then mark thread as done.  */
static void refinc(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t i;
  for (i = 0; i != m->iterations; i++) {
    if (m->incr_step == 1) {
      gpr_ref(&m->refcount);
    } else {
      gpr_refn(&m->refcount, m->incr_step);
    }
  }
  if (gpr_unref(&m->thread_refcount)) {
    gpr_event_set(&m->event, (void*)1);
  }
  mark_thread_done(m);
}

/* Wait until m->event is set to (void*)1; then decrement m->refcount by 1
   a total of (m->nthreads * m->iterations * m->incr_step) times, checking
   that only the final decrement brings the count to zero; then mark thread
   as done.  */
static void refcheck(void* v /*=m*/) {
  struct test* m = static_cast<struct test*>(v);
  int64_t n = m->iterations * m->nthreads * m->incr_step;
  int64_t i;
  GPR_ASSERT(gpr_event_wait(&m->event, gpr_inf_future(GPR_CLOCK_REALTIME)) ==
             (void*)1);
  GPR_ASSERT(gpr_event_get(&m->event) == (void*)1);
  for (i = 1; i != n; i++) {
    GPR_ASSERT(!gpr_unref(&m->refcount));
    m->counter++;
  }
  GPR_ASSERT(gpr_unref(&m->refcount));
  m->counter++;
  mark_thread_done(m);
}

/* ------------------------------------------------- */

int main(int argc, char* argv[]) {
  grpc_test_init(argc, argv);
  test("mutex", &inc, nullptr, 1, 1);
  test("mutex try", &inctry, nullptr, 1, 1);
  test("cv", &inc_by_turns, nullptr, 1, 1);
  test("timedcv", &inc_with_1ms_delay, nullptr, 1, 1);
  test("queue", &many_producers, &consumer, 10, 1);
  test("stats_counter", &statsinc, nullptr, 1, 1);
  test("refcount by 1", &refinc, &refcheck, 1, 1);
  test("refcount by 3", &refinc, &refcheck, 1, 3); /* incr_step of 3 is an
                                                      arbitrary choice. Any
                                                      number > 1 is okay here */
  test("timedevent", &inc_with_1ms_delay_event, nullptr, 1, 1);
  return 0;
}