/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

/* Test gpr per-cpu support:
   gpr_cpu_num_cores()
   gpr_cpu_current_cpu()
*/

#include <grpc/support/cpu.h>

#include <stdio.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

#include "src/core/lib/gprpp/thd.h"
#include "test/core/util/test_config.h"

/* Test structure is essentially:
   1) Figure out how many cores are present on the test system
   2) Create 3 times that many threads
   3) Have each thread do some amount of work (basically want to
      guarantee that all threads are running at once, and enough of them
      to run on all cores).
   4) Each thread checks what core it is running on, and marks that core
      as "used" in the test.
   5) Count number of "used" cores.

   The test will fail if:
   1) gpr_cpu_num_cores() == 0
   2) Any result from gpr_cpu_current_cpu() >= gpr_cpu_num_cores()
   3) Ideally, we would fail if not all cores were seen as used. Unfortunately,
      this is only probabilistically true, and depends on the OS, its
      scheduler, etc. So we just print out an indication of how many were seen;
      hopefully developers can use this to sanity check their system.
*/

/* Status shared across threads */
struct cpu_test {
  gpr_mu mu;
  int nthreads;
  uint32_t ncores;
  int is_done;
  gpr_cv done_cv;
  int* used;  /* is this core used? */
  unsigned r; /* random number */
};

static void worker_thread(void* arg) {
  struct cpu_test* ct = static_cast<struct cpu_test*>(arg);
  uint32_t cpu;
  unsigned r = 12345678;
  unsigned i, j;
  /* Avoid repetitive division calculations */
  int64_t max_i = 1000 / grpc_test_slowdown_factor();
  int64_t max_j = 1000 / grpc_test_slowdown_factor();
  for (i = 0; i < max_i; i++) {
    /* run for a bit - just calculate something random. */
    for (j = 0; j < max_j; j++) {
      r = (r * 17) & ((r - i) | (r * i));
    }
    cpu = gpr_cpu_current_cpu();
    GPR_ASSERT(cpu < ct->ncores);
    gpr_mu_lock(&ct->mu);
    ct->used[cpu] = 1;
    for (j = 0; j < ct->ncores; j++) {
      if (!ct->used[j]) break;
    }
    gpr_mu_unlock(&ct->mu);
    if (j == ct->ncores) {
      break; /* all cpus have been used - no further use in running this test */
    }
  }
  gpr_mu_lock(&ct->mu);
  ct->r = r; /* make it look like we care about r's value... */
  ct->nthreads--;
  if (ct->nthreads == 0) {
    ct->is_done = 1;
    gpr_cv_signal(&ct->done_cv);
  }
  gpr_mu_unlock(&ct->mu);
}

static void cpu_test(void) {
  uint32_t i;
  int cores_seen = 0;
  struct cpu_test ct;
  ct.ncores = gpr_cpu_num_cores();
  GPR_ASSERT(ct.ncores > 0);
  ct.nthreads = static_cast<int>(ct.ncores) * 3;
  ct.used = static_cast<int*>(gpr_malloc(ct.ncores * sizeof(int)));
  memset(ct.used, 0, ct.ncores * sizeof(int));
  gpr_mu_init(&ct.mu);
  gpr_cv_init(&ct.done_cv);
  ct.is_done = 0;

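  /* Spawn three worker threads per core so that, with high probability,
     every core ends up running at least one of them. */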
  uint32_t nthreads = ct.ncores * 3;
  grpc_core::Thread* thd =
      static_cast<grpc_core::Thread*>(gpr_malloc(sizeof(*thd) * nthreads));

  for (i = 0; i < nthreads; i++) {
    thd[i] = grpc_core::Thread("grpc_cpu_test", &worker_thread, &ct);
    thd[i].Start();
  }
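  /* Block until the last worker thread signals done_cv, then join them all. */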
  gpr_mu_lock(&ct.mu);
  while (!ct.is_done) {
    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_unlock(&ct.mu);
  for (i = 0; i < nthreads; i++) {
    thd[i].Join();
  }
  gpr_free(thd);
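  /* Report which cores were observed; not seeing every core is only an
     indication for the developer, not a test failure, since coverage
     depends on the OS scheduler. */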
  fprintf(stderr, "Saw cores [");
  fflush(stderr);
  for (i = 0; i < ct.ncores; i++) {
    if (ct.used[i]) {
      fprintf(stderr, "%d,", i);
      fflush(stderr);
      cores_seen++;
    }
  }
  fprintf(stderr, "] (%d/%d)\n", cores_seen, ct.ncores);
  fflush(stderr);
  gpr_mu_destroy(&ct.mu);
  gpr_cv_destroy(&ct.done_cv);
  gpr_free(ct.used);
}

int main(int argc, char* argv[]) {
  grpc::testing::TestEnvironment env(argc, argv);
  cpu_test();
  return 0;
}