//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

// Test gpr per-cpu support:
// gpr_cpu_num_cores()
// gpr_cpu_current_cpu()
//

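// Illustrative sketch (not part of the test) of how the two APIs under
// test are consulted; per the assertions below, the core count is expected
// to be > 0 and the current-cpu index to be in range:
//
//   uint32_t n = gpr_cpu_num_cores();    // total core count (> 0)
//   uint32_t c = gpr_cpu_current_cpu();  // core this thread runs on (< n)
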
#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <memory>

#include "gtest/gtest.h"
#include "src/core/util/thd.h"
#include "test/core/test_util/test_config.h"

// Test structure is essentially:
// 1) Figure out how many cores are present on the test system
// 2) Create 3 times that many threads
// 3) Have each thread do some amount of work (basically want to
//    guarantee that all threads are running at once, and enough of them
//    to run on all cores).
// 4) Each thread checks what core it is running on, and marks that core
//    as "used" in the test.
// 5) Count number of "used" cores.

// The test will fail if:
// 1) gpr_cpu_num_cores() == 0
// 2) Any result from gpr_cpu_current_cpu() >= gpr_cpu_num_cores()
// 3) Ideally, we would fail if not all cores were seen as used.
//    Unfortunately, this is only probabilistically true and depends on the
//    OS, its scheduler, etc. So we just print out an indication of how many
//    were seen; hopefully developers can use this to sanity check their
//    system.
//

// Status shared across threads
struct cpu_test {
  gpr_mu mu;        // guards the fields below
  int nthreads;     // number of worker threads still running
  uint32_t ncores;  // total cores reported by gpr_cpu_num_cores()
  int is_done;      // set once the last worker finishes
  gpr_cv done_cv;   // signaled when is_done becomes true
  int* used;        // is this core used?
  unsigned r;       // random number
};

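// Worker body: burn CPU in short bursts, sampling the current core after
// each burst and marking it in the shared used[] table; exit early once
// every core has been observed.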
static void worker_thread(void* arg) {
  struct cpu_test* ct = static_cast<struct cpu_test*>(arg);
  uint32_t cpu;
  unsigned r = 12345678;
  unsigned i, j;
  // Avoid repetitive division calculations
  int64_t max_i = 1000 / grpc_test_slowdown_factor();
  int64_t max_j = 1000 / grpc_test_slowdown_factor();
  for (i = 0; i < max_i; i++) {
    // run for a bit - just calculate something random.
    for (j = 0; j < max_j; j++) {
      r = (r * 17) & ((r - i) | (r * i));
    }
    cpu = gpr_cpu_current_cpu();
    ASSERT_LT(cpu, ct->ncores);
    gpr_mu_lock(&ct->mu);
    ct->used[cpu] = 1;
    for (j = 0; j < ct->ncores; j++) {
      if (!ct->used[j]) break;
    }
    gpr_mu_unlock(&ct->mu);
    if (j == ct->ncores) {
      break;  // all cpus have been used - no further use in running this test
    }
  }
  gpr_mu_lock(&ct->mu);
  ct->r = r;  // make it look like we care about r's value...
  ct->nthreads--;
  if (ct->nthreads == 0) {
    ct->is_done = 1;
    gpr_cv_signal(&ct->done_cv);
  }
  gpr_mu_unlock(&ct->mu);
}

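// Test driver: spawns the workers, blocks until the last one signals
// done_cv, then prints which cores were seen.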
static void cpu_test(void) {
  uint32_t i;
  int cores_seen = 0;
  struct cpu_test ct;
  ct.ncores = gpr_cpu_num_cores();
  ASSERT_GT(ct.ncores, 0);
  ct.nthreads = static_cast<int>(ct.ncores) * 3;
  ct.used = static_cast<int*>(gpr_malloc(ct.ncores * sizeof(int)));
  memset(ct.used, 0, ct.ncores * sizeof(int));
  gpr_mu_init(&ct.mu);
  gpr_cv_init(&ct.done_cv);
  ct.is_done = 0;

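  // Launch the workers. Three per core (see comment at top of file) makes
  // it likely that all cores are exercised at once.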
  uint32_t nthreads = ct.ncores * 3;
  grpc_core::Thread* thd =
      static_cast<grpc_core::Thread*>(gpr_malloc(sizeof(*thd) * nthreads));

  for (i = 0; i < nthreads; i++) {
    thd[i] = grpc_core::Thread("grpc_cpu_test", &worker_thread, &ct);
    thd[i].Start();
  }
  gpr_mu_lock(&ct.mu);
  while (!ct.is_done) {
    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_unlock(&ct.mu);
  for (i = 0; i < nthreads; i++) {
    thd[i].Join();
  }
  gpr_free(thd);
  fprintf(stderr, "Saw cores [");
  fflush(stderr);
  for (i = 0; i < ct.ncores; i++) {
    if (ct.used[i]) {
      fprintf(stderr, "%u,", i);
      fflush(stderr);
      cores_seen++;
    }
  }
  fprintf(stderr, "] (%d/%u)\n", cores_seen, ct.ncores);
  fflush(stderr);
  gpr_mu_destroy(&ct.mu);
  gpr_cv_destroy(&ct.done_cv);
  gpr_free(ct.used);
}

TEST(CpuTest, MainTest) { cpu_test(); }

int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(&argc, argv);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}