/*--------------------------------------------------------------------*/
/*--- Callgrind                                                    ---*/
/*---                                                 ct_threads.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Callgrind, a Valgrind tool for call tracing.

   Copyright (C) 2002-2012, Josef Weidendorfer (Josef.Weidendorfer@gmx.de)

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "global.h"

#include "pub_tool_threadstate.h"

/* forward decls */
static exec_state* exec_state_save(void);
static exec_state* exec_state_restore(void);
static exec_state* push_exec_state(int);
static exec_state* top_exec_state(void);

static exec_stack current_states;


/*------------------------------------------------------------*/
/*--- Support for multi-threading                          ---*/
/*------------------------------------------------------------*/

/*
 * For Valgrind, MT is cooperative (no preemption in our code),
 * so we don't need locks...
 *
 * Per-thread data:
 *  - BBCCs
 *  - call stack
 *  - call hash
 *  - event counters: last, current
 *
 * Even when ignoring MT, we need these functions to set up the
 * data structures for the process (= Thread 1). A rough sketch of
 * the per-thread container is given below.
 */
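
/* For orientation, a rough sketch of that per-thread container
 * (thread_info is defined in global.h; only the members actually used in
 * this file are listed, member names taken from their use here and types
 * guessed, so treat this as an illustration rather than the definition):
 *
 *   struct _thread_info {
 *     exec_stack states;           // execution states (normal flow + handlers)
 *     call_stack calls;            // shadow call stack
 *     fn_stack   fns;              // function nesting stack
 *     fn_array   fn_active;        // per-function activation counters
 *     bbcc_hash  bbccs;            // cost centers per (basic block, context)
 *     jcc_hash   jccs;             // cost centers per (jump/call, context)
 *     FullCost   lastdump_cost;    // event counters at the last dump
 *     FullCost   sighandler_cost;  // events accumulated in signal handlers
 *   };
 */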

/* current running thread */
ThreadId CLG_(current_tid);

static thread_info* thread[VG_N_THREADS];

thread_info** CLG_(get_threads)()
{
    return thread;
}

thread_info* CLG_(get_current_thread)()
{
    return thread[CLG_(current_tid)];
}

void CLG_(init_threads)()
{
    Int i;
    for(i=0;i<VG_N_THREADS;i++)
        thread[i] = 0;
    CLG_(current_tid) = VG_INVALID_THREADID;
}

/* switches through all threads and calls func */
void CLG_(forall_threads)(void (*func)(thread_info*))
{
    Int t, orig_tid = CLG_(current_tid);

    for(t=1;t<VG_N_THREADS;t++) {
        if (!thread[t]) continue;
        CLG_(switch_thread)(t);
        (*func)(thread[t]);
    }
    CLG_(switch_thread)(orig_tid);
}
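
/* A hypothetical usage sketch (the callback name is made up; the real
 * callers live elsewhere, e.g. in the dumping code):
 *
 *   static void dump_one_thread(thread_info* t)
 *   {
 *       // "t" is also the current thread here, so the CLG_(current_*)
 *       // state seen by helper functions refers to this thread
 *       ...
 *   }
 *
 *   CLG_(forall_threads)(dump_one_thread);
 */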


static
thread_info* new_thread(void)
{
    thread_info* t;

    t = (thread_info*) CLG_MALLOC("cl.threads.nt.1",
                                  sizeof(thread_info));

    /* init state */
    CLG_(init_exec_stack)( &(t->states) );
    CLG_(init_call_stack)( &(t->calls) );
    CLG_(init_fn_stack)  ( &(t->fns) );
    /* t->states.entry[0]->cxt = CLG_(get_cxt)(t->fns.bottom); */

    /* event counters */
    t->lastdump_cost   = CLG_(get_eventset_cost)( CLG_(sets).full );
    t->sighandler_cost = CLG_(get_eventset_cost)( CLG_(sets).full );
    CLG_(init_cost)( CLG_(sets).full, t->lastdump_cost );
    CLG_(init_cost)( CLG_(sets).full, t->sighandler_cost );

    /* init data containers */
    CLG_(init_fn_array)( &(t->fn_active) );
    CLG_(init_bbcc_hash)( &(t->bbccs) );
    CLG_(init_jcc_hash) ( &(t->jccs) );

    return t;
}

void CLG_(switch_thread)(ThreadId tid)
{
    if (tid == CLG_(current_tid)) return;

    CLG_DEBUG(0, ">> thread %d (was %d)\n", tid, CLG_(current_tid));

    if (CLG_(current_tid) != VG_INVALID_THREADID) {
        /* save thread state */
        thread_info* t = thread[CLG_(current_tid)];

        CLG_ASSERT(t != 0);

        /* current context (including signal handler contexts) */
        exec_state_save();
        CLG_(copy_current_exec_stack)( &(t->states) );
        CLG_(copy_current_call_stack)( &(t->calls) );
        CLG_(copy_current_fn_stack)  ( &(t->fns) );

        CLG_(copy_current_fn_array) ( &(t->fn_active) );
        /* If we accumulate costs of all threads, use TID 1 for all jccs/bbccs */
        if (!CLG_(clo).separate_threads) t = thread[1];
        CLG_(copy_current_bbcc_hash)( &(t->bbccs) );
        CLG_(copy_current_jcc_hash) ( &(t->jccs) );
    }

    CLG_(current_tid) = tid;
    CLG_ASSERT(tid < VG_N_THREADS);

    if (tid != VG_INVALID_THREADID) {
        thread_info* t;

        /* load thread state */

        if (thread[tid] == 0) thread[tid] = new_thread();
        t = thread[tid];

        /* current context (including signal handler contexts) */
        CLG_(set_current_exec_stack)( &(t->states) );
        exec_state_restore();
        CLG_(set_current_call_stack)( &(t->calls) );
        CLG_(set_current_fn_stack)  ( &(t->fns) );

        CLG_(set_current_fn_array) ( &(t->fn_active) );
        /* If we accumulate costs of all threads, use TID 1 for all jccs/bbccs */
        if (!CLG_(clo).separate_threads) t = thread[1];
        CLG_(set_current_bbcc_hash) ( &(t->bbccs) );
        CLG_(set_current_jcc_hash)  ( &(t->jccs) );
    }
}

void CLG_(run_thread)(ThreadId tid)
{
    /* check for dumps needed */
    static ULong bbs_done = 0;
    static Char buf[512];

    if (CLG_(clo).dump_every_bb > 0) {
        if (CLG_(stat).bb_executions - bbs_done > CLG_(clo).dump_every_bb) {
            VG_(sprintf)(buf, "--dump-every-bb=%llu", CLG_(clo).dump_every_bb);
            CLG_(dump_profile)(buf, False);
            bbs_done = CLG_(stat).bb_executions;
        }
    }

    /* now check for thread switch */
    CLG_(switch_thread)(tid);
}

void CLG_(pre_signal)(ThreadId tid, Int sigNum, Bool alt_stack)
{
    exec_state *es;

    CLG_DEBUG(0, ">> pre_signal(TID %d, sig %d, alt_st %s)\n",
              tid, sigNum, alt_stack ? "yes":"no");

    /* switch to the thread the handler runs in */
    CLG_(switch_thread)(tid);

    /* save current execution state */
    exec_state_save();

    /* set up a new exec_state for this signal handler */
    es = push_exec_state(sigNum);
    CLG_(zero_cost)( CLG_(sets).full, es->cost );
    CLG_(current_state).cost = es->cost;
    es->call_stack_bottom = CLG_(current_call_stack).sp;

    /* set up the current state for a spontaneous call */
    CLG_(init_exec_state)( &CLG_(current_state) );
    CLG_(current_state).sig = sigNum;
    CLG_(push_cxt)(0);
}

/* Run post-signal handling if the call stack pointer is back at the
 * bottom recorded in the current exec state (i.e. the signal handler
 * frame has been fully unwound).
 *
 * Called from CLG_(pop_call_stack)
 */
void CLG_(run_post_signal_on_call_stack_bottom)()
{
    exec_state* es = top_exec_state();
    CLG_ASSERT(es != 0);
    CLG_ASSERT(CLG_(current_state).sig > 0);

    if (CLG_(current_call_stack).sp == es->call_stack_bottom)
        CLG_(post_signal)( CLG_(current_tid), CLG_(current_state).sig );
}

void CLG_(post_signal)(ThreadId tid, Int sigNum)
{
    exec_state* es;
    UInt fn_number, *pactive;

    CLG_DEBUG(0, ">> post_signal(TID %d, sig %d)\n",
              tid, sigNum);

    /* a thread switch is potentially needed, e.g. with instrumentation off */
    CLG_(switch_thread)(tid);
    CLG_ASSERT(sigNum == CLG_(current_state).sig);

    /* Unwind call stack of this signal handler.
     * This should only be needed at finalisation time
     */
    es = top_exec_state();
    CLG_ASSERT(es != 0);
    while(CLG_(current_call_stack).sp > es->call_stack_bottom)
        CLG_(pop_call_stack)();

    if (CLG_(current_state).cxt) {
        /* correct active counts */
        fn_number = CLG_(current_state).cxt->fn[0]->number;
        pactive = CLG_(get_fn_entry)(fn_number);
        (*pactive)--;
        CLG_DEBUG(0, "  set active count of %s back to %d\n",
                  CLG_(current_state).cxt->fn[0]->name, *pactive);
    }

    if (CLG_(current_fn_stack).top > CLG_(current_fn_stack).bottom) {
        /* set fn_stack_top back.
         * top can point to 0 if nothing was executed in the signal handler;
         * this is possible at the end, when unwinding handlers.
         */
        if (*(CLG_(current_fn_stack).top) != 0) {
            CLG_(current_fn_stack).top--;
            CLG_ASSERT(*(CLG_(current_fn_stack).top) == 0);
        }
        if (CLG_(current_fn_stack).top > CLG_(current_fn_stack).bottom)
            CLG_(current_fn_stack).top--;
    }

    /* sum up costs */
    CLG_ASSERT(CLG_(current_state).cost == es->cost);
    CLG_(add_and_zero_cost)( CLG_(sets).full,
                             thread[CLG_(current_tid)]->sighandler_cost,
                             CLG_(current_state).cost );

    /* restore previous context */
    es->sig = -1;
    current_states.sp--;
    es = top_exec_state();
    CLG_(current_state).sig = es->sig;
    exec_state_restore();

    /* There is no way to reliably get the thread ID we are switching to
     * after this handler returns. So we sync with the actual TID at the
     * start of CLG_(setup_bb)(), which should be the next Callgrind hook
     * to run.
     */
}


/*------------------------------------------------------------*/
/*--- Execution states in a thread & signal handlers       ---*/
/*------------------------------------------------------------*/

/* Each thread can be interrupted by a signal handler, and those in turn
 * by further handlers. But as there is no scheduling among the handlers
 * of one thread, we don't need additional stacks.
 * So storing execution contexts and adding separators in the call stack
 * (needed to avoid intermixing normal and handler functions in contexts)
 * should be enough. A sketch of the resulting nesting is given below.
 */
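
/* A sketch of how the state stack of one thread evolves (entry[] slots of
 * current_states; this is only an illustration, the depth is bounded by
 * MAX_SIGHANDLERS):
 *
 *   sp == 0   entry[0]: normal execution of the thread
 *   sp == 1   entry[1]: handler for signal A interrupts it   (pre_signal)
 *   sp == 2   entry[2]: handler for signal B interrupts A    (pre_signal)
 *             ...
 *   each matching post_signal pops one level again (sp--) and restores
 *   the execution state saved below it.
 */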

/* not initialized: call_stack_bottom, sig */
void CLG_(init_exec_state)(exec_state* es)
{
    es->collect = CLG_(clo).collect_atstart;
    es->cxt  = 0;
    es->jmps_passed = 0;
    es->bbcc = 0;
    es->nonskipped = 0;
}


static exec_state* new_exec_state(Int sigNum)
{
    exec_state* es;
    es = (exec_state*) CLG_MALLOC("cl.threads.nes.1",
                                  sizeof(exec_state));

    /* allocate real cost space: needed as incremented by
     * simulation functions */
    es->cost = CLG_(get_eventset_cost)(CLG_(sets).full);
    CLG_(zero_cost)( CLG_(sets).full, es->cost );
    CLG_(init_exec_state)(es);
    es->sig = sigNum;
    es->call_stack_bottom = 0;

    return es;
}

void CLG_(init_exec_stack)(exec_stack* es)
{
    Int i;

    /* The first element is for the main thread */
    es->entry[0] = new_exec_state(0);
    for(i=1;i<MAX_SIGHANDLERS;i++)
        es->entry[i] = 0;
    es->sp = 0;
}

void CLG_(copy_current_exec_stack)(exec_stack* dst)
{
    Int i;

    dst->sp = current_states.sp;
    for(i=0;i<MAX_SIGHANDLERS;i++)
        dst->entry[i] = current_states.entry[i];
}

void CLG_(set_current_exec_stack)(exec_stack* dst)
{
    Int i;

    current_states.sp = dst->sp;
    for(i=0;i<MAX_SIGHANDLERS;i++)
        current_states.entry[i] = dst->entry[i];
}


/* Get the exec_state at the top of the current thread's state stack */
static
exec_state* top_exec_state(void)
{
    Int sp = current_states.sp;
    exec_state* es;

    CLG_ASSERT((sp >= 0) && (sp < MAX_SIGHANDLERS));
    es = current_states.entry[sp];
    CLG_ASSERT(es != 0);
    return es;
}

/* Allocates a free exec_state structure for a newly entered
 * signal handler and puts it on the state stack.
 * Returns a pointer to the structure.
 */
static exec_state* push_exec_state(int sigNum)
{
    Int sp;
    exec_state* es;

    current_states.sp++;
    sp = current_states.sp;

    CLG_ASSERT((sigNum > 0) && (sigNum <= _VKI_NSIG));
    CLG_ASSERT((sp > 0) && (sp < MAX_SIGHANDLERS));
    es = current_states.entry[sp];
    if (!es) {
        es = new_exec_state(sigNum);
        current_states.entry[sp] = es;
    }
    else
        es->sig = sigNum;

    return es;
}

/* Save the current state into the exec_state on top of the stack */
static
exec_state* exec_state_save(void)
{
    exec_state* es = top_exec_state();

    es->cxt = CLG_(current_state).cxt;
    es->collect = CLG_(current_state).collect;
    es->jmps_passed = CLG_(current_state).jmps_passed;
    es->bbcc = CLG_(current_state).bbcc;
    es->nonskipped = CLG_(current_state).nonskipped;
    CLG_ASSERT(es->cost == CLG_(current_state).cost);

    CLG_DEBUGIF(1) {
        CLG_DEBUG(1, "  exec_state_save(sig %d): collect %s, jmps_passed %d\n",
                  es->sig, es->collect ? "Yes": "No", es->jmps_passed);
        CLG_(print_bbcc)(-9, es->bbcc);
        CLG_(print_cost)(-9, CLG_(sets).full, es->cost);
    }

    /* signal number does not need to be saved */
    CLG_ASSERT(CLG_(current_state).sig == es->sig);

    return es;
}

static
exec_state* exec_state_restore(void)
{
    exec_state* es = top_exec_state();

    CLG_(current_state).cxt = es->cxt;
    CLG_(current_state).collect = es->collect;
    CLG_(current_state).jmps_passed = es->jmps_passed;
    CLG_(current_state).bbcc = es->bbcc;
    CLG_(current_state).nonskipped = es->nonskipped;
    CLG_(current_state).cost = es->cost;
    CLG_(current_state).sig = es->sig;

    CLG_DEBUGIF(1) {
        CLG_DEBUG(1, "  exec_state_restore(sig %d): collect %s, jmps_passed %d\n",
                  es->sig, es->collect ? "Yes": "No", es->jmps_passed);
        CLG_(print_bbcc)(-9, es->bbcc);
        CLG_(print_cxt)(-9, es->cxt, 0);
        CLG_(print_cost)(-9, CLG_(sets).full, es->cost);
    }

    return es;
}