/*--------------------------------------------------------------------*/
/*--- Callgrind                                                    ---*/
/*---                                                 ct_threads.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Callgrind, a Valgrind tool for call tracing.

   Copyright (C) 2002-2017, Josef Weidendorfer (Josef.Weidendorfer@gmx.de)

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "global.h"

#include "pub_tool_threadstate.h"

/* forward decls */
static exec_state* exec_state_save(void);
static exec_state* exec_state_restore(void);
static exec_state* push_exec_state(int);
static exec_state* top_exec_state(void);

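/* Exec state stack of the currently scheduled thread.  It is copied
 * out to / reloaded from the per-thread exec_stack on every thread
 * switch (see CLG_(switch_thread) below). */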
static exec_stack current_states;


/*------------------------------------------------------------*/
/*--- Support for multi-threading                          ---*/
/*------------------------------------------------------------*/


/*
 * For Valgrind, MT is cooperative (no preempting in our code),
 * so we don't need locks...
 *
 * Per-thread data:
 *  - BBCCs
 *  - call stack
 *  - call hash
 *  - event counters: last, current
 *
 * Even when ignoring MT, we need these functions to set up some
 * data structures for the process (= Thread 1).
 */

/* currently running thread */
ThreadId CLG_(current_tid);

static thread_info** thread;

thread_info** CLG_(get_threads)()
{
  return thread;
}

thread_info* CLG_(get_current_thread)()
{
  return thread[CLG_(current_tid)];
}

void CLG_(init_threads)()
{
    UInt i;

    thread = CLG_MALLOC("cl.threads.it.1", VG_N_THREADS * sizeof thread[0]);

    for(i=0;i<VG_N_THREADS;i++)
        thread[i] = 0;
    CLG_(current_tid) = VG_INVALID_THREADID;
}

/* Switches to each existing thread in turn and calls func on its
 * thread_info, then switches back to the original thread. */
void CLG_(forall_threads)(void (*func)(thread_info*))
{
  Int t, orig_tid = CLG_(current_tid);

  for(t=1;t<VG_N_THREADS;t++) {
    if (!thread[t]) continue;
    CLG_(switch_thread)(t);
    (*func)(thread[t]);
  }
  CLG_(switch_thread)(orig_tid);
}
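
/* Example (hypothetical) usage, e.g. from a dump routine:
 *
 *     static void process_thread(thread_info* ti) { ... }
 *     ...
 *     CLG_(forall_threads)(process_thread);
 *
 * process_thread is only a placeholder name here; callers pass their
 * own callback taking a thread_info*.
 */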


static
thread_info* new_thread(void)
{
    thread_info* t;

    t = (thread_info*) CLG_MALLOC("cl.threads.nt.1",
                                  sizeof(thread_info));

    /* init state */
    CLG_(init_exec_stack)( &(t->states) );
    CLG_(init_call_stack)( &(t->calls) );
    CLG_(init_fn_stack)  ( &(t->fns) );
    /* t->states.entry[0]->cxt = CLG_(get_cxt)(t->fns.bottom); */

    /* event counters */
    t->lastdump_cost   = CLG_(get_eventset_cost)( CLG_(sets).full );
    t->sighandler_cost = CLG_(get_eventset_cost)( CLG_(sets).full );
    CLG_(init_cost)( CLG_(sets).full, t->lastdump_cost );
    CLG_(init_cost)( CLG_(sets).full, t->sighandler_cost );

    /* init data containers */
    CLG_(init_fn_array)( &(t->fn_active) );
    CLG_(init_bbcc_hash)( &(t->bbccs) );
    CLG_(init_jcc_hash)( &(t->jccs) );

    return t;
}


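/* Make tid the current thread: save the state of the previously
 * running thread (exec/call/fn stacks, cost hashes) into its
 * thread_info, then load the state of tid, allocating a fresh
 * thread_info via new_thread() if tid is seen for the first time. */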
void CLG_(switch_thread)(ThreadId tid)
{
  if (tid == CLG_(current_tid)) return;

  CLG_DEBUG(0, ">> thread %u (was %u)\n", tid, CLG_(current_tid));

  if (CLG_(current_tid) != VG_INVALID_THREADID) {
    /* save thread state */
    thread_info* t = thread[CLG_(current_tid)];

    CLG_ASSERT(t != 0);

    /* current context (including signal handler contexts) */
    exec_state_save();
    CLG_(copy_current_exec_stack)( &(t->states) );
    CLG_(copy_current_call_stack)( &(t->calls) );
    CLG_(copy_current_fn_stack)  ( &(t->fns) );

    CLG_(copy_current_fn_array) ( &(t->fn_active) );
    /* If we accumulate costs of all threads, use TID 1 for all jccs/bbccs */
    if (!CLG_(clo).separate_threads) t = thread[1];
    CLG_(copy_current_bbcc_hash)( &(t->bbccs) );
    CLG_(copy_current_jcc_hash) ( &(t->jccs) );
  }

  CLG_(current_tid) = tid;
  CLG_ASSERT(tid < VG_N_THREADS);

  if (tid != VG_INVALID_THREADID) {
    thread_info* t;

    /* load thread state */

    if (thread[tid] == 0) thread[tid] = new_thread();
    t = thread[tid];

    /* current context (including signal handler contexts) */
    CLG_(set_current_exec_stack)( &(t->states) );
    exec_state_restore();
    CLG_(set_current_call_stack)( &(t->calls) );
    CLG_(set_current_fn_stack)  ( &(t->fns) );

    CLG_(set_current_fn_array)  ( &(t->fn_active) );
    /* If we accumulate costs of all threads, use TID 1 for all jccs/bbccs */
    if (!CLG_(clo).separate_threads) t = thread[1];
    CLG_(set_current_bbcc_hash) ( &(t->bbccs) );
    CLG_(set_current_jcc_hash)  ( &(t->jccs) );
  }
}


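/* Called when thread tid is about to run: trigger a periodic dump if
 * --dump-every-bb basic blocks have been executed since the last dump,
 * then switch the per-thread state over to tid. */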
void CLG_(run_thread)(ThreadId tid)
{
    /* check for dumps needed */
    static ULong bbs_done = 0;
    HChar buf[50];   // large enough

    if (CLG_(clo).dump_every_bb > 0) {
       if (CLG_(stat).bb_executions - bbs_done > CLG_(clo).dump_every_bb) {
           VG_(sprintf)(buf, "--dump-every-bb=%llu", CLG_(clo).dump_every_bb);
           CLG_(dump_profile)(buf, False);
           bbs_done = CLG_(stat).bb_executions;
       }
    }

    /* now check for thread switch */
    CLG_(switch_thread)(tid);
}

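/* Called before a signal handler for sigNum runs in thread tid:
 * save the interrupted execution state and push a fresh exec state
 * (with its own cost counters) for the handler. */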
void CLG_(pre_signal)(ThreadId tid, Int sigNum, Bool alt_stack)
{
    exec_state *es;

    CLG_DEBUG(0, ">> pre_signal(TID %u, sig %d, alt_st %s)\n",
              tid, sigNum, alt_stack ? "yes":"no");

    /* switch to the thread the handler runs in */
    CLG_(switch_thread)(tid);

    /* save current execution state */
    exec_state_save();

    /* set up a new cxtinfo struct for this signal handler */
    es = push_exec_state(sigNum);
    CLG_(zero_cost)( CLG_(sets).full, es->cost );
    CLG_(current_state).cost = es->cost;
    es->call_stack_bottom = CLG_(current_call_stack).sp;

    /* set up the current state for a spontaneous call */
    CLG_(init_exec_state)( &CLG_(current_state) );
    CLG_(current_state).sig = sigNum;
    CLG_(push_cxt)(0);
}

/* Run the post-signal handling if the call stack pointer has reached
 * the bottom recorded for the current exec state (i.e. the signal
 * handler's frames have been fully popped).
 *
 * Called from CLG_(pop_call_stack).
 */
void CLG_(run_post_signal_on_call_stack_bottom)()
{
    exec_state* es = top_exec_state();
    CLG_ASSERT(es != 0);
    CLG_ASSERT(CLG_(current_state).sig > 0);

    if (CLG_(current_call_stack).sp == es->call_stack_bottom)
        CLG_(post_signal)( CLG_(current_tid), CLG_(current_state).sig );
}

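/* Called when the handler for sigNum has finished in thread tid:
 * unwind any call stack frames left by the handler, fold the
 * handler's costs into sighandler_cost, and restore the interrupted
 * execution state. */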
void CLG_(post_signal)(ThreadId tid, Int sigNum)
{
    exec_state* es;
    UInt fn_number, *pactive;

    CLG_DEBUG(0, ">> post_signal(TID %u, sig %d)\n",
              tid, sigNum);

    /* thread switching potentially needed, e.g. with instrumentation off */
    CLG_(switch_thread)(tid);
    CLG_ASSERT(sigNum == CLG_(current_state).sig);

    /* Unwind the call stack of this signal handler.
     * This should only be needed at finalisation time.
     */
    es = top_exec_state();
    CLG_ASSERT(es != 0);
    while(CLG_(current_call_stack).sp > es->call_stack_bottom)
      CLG_(pop_call_stack)();

    if (CLG_(current_state).cxt) {
      /* correct active counts */
      fn_number = CLG_(current_state).cxt->fn[0]->number;
      pactive = CLG_(get_fn_entry)(fn_number);
      (*pactive)--;
      CLG_DEBUG(0, "  set active count of %s back to %u\n",
                CLG_(current_state).cxt->fn[0]->name, *pactive);
    }

    if (CLG_(current_fn_stack).top > CLG_(current_fn_stack).bottom) {
        /* Set the fn stack top back.
         * top can point to 0 if nothing was executed in the signal handler;
         * this is possible at the end when unwinding handlers.
         */
        if (*(CLG_(current_fn_stack).top) != 0) {
            CLG_(current_fn_stack).top--;
            CLG_ASSERT(*(CLG_(current_fn_stack).top) == 0);
        }
        if (CLG_(current_fn_stack).top > CLG_(current_fn_stack).bottom)
            CLG_(current_fn_stack).top--;
    }

    /* sum up costs */
    CLG_ASSERT(CLG_(current_state).cost == es->cost);
    CLG_(add_and_zero_cost)( CLG_(sets).full,
                             thread[CLG_(current_tid)]->sighandler_cost,
                             CLG_(current_state).cost );

    /* restore previous context */
    es->sig = -1;
    current_states.sp--;
    es = top_exec_state();
    CLG_(current_state).sig = es->sig;
    exec_state_restore();

    /* There is no way to reliably get the thread ID we are switching to
     * after this handler returns. So we sync with the actual TID at the
     * start of CLG_(setup_bb)(), which should be the next callgrind code
     * to run.
     */
}



/*------------------------------------------------------------*/
/*--- Execution states in a thread & signal handlers       ---*/
/*------------------------------------------------------------*/

/* Each thread can be interrupted by a signal handler, and handlers
 * themselves can be interrupted again. But as there is no scheduling
 * among handlers of the same thread, we do not need additional stacks.
 * Storing execution contexts and adding separators in the call stack
 * (needed so that normal and handler functions do not get intermixed
 * in contexts) is enough.
 */

/* not initialized: call_stack_bottom, sig */
void CLG_(init_exec_state)(exec_state* es)
{
  es->collect = CLG_(clo).collect_atstart;
  es->cxt  = 0;
  es->jmps_passed = 0;
  es->bbcc = 0;
  es->nonskipped = 0;
}


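/* Allocate a fresh exec_state with its own cost vector; sigNum is the
 * signal it belongs to (0 for the base, non-signal state). */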
static exec_state* new_exec_state(Int sigNum)
{
    exec_state* es;
    es = (exec_state*) CLG_MALLOC("cl.threads.nes.1",
                                  sizeof(exec_state));

    /* allocate real cost space: needed as it is incremented by the
     * simulation functions */
    es->cost       = CLG_(get_eventset_cost)(CLG_(sets).full);
    CLG_(zero_cost)( CLG_(sets).full, es->cost );
    CLG_(init_exec_state)(es);
    es->sig        = sigNum;
    es->call_stack_bottom  = 0;

    return es;
}

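/* Note: entries above index 0 are allocated lazily by
 * push_exec_state() the first time a signal handler is entered at
 * that nesting depth. */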
void CLG_(init_exec_stack)(exec_stack* es)
{
  Int i;

  /* The first element is for the main thread */
  es->entry[0] = new_exec_state(0);
  for(i=1;i<MAX_SIGHANDLERS;i++)
    es->entry[i] = 0;
  es->sp = 0;
}

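/* Save the live exec state stack (current_states) into dst,
 * the per-thread copy. */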
void CLG_(copy_current_exec_stack)(exec_stack* dst)
{
  Int i;

  dst->sp = current_states.sp;
  for(i=0;i<MAX_SIGHANDLERS;i++)
    dst->entry[i] = current_states.entry[i];
}

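/* Load the per-thread copy dst back into the live exec state
 * stack (current_states). */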
void CLG_(set_current_exec_stack)(exec_stack* dst)
{
  Int i;

  current_states.sp = dst->sp;
  for(i=0;i<MAX_SIGHANDLERS;i++)
    current_states.entry[i] = dst->entry[i];
}


/* Get top context info struct of current thread */
static
exec_state* top_exec_state(void)
{
  Int sp = current_states.sp;
  exec_state* es;

  CLG_ASSERT((sp >= 0) && (sp < MAX_SIGHANDLERS));
  es = current_states.entry[sp];
  CLG_ASSERT(es != 0);
  return es;
}

/* Allocates a free context info structure for a newly entered
 * signal handler, putting it on the context stack.
 * Returns a pointer to the structure.
 */
static exec_state* push_exec_state(int sigNum)
{
  Int sp;
  exec_state* es;

  current_states.sp++;
  sp = current_states.sp;

  CLG_ASSERT((sigNum > 0) && (sigNum <= _VKI_NSIG));
  CLG_ASSERT((sp > 0) && (sp < MAX_SIGHANDLERS));
  es = current_states.entry[sp];
  if (!es) {
    es = new_exec_state(sigNum);
    current_states.entry[sp] = es;
  }
  else
    es->sig = sigNum;

  return es;
}

/* Save current context to top cxtinfo struct */
static
exec_state* exec_state_save(void)
{
  exec_state* es = top_exec_state();

  es->cxt         = CLG_(current_state).cxt;
  es->collect     = CLG_(current_state).collect;
  es->jmps_passed = CLG_(current_state).jmps_passed;
  es->bbcc        = CLG_(current_state).bbcc;
  es->nonskipped  = CLG_(current_state).nonskipped;
  CLG_ASSERT(es->cost == CLG_(current_state).cost);

  CLG_DEBUGIF(1) {
    CLG_DEBUG(1, "  cxtinfo_save(sig %d): collect %s, jmps_passed %d\n",
              es->sig, es->collect ? "Yes": "No", es->jmps_passed);
    CLG_(print_bbcc)(-9, es->bbcc);
    CLG_(print_cost)(-9, CLG_(sets).full, es->cost);
  }

  /* signal number does not need to be saved */
  CLG_ASSERT(CLG_(current_state).sig == es->sig);

  return es;
}

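/* Restore CLG_(current_state) from the top cxtinfo struct; the
 * inverse of exec_state_save(). */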
static
exec_state* exec_state_restore(void)
{
  exec_state* es = top_exec_state();

  CLG_(current_state).cxt     = es->cxt;
  CLG_(current_state).collect = es->collect;
  CLG_(current_state).jmps_passed = es->jmps_passed;
  CLG_(current_state).bbcc    = es->bbcc;
  CLG_(current_state).nonskipped = es->nonskipped;
  CLG_(current_state).cost    = es->cost;
  CLG_(current_state).sig     = es->sig;

  CLG_DEBUGIF(1) {
    CLG_DEBUG(1, "  exec_state_restore(sig %d): collect %s, jmps_passed %d\n",
              es->sig, es->collect ? "Yes": "No", es->jmps_passed);
    CLG_(print_bbcc)(-9, es->bbcc);
    CLG_(print_cxt)(-9, es->cxt, 0);
    CLG_(print_cost)(-9, CLG_(sets).full, es->cost);
  }

  return es;
}
459