/* -*- mode: C; c-basic-offset: 3; -*- */
/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2010 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_bitmap.h"
#include "drd_thread_bitmap.h"
#include "drd_vc.h"            /* DRD_(vc_snprint)() */

/* Include several source files here in order to allow the compiler to */
/* do more inlining.                                                   */
#include "drd_bitmap.c"
#include "drd_load_store.h"
#include "drd_segment.c"
#include "drd_thread.c"
#include "drd_vc.c"
#include "libvex_guest_offsets.h"


/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#else
#error Unknown architecture.
#endif


/* Local variables. */

static Bool s_check_stack_accesses = False;
static Bool s_first_race_only      = False;


/* Function definitions. */

Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}

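/**
 * Print a message that describes the memory access [addr, addr + size[ if
 * tracing has been requested for any of the accessed bytes. The message
 * mentions the access type, the running thread and its vector clock, and is
 * followed by a stack trace.
 */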
void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      char* vc;

      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      VG_(message)(Vg_UserMsg,
                   "%s 0x%lx size %ld (thread %d / vc %s)\n",
                   access_type == eLoad
                   ? "load "
                   : access_type == eStore
                   ? "store"
                   : access_type == eStart
                   ? "start"
                   : access_type == eEnd
                   ? "end  "
                   : "????",
                   addr,
                   size,
                   DRD_(thread_get_running_tid)(),
                   vc);
      VG_(free)(vc);
      VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
                                 VG_(clo_backtrace_size));
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}

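/*
 * Tracing callbacks that are registered as dirty helpers when address tracing
 * has been enabled; they forward to DRD_(trace_mem_access)() with the proper
 * access type.
 */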
static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eLoad);
}

static VG_REGPARM(2) void drd_trace_mem_store(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eStore);
}

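/**
 * Report a data race on the access [addr, addr + size[ with the given access
 * type to Valgrind's error management, and start suppressing further reports
 * for this address range if only the first race per location has to be
 * reported.
 */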
static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   DataRaceErrInfo drei;

   drei.tid  = DRD_(thread_get_running_tid)();
   drei.addr = addr;
   drei.size = size;
   drei.access_type = access_type;
   VG_(maybe_record_error)(VG_(get_running_tid)(),
                           DataRaceErr,
                           VG_(get_IP)(VG_(get_running_tid)()),
                           "Conflicting accesses",
                           &drei);

   if (s_first_race_only)
   {
      DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}

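/**
 * Generic load handler, called for loads whose size is not covered by one of
 * the specialized callbacks below and for dirty helpers that read memory.
 * Reports a race if the load conflicts with an access by another thread.
 */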
VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* For performance reasons the consistency check below is only compiled in
      when ENABLE_DRD_CONSISTENCY_CHECKS has been defined. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}

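/*
 * Specializations of DRD_(trace_load)() for the most common access sizes
 * (1, 2, 4 and 8 bytes). These take a single register argument and avoid
 * passing the access size explicitly.
 */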
static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}

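/**
 * Generic store handler; the counterpart of DRD_(trace_load)() for store
 * instructions and for dirty helpers that write memory.
 */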
VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* For performance reasons the consistency check below is only compiled in
      when ENABLE_DRD_CONSISTENCY_CHECKS has been defined. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}

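/*
 * Specializations of DRD_(trace_store)() for 1, 2, 4 and 8 byte stores,
 * analogous to the specialized load callbacks above.
 */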
static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}

/**
 * Return true if and only if addr_expr matches the pattern (SP) or
 * <offset>(SP).
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      for (i = 0; i < bb->stmts_used; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}

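/**
 * Instrument a load of 'size' bytes from the address computed by 'addr_expr':
 * insert a call to the appropriate drd_trace_load*() callback before the load
 * in the output superblock 'bb'. Loads relative to the stack pointer are
 * skipped unless checking of stack accesses has been enabled.
 */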
static void instrument_load(IRSB* const bb,
                            IRExpr* const addr_expr,
                            const HWord size)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (UNLIKELY(DRD_(any_address_is_traced)()))
   {
      addStmtToIRSB(bb,
         IRStmt_Dirty(
            unsafeIRDirty_0_N(/*regparms*/2,
                              "drd_trace_load",
                              VG_(fnptr_to_fnentry)
                              (drd_trace_mem_load),
                              mkIRExprVec_2(addr_expr,
                                            mkIRExpr_HWord(size)))));
   }

   if (! s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

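/**
 * The store counterpart of instrument_load(): insert a call to the
 * appropriate drd_trace_store*() callback before a store of 'size' bytes to
 * the address computed by 'addr_expr'.
 */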
static void instrument_store(IRSB* const bb,
                             IRExpr* const addr_expr,
                             const HWord size)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (UNLIKELY(DRD_(any_address_is_traced)()))
   {
      addStmtToIRSB(bb,
                    IRStmt_Dirty(
                                 unsafeIRDirty_0_N(/*regparms*/2,
                                                   "drd_trace_store",
                                                   VG_(fnptr_to_fnentry)
                                                   (drd_trace_mem_store),
                                                   mkIRExprVec_2(addr_expr,
                                                                 mkIRExpr_HWord(size)))));
   }

   if (! s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

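/**
 * DRD's instrumentation callback, invoked by the Valgrind core for every
 * superblock that gets translated. Copies bb_in into a new superblock and
 * inserts calls to the tracing functions above for every load, store, dirty
 * helper with a memory effect, compare-and-swap and load-linked instruction.
 * Code in PLT sections is not instrumented.
 */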
IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       VexGuestLayout* const layout,
                       VexGuestExtents* const vge,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int      i;
   IRSB*    bb;
   IRExpr** argv;
   Bool     instrument = True;

   /* Set up BB */
   bb           = emptyIRSB();
   bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next     = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
         /* Note: the code for not instrumenting the code in .plt          */
         /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21    */
         /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4).             */
         /* This is because on this platform dynamic library symbols are   */
         /* relocated in another way than by later binutils versions. The  */
         /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         instrument = VG_(DebugInfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
         {
            instrument_store(bb,
                             st->Ist.Store.addr,
                             sizeofIRType(typeOfIRExpr(bb->tyenv,
                                                       st->Ist.Store.data)));
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_WrTmp:
         if (instrument)
         {
            const IRExpr* const data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load)
            {
               instrument_load(bb,
                               data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty));
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Dirty:
         if (instrument)
         {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument)
         {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int    dataSize;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            instrument_load(bb, cas->addr, dataSize);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /* Ignore store-conditionals, and handle load-linked's
            exactly like normal loads. */
         IRType dataTy;
         if (st->Ist.LLSC.storedata == NULL)
         {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               instrument_load(bb,
                               st->Ist.LLSC.addr,
                               sizeofIRType(dataTy));
            }
         }
         else
         {
            /* SC */
            /*ignore */
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}