/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_bitmap.h"
#include "drd_thread_bitmap.h"
#include "drd_vc.h"            /* DRD_(vc_snprint)() */

/* Include several source files here in order to allow the compiler to */
/* do more inlining. */
#include "drd_bitmap.c"
#include "drd_load_store.h"
#include "drd_segment.c"
#include "drd_thread.c"
#include "drd_vc.c"
#include "libvex_guest_offsets.h"


/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#elif defined(VGA_arm64)
#define STACK_POINTER_OFFSET OFFSET_arm64_XSP
#elif defined(VGA_s390x)
#define STACK_POINTER_OFFSET OFFSET_s390x_r15
#elif defined(VGA_mips32)
#define STACK_POINTER_OFFSET OFFSET_mips32_r29
#elif defined(VGA_mips64)
#define STACK_POINTER_OFFSET OFFSET_mips64_r29
#else
#error Unknown architecture.
#endif


/* Local variables. */

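/*
 * Note: these two flags presumably correspond to the --check-stack-var and
 * --first-race-only command-line options; the option parsing itself lives
 * outside this file (e.g. in drd_main.c).
 */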
static Bool s_check_stack_accesses = False;
static Bool s_first_race_only = False;


/* Function definitions. */

Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}

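/*
 * Print a trace message for a memory access in the range [addr, addr+size[
 * if that range overlaps an address range being traced (--trace-addr). For
 * stores wider than a HWord on a 32-bit host the stored value is passed as
 * two HWord halves (hi/lo).
 */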
void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type,
                            const HWord stored_value_hi,
                            const HWord stored_value_lo)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      HChar* vc;

      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      if (access_type == eStore && size <= sizeof(HWord)) {
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %ld/0x%lx (thread %d /"
                              " vc %s)", addr, size, stored_value_lo,
                              stored_value_lo, DRD_(thread_get_running_tid)(),
                              vc);
      } else if (access_type == eStore && size > sizeof(HWord)) {
         ULong sv;

         tl_assert(sizeof(HWord) == 4);
         sv = ((ULong)stored_value_hi << 32) | stored_value_lo;
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %lld/0x%llx (thread %d"
                              " / vc %s)", addr, size, sv, sv,
                              DRD_(thread_get_running_tid)(), vc);
      } else {
         DRD_(trace_msg_w_bt)("%s 0x%lx size %ld (thread %d / vc %s)",
                              access_type == eLoad ? "load "
                              : access_type == eStore ? "store"
                              : access_type == eStart ? "start"
                              : access_type == eEnd ? "end " : "????",
                              addr, size, DRD_(thread_get_running_tid)(), vc);
      }
      VG_(free)(vc);
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}

static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eLoad, 0, 0);
}

static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr,const SizeT size,
                                              const HWord stored_value_hi,
                                              const HWord stored_value_lo)
{
   return DRD_(trace_mem_access)(addr, size, eStore, stored_value_hi,
                                 stored_value_lo);
}

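/*
 * Report a data race on the address range [addr, addr+size[, unless the
 * address is located on a thread stack and checking of stack variables has
 * been disabled (--check-stack-var=no). With --first-race-only=yes, further
 * reports on the same range are suppressed after the first race.
 */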
static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   ThreadId vg_tid;

   vg_tid = VG_(get_running_tid)();
   if (!DRD_(get_check_stack_accesses)()
       && DRD_(thread_address_on_any_stack)(addr)) {
#if 0
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
      };
      VG_(maybe_record_error)(vg_tid, GenericErr, VG_(get_IP)(vg_tid),
                              "--check-stack-var=no skips checking stack"
                              " variables shared over threads",
                              &GEI);
#endif
   } else {
      DataRaceErrInfo drei = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
         .size = size,
         .access_type = access_type,
      };
      VG_(maybe_record_error)(vg_tid, DataRaceErr, VG_(get_IP)(vg_tid),
                              "Conflicting access", &drei);

      if (s_first_race_only)
         DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}

VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* The assert below is only enabled via ENABLE_DRD_CONSISTENCY_CHECKS
      because of its performance impact. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}

VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* The assert below is only enabled via ENABLE_DRD_CONSISTENCY_CHECKS
      because of its performance impact. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}

/**
 * Return true if and only if addr_expr matches the pattern (SP) or
 * <offset>(SP).
 */
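/*
 * Illustration (hypothetical flat IR on a 64-bit guest): a store through the
 * stack pointer typically appears as
 *
 *    t3 = GET:I64(<STACK_POINTER_OFFSET>)
 *    STle(t3) = t5
 *
 * so the address temporary t3 is traced back to a Get of the stack pointer
 * register.
 */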
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      for (i = 0; i < bb->stmts_used; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}

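/*
 * Unsigned widening operations, indexed by [source type - Ity_I1] and by the
 * destination size in bytes (4 or 8). Entries that are zero mean that no
 * suitable widening operation exists.
 */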
static const IROp u_widen_irop[5][9] = {
   [Ity_I1  - Ity_I1] = { [4] = Iop_1Uto32,  [8] = Iop_1Uto64 },
   [Ity_I8  - Ity_I1] = { [4] = Iop_8Uto32,  [8] = Iop_8Uto64 },
   [Ity_I16 - Ity_I1] = { [4] = Iop_16Uto32, [8] = Iop_16Uto64 },
   [Ity_I32 - Ity_I1] = { [8] = Iop_32Uto64 },
};

/**
 * Instrument the client code to trace a memory load (--trace-addr).
 */
static IRExpr* instr_trace_mem_load(IRSB* const bb, IRExpr* addr_expr,
                                    const HWord size,
                                    IRExpr* const guard/* NULL => True */)
{
   IRTemp tmp;

   tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
   addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
   addr_expr = IRExpr_RdTmp(tmp);
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/2,
                          "drd_trace_mem_load",
                          VG_(fnptr_to_fnentry)
                          (drd_trace_mem_load),
                          mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size)));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));

   return addr_expr;
}

/**
 * Instrument the client code to trace a memory store (--trace-addr).
 */
static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr,
                                  IRExpr* data_expr_hi, IRExpr* data_expr_lo,
                                  IRExpr* const guard/* NULL => True */)
{
   IRType ty_data_expr;
   HWord size;

   tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8);
   tl_assert(!data_expr_hi || typeOfIRExpr(bb->tyenv, data_expr_hi) == Ity_I32);

   ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr_lo);
   size = sizeofIRType(ty_data_expr);

#if 0
   // Test code
   if (ty_data_expr == Ity_I32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F32);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI32asF32, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F32;
   } else if (ty_data_expr == Ity_I64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F64);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI64asF64, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F64;
   }
#endif

   if (ty_data_expr == Ity_F32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I32);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF32asI32,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I32;
   } else if (ty_data_expr == Ity_F64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I64);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF64asI64,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I64;
   }

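   /*
    * drd_trace_mem_store() receives the stored value as at most two HWords.
    * Widen integer values that are narrower than a HWord; on a 32-bit host,
    * split a 64-bit value into a hi/lo pair. Any other data type (e.g.
    * 128-bit or vector data) is traced with a dummy value of zero.
    */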
   if (size == sizeof(HWord)
       && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64))
   {
      /* No conversion necessary */
   } else {
      IROp widen_op;

      if (Ity_I1 <= ty_data_expr
          && ty_data_expr
             < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0]))
      {
         widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)];
         if (!widen_op)
            widen_op = Iop_INVALID;
      } else {
         widen_op = Iop_INVALID;
      }
      if (widen_op != Iop_INVALID) {
         IRTemp tmp;

         /* Widen the integer expression to a HWord */
         tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else if (size > sizeof(HWord) && !data_expr_hi
                 && ty_data_expr == Ity_I64) {
         IRTemp tmp;

         tl_assert(sizeof(HWord) == 4);
         tl_assert(size == 8);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp,
                                    IRExpr_Unop(Iop_64HIto32, data_expr_lo)));
         data_expr_hi = IRExpr_RdTmp(tmp);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb, IRStmt_WrTmp(tmp,
                                        IRExpr_Unop(Iop_64to32, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else {
         data_expr_lo = mkIRExpr_HWord(0);
      }
   }
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/3,
                          "drd_trace_mem_store",
                          VG_(fnptr_to_fnentry)(drd_trace_mem_store),
                          mkIRExprVec_4(addr_expr, mkIRExpr_HWord(size),
                                        data_expr_hi ? data_expr_hi
                                        : mkIRExpr_HWord(0), data_expr_lo));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di) );
}

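/**
 * Add a call to one of the drd_trace_load_<n>() functions, or to the generic
 * DRD_(trace_load)() for other sizes, in front of a load instruction, unless
 * the load accesses the stack and stack accesses are not being checked.
 */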
static void instrument_load(IRSB* const bb, IRExpr* const addr_expr,
                            const HWord size,
                            IRExpr* const guard/* NULL => True */)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

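/**
 * Add a call to one of the drd_trace_store_<n>() functions, or to the generic
 * DRD_(trace_store)() for other sizes, in front of a store instruction, and
 * also trace the stored value if any address is being traced (--trace-addr).
 * Stores to the stack are skipped unless stack accesses are being checked.
 */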
static void instrument_store(IRSB* const bb, IRExpr* addr_expr,
                             IRExpr* const data_expr,
                             IRExpr* const guard_expr/* NULL => True */)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;
   HWord size;

   size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr));

   if (UNLIKELY(DRD_(any_address_is_traced)())) {
      IRTemp tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
      addr_expr = IRExpr_RdTmp(tmp);
      instr_trace_mem_store(bb, addr_expr, NULL, data_expr, guard_expr);
   }

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   if (guard_expr) di->guard = guard_expr;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

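/**
 * Instrument the superblock bb_in: copy its statements into a new IRSB and
 * insert a call to one of the trace functions defined above in front of
 * every memory access. Code in .plt sections is not instrumented, and stack
 * accesses are skipped unless stack access checking has been enabled.
 */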
IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       VexGuestLayout* const layout,
                       VexGuestExtents* const vge,
                       VexArchInfo* archinfo_host,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int i;
   IRSB* bb;
   IRExpr** argv;
   Bool instrument = True;

   /* Set up BB */
   bb = emptyIRSB();
   bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;
   bb->offsIP = bb_in->offsIP;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
         /* Note: the code for not instrumenting the code in .plt */
         /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21 */
         /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4). */
         /* This is because on this platform dynamic library symbols are */
         /* relocated in another way than by later binutils versions. The */
         /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         instrument = VG_(DebugInfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
            instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data,
                             NULL/* no guard */);
         addStmtToIRSB(bb, st);
         break;

      case Ist_StoreG: {
         IRStoreG* sg = st->Ist.StoreG.details;
         IRExpr* data = sg->data;
         IRExpr* addr = sg->addr;
         if (instrument)
            instrument_store(bb, addr, data, sg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_LoadG: {
         IRLoadG* lg = st->Ist.LoadG.details;
         IRType type = Ity_INVALID; /* loaded type */
         IRType typeWide = Ity_INVALID; /* after implicit widening */
         IRExpr* addr_expr = lg->addr;
         typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
         tl_assert(type != Ity_INVALID);
         if (UNLIKELY(DRD_(any_address_is_traced)())) {
            addr_expr = instr_trace_mem_load(bb, addr_expr,
                                             sizeofIRType(type), lg->guard);
         }
         instrument_load(bb, lg->addr,
                         sizeofIRType(type), lg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_WrTmp:
         if (instrument) {
            const IRExpr* const data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               IRExpr* addr_expr = data->Iex.Load.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)())) {
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(data->Iex.Load.ty),
                                                   NULL/* no guard */);
               }
               instrument_load(bb, addr_expr, sizeofIRType(data->Iex.Load.ty),
                               NULL/* no guard */);
            }
         }
         addStmtToIRSB(bb, st);
         break;

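      /*
       * A dirty helper may read and/or write guest memory. Emit a
       * DRD_(trace_load)() and/or DRD_(trace_store)() call covering the
       * range [mAddr, mAddr + mSize[ that the helper declares to access.
       */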
      case Ist_Dirty:
         if (instrument) {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument) {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int dataSize;
            IRCAS* cas = st->Ist.CAS.details;

            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */

            if (UNLIKELY(DRD_(any_address_is_traced)()))
               instr_trace_mem_store(bb, cas->addr, cas->dataHi, cas->dataLo,
                                     NULL/* no guard */);

            instrument_load(bb, cas->addr, dataSize, NULL/*no guard*/);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /*
          * Ignore store-conditionals (except for tracing), and handle
          * load-linked's exactly like normal loads.
          */
         IRType dataTy;

         if (st->Ist.LLSC.storedata == NULL) {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               IRExpr* addr_expr = st->Ist.LLSC.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(dataTy),
                                                   NULL /* no guard */);

               instrument_load(bb, addr_expr, sizeofIRType(dataTy),
                               NULL/*no guard*/);
            }
         } else {
            /* SC */
            instr_trace_mem_store(bb, st->Ist.LLSC.addr, NULL,
                                  st->Ist.LLSC.storedata,
                                  NULL/* no guard */);
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}