/* -*- mode: C; c-basic-offset: 3; -*- */
/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2010 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_clientobj.h"
#include "drd_error.h"
#include "drd_rwlock.h"
#include "pub_tool_vki.h"
#include "pub_tool_errormgr.h"    // VG_(maybe_record_error)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcprint.h"   // VG_(message)()
#include "pub_tool_libcproc.h"    // VG_(read_millisecond_timer)()
#include "pub_tool_machine.h"     // VG_(get_IP)()
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()


/* Local type definitions. */

41 struct rwlock_thread_info
42 {
43    UWord    tid;                 // DrdThreadId.
44    UInt     reader_nesting_count;
45    UInt     writer_nesting_count;
46    // Segment of last unlock call by this thread that unlocked a writer lock.
47    Segment* latest_wrlocked_segment;
48    // Segment of last unlock call by this thread that unlocked a reader lock.
49    Segment* latest_rdlocked_segment;
50 };


/* Local functions. */

55 static void rwlock_cleanup(struct rwlock_info* p);
56 static void rwlock_delete_thread(struct rwlock_info* const p,
57                                  const DrdThreadId tid);


/* Local variables. */

62 static Bool DRD_(s_trace_rwlock);
63 static UInt DRD_(s_exclusive_threshold_ms);
64 static UInt DRD_(s_shared_threshold_ms);
65 static ULong DRD_(s_rwlock_segment_creation_count);


/* Function definitions. */

DRD_(rwlock_set_trace)70 void DRD_(rwlock_set_trace)(const Bool trace_rwlock)
71 {
72    tl_assert(trace_rwlock == False || trace_rwlock == True);
73    DRD_(s_trace_rwlock) = trace_rwlock;
74 }
75 
DRD_(rwlock_set_exclusive_threshold)76 void DRD_(rwlock_set_exclusive_threshold)(const UInt exclusive_threshold_ms)
77 {
78    DRD_(s_exclusive_threshold_ms) = exclusive_threshold_ms;
79 }
80 
DRD_(rwlock_set_shared_threshold)81 void DRD_(rwlock_set_shared_threshold)(const UInt shared_threshold_ms)
82 {
83    DRD_(s_shared_threshold_ms) = shared_threshold_ms;
84 }
85 
DRD_(rwlock_is_rdlocked)86 static Bool DRD_(rwlock_is_rdlocked)(struct rwlock_info* p)
87 {
88    struct rwlock_thread_info* q;
89 
90    VG_(OSetGen_ResetIter)(p->thread_info);
91    for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
92    {
93       return q->reader_nesting_count > 0;
94    }
95    return False;
96 }
97 
DRD_(rwlock_is_wrlocked)98 static Bool DRD_(rwlock_is_wrlocked)(struct rwlock_info* p)
99 {
100    struct rwlock_thread_info* q;
101 
102    VG_(OSetGen_ResetIter)(p->thread_info);
103    for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
104    {
105       return q->writer_nesting_count > 0;
106    }
107    return False;
108 }
109 
DRD_(rwlock_is_locked)110 static Bool DRD_(rwlock_is_locked)(struct rwlock_info* p)
111 {
112    return DRD_(rwlock_is_rdlocked)(p) || DRD_(rwlock_is_wrlocked)(p);
113 }
114 
DRD_(rwlock_is_rdlocked_by)115 static Bool DRD_(rwlock_is_rdlocked_by)(struct rwlock_info* p,
116                                         const DrdThreadId tid)
117 {
118    const UWord uword_tid = tid;
119    struct rwlock_thread_info* q;
120 
121    q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid);
122    return q && q->reader_nesting_count > 0;
123 }
124 
DRD_(rwlock_is_wrlocked_by)125 static Bool DRD_(rwlock_is_wrlocked_by)(struct rwlock_info* p,
126                                         const DrdThreadId tid)
127 {
128    const UWord uword_tid = tid;
129    struct rwlock_thread_info* q;
130 
131    q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid);
132    return q && q->writer_nesting_count > 0;
133 }
134 
DRD_(rwlock_is_locked_by)135 static Bool DRD_(rwlock_is_locked_by)(struct rwlock_info* p,
136                                       const DrdThreadId tid)
137 {
138    return (DRD_(rwlock_is_rdlocked_by)(p, tid)
139            || DRD_(rwlock_is_wrlocked_by)(p, tid));
140 }
141 
142 /** Either look up or insert a node corresponding to DRD thread id 'tid'. */
143 static
144 struct rwlock_thread_info*
DRD_(lookup_or_insert_node)145 DRD_(lookup_or_insert_node)(OSet* oset, const UWord tid)
146 {
147    struct rwlock_thread_info* q;
148 
149    q = VG_(OSetGen_Lookup)(oset, &tid);
150    if (q == 0)
151    {
152       q = VG_(OSetGen_AllocNode)(oset, sizeof(*q));
153       q->tid                       = tid;
154       q->reader_nesting_count      = 0;
155       q->writer_nesting_count      = 0;
156       q->latest_wrlocked_segment   = 0;
157       q->latest_rdlocked_segment   = 0;
158       VG_(OSetGen_Insert)(oset, q);
159    }
160    tl_assert(q);
161    return q;
162 }
163 
164 /**
165  * Combine the vector clock corresponding to the last unlock operation of
166  * reader-writer lock p into the vector clock of thread 'tid'.
167  */
DRD_(rwlock_combine_other_vc)168 static void DRD_(rwlock_combine_other_vc)(struct rwlock_info* const p,
169                                           const DrdThreadId tid,
170                                           const Bool readers_too)
171 {
172    struct rwlock_thread_info* q;
173    VectorClock old_vc;
174 
175    DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[tid].last->vc);
176    VG_(OSetGen_ResetIter)(p->thread_info);
177    for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
178    {
179       if (q->tid != tid)
180       {
181          if (q->latest_wrlocked_segment)
182          {
183             DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc,
184                              &q->latest_wrlocked_segment->vc);
185          }
186          if (readers_too && q->latest_rdlocked_segment)
187          {
188             DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc,
189                              &q->latest_rdlocked_segment->vc);
190          }
191       }
192    }
193    DRD_(thread_update_conflict_set)(tid, &old_vc);
194    DRD_(vc_cleanup)(&old_vc);
195 }
196 
197 /**
198  * Compare the type of the rwlock specified at initialization time with
199  * the type passed as an argument, and complain if these two types do not
200  * match.
201  */
drd_rwlock_check_type(struct rwlock_info * const p,const RwLockT rwlock_type)202 static Bool drd_rwlock_check_type(struct rwlock_info* const p,
203                                   const RwLockT rwlock_type)
204 {
205    tl_assert(p);
206    /* The code below has to be updated if additional rwlock types are added. */
207    tl_assert(rwlock_type == pthread_rwlock || rwlock_type == user_rwlock);
208    tl_assert(p->rwlock_type == pthread_rwlock || p->rwlock_type == user_rwlock);
209 
210    if (p->rwlock_type == rwlock_type)
211       return True;
212 
213    {
214       RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
215       VG_(maybe_record_error)
216          (VG_(get_running_tid)(),
217           RwlockErr,
218           VG_(get_IP)(VG_(get_running_tid)()),
219           rwlock_type == pthread_rwlock
220           ? "Attempt to use a user-defined rwlock as a POSIX rwlock"
221           : "Attempt to use a POSIX rwlock as a user-defined rwlock",
222           &REI);
223    }
224    return False;
225 }
226 
227 /** Initialize the rwlock_info data structure *p. */
228 static
DRD_(rwlock_initialize)229 void DRD_(rwlock_initialize)(struct rwlock_info* const p, const Addr rwlock,
230                              const RwLockT rwlock_type)
231 {
232    tl_assert(rwlock != 0);
233    tl_assert(p->a1 == rwlock);
234    tl_assert(p->type == ClientRwlock);
235 
236    p->cleanup         = (void(*)(DrdClientobj*))rwlock_cleanup;
237    p->delete_thread
238       = (void(*)(DrdClientobj*, DrdThreadId))rwlock_delete_thread;
239    p->rwlock_type     = rwlock_type;
240    p->thread_info     = VG_(OSetGen_Create)(
241       0, 0, VG_(malloc), "drd.rwlock.ri.1", VG_(free));
242    p->acquiry_time_ms = 0;
243    p->acquired_at     = 0;
244 }
245 
246 /** Deallocate the memory that was allocated by rwlock_initialize(). */
rwlock_cleanup(struct rwlock_info * p)247 static void rwlock_cleanup(struct rwlock_info* p)
248 {
249    struct rwlock_thread_info* q;
250 
251    tl_assert(p);
252 
253    if (DRD_(s_trace_rwlock))
254    {
255       VG_(message)(Vg_UserMsg,
256                    "[%d] rwlock_destroy     0x%lx\n",
257                    DRD_(thread_get_running_tid)(),
258                    p->a1);
259    }
260 
261    if (DRD_(rwlock_is_locked)(p))
262    {
263       RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
264       VG_(maybe_record_error)(VG_(get_running_tid)(),
265                               RwlockErr,
266                               VG_(get_IP)(VG_(get_running_tid)()),
267                               "Destroying locked rwlock",
268                               &REI);
269    }
270 
271    VG_(OSetGen_ResetIter)(p->thread_info);
272    for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
273    {
274       DRD_(sg_put)(q->latest_wrlocked_segment);
275       DRD_(sg_put)(q->latest_rdlocked_segment);
276    }
277 
278    VG_(OSetGen_Destroy)(p->thread_info);
279 }
280 
281 static
282 struct rwlock_info*
DRD_(rwlock_get_or_allocate)283 DRD_(rwlock_get_or_allocate)(const Addr rwlock, const RwLockT rwlock_type)
284 {
285    struct rwlock_info* p;
286 
287    tl_assert(offsetof(DrdClientobj, rwlock) == 0);
288    p = &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
289    if (p)
290    {
291       drd_rwlock_check_type(p, rwlock_type);
292       return p;
293    }
294 
295    if (DRD_(clientobj_present)(rwlock, rwlock + 1))
296    {
297       GenericErrInfo GEI = {
298 	 .tid  = DRD_(thread_get_running_tid)(),
299 	 .addr = rwlock,
300       };
301       VG_(maybe_record_error)(VG_(get_running_tid)(),
302                               GenericErr,
303                               VG_(get_IP)(VG_(get_running_tid)()),
304                               "Not a reader-writer lock",
305                               &GEI);
306       return 0;
307    }
308 
309    p = &(DRD_(clientobj_add)(rwlock, ClientRwlock)->rwlock);
310    DRD_(rwlock_initialize)(p, rwlock, rwlock_type);
311    return p;
312 }
313 
DRD_(rwlock_get)314 static struct rwlock_info* DRD_(rwlock_get)(const Addr rwlock)
315 {
316    tl_assert(offsetof(DrdClientobj, rwlock) == 0);
317    return &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
318 }
319 
320 /** Called before pthread_rwlock_init(). */
DRD_(rwlock_pre_init)321 struct rwlock_info* DRD_(rwlock_pre_init)(const Addr rwlock,
322                                           const RwLockT rwlock_type)
323 {
324    struct rwlock_info* p;
325 
326    if (DRD_(s_trace_rwlock))
327    {
328       VG_(message)(Vg_UserMsg,
329                    "[%d] rwlock_init        0x%lx\n",
330                    DRD_(thread_get_running_tid)(),
331                    rwlock);
332    }
333 
334    p = DRD_(rwlock_get)(rwlock);
335 
336    if (p)
337 	drd_rwlock_check_type(p, rwlock_type);
338 
339    if (p)
340    {
341       const ThreadId vg_tid = VG_(get_running_tid)();
342       RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
343       VG_(maybe_record_error)(vg_tid,
344                               RwlockErr,
345                               VG_(get_IP)(vg_tid),
346                               "Reader-writer lock reinitialization",
347                               &REI);
348       return p;
349    }
350 
351    p = DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type);
352 
353    return p;
354 }
355 
356 /** Called after pthread_rwlock_destroy(). */
DRD_(rwlock_post_destroy)357 void DRD_(rwlock_post_destroy)(const Addr rwlock, const RwLockT rwlock_type)
358 {
359    struct rwlock_info* p;
360 
361    p = DRD_(rwlock_get)(rwlock);
362    if (p == 0)
363    {
364       GenericErrInfo GEI = {
365 	 .tid = DRD_(thread_get_running_tid)(),
366 	 .addr = rwlock,
367       };
368       VG_(maybe_record_error)(VG_(get_running_tid)(),
369                               GenericErr,
370                               VG_(get_IP)(VG_(get_running_tid)()),
371                               "Not a reader-writer lock",
372                               &GEI);
373       return;
374    }
375 
376    drd_rwlock_check_type(p, rwlock_type);
377 
378    DRD_(clientobj_remove)(rwlock, ClientRwlock);
379 }
380 
381 /**
382  * Called before pthread_rwlock_rdlock() is invoked. If a data structure for
383  * the client-side object was not yet created, do this now. Also check whether
384  * an attempt is made to lock recursively a synchronization object that must
385  * not be locked recursively.
386  */
DRD_(rwlock_pre_rdlock)387 void DRD_(rwlock_pre_rdlock)(const Addr rwlock, const RwLockT rwlock_type)
388 {
389    struct rwlock_info* p;
390 
391    if (DRD_(s_trace_rwlock))
392    {
393       VG_(message)(Vg_UserMsg,
394                    "[%d] pre_rwlock_rdlock  0x%lx\n",
395                    DRD_(thread_get_running_tid)(),
396                    rwlock);
397    }
398 
399    p = DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type);
400    tl_assert(p);
401 
402    if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
403    {
404       VG_(message)(Vg_UserMsg,
405                    "reader-writer lock 0x%lx is already locked for"
406                    " writing by calling thread\n",
407                    p->a1);
408    }
409 }
410 
411 /**
412  * Update rwlock_info state when locking the pthread_rwlock_t mutex.
413  * Note: this function must be called after pthread_rwlock_rdlock() has been
414  * called, or a race condition is triggered !
415  */
DRD_(rwlock_post_rdlock)416 void DRD_(rwlock_post_rdlock)(const Addr rwlock, const RwLockT rwlock_type,
417                               const Bool took_lock)
418 {
419    const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
420    struct rwlock_info* p;
421    struct rwlock_thread_info* q;
422 
423    if (DRD_(s_trace_rwlock))
424    {
425       VG_(message)(Vg_UserMsg,
426                    "[%d] post_rwlock_rdlock 0x%lx\n",
427                    drd_tid,
428                    rwlock);
429    }
430 
431    p = DRD_(rwlock_get)(rwlock);
432 
433    if (! p || ! took_lock)
434       return;
435 
436    tl_assert(! DRD_(rwlock_is_wrlocked)(p));
437 
438    q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
439    if (++q->reader_nesting_count == 1)
440    {
441       DRD_(thread_new_segment)(drd_tid);
442       DRD_(s_rwlock_segment_creation_count)++;
443       DRD_(rwlock_combine_other_vc)(p, drd_tid, False);
444 
445       p->acquiry_time_ms = VG_(read_millisecond_timer)();
446       p->acquired_at     = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
447    }
448 }
449 
450 /**
451  * Called before pthread_rwlock_wrlock() is invoked. If a data structure for
452  * the client-side object was not yet created, do this now. Also check whether
453  * an attempt is made to lock recursively a synchronization object that must
454  * not be locked recursively.
455  */
DRD_(rwlock_pre_wrlock)456 void DRD_(rwlock_pre_wrlock)(const Addr rwlock, const RwLockT rwlock_type)
457 {
458    struct rwlock_info* p;
459 
460    p = DRD_(rwlock_get)(rwlock);
461 
462    if (DRD_(s_trace_rwlock))
463    {
464       VG_(message)(Vg_UserMsg,
465                    "[%d] pre_rwlock_wrlock  0x%lx\n",
466                    DRD_(thread_get_running_tid)(),
467                    rwlock);
468    }
469 
470    if (p == 0)
471       p = DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type);
472 
473    tl_assert(p);
474 
475    if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
476    {
477       RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
478       VG_(maybe_record_error)(VG_(get_running_tid)(),
479                               RwlockErr,
480                               VG_(get_IP)(VG_(get_running_tid)()),
481                               "Recursive writer locking not allowed",
482                               &REI);
483    }
484 }
485 
486 /**
487  * Update rwlock_info state when locking the pthread_rwlock_t rwlock.
488  * Note: this function must be called after pthread_rwlock_wrlock() has
489  * finished, or a race condition is triggered !
490  */
DRD_(rwlock_post_wrlock)491 void DRD_(rwlock_post_wrlock)(const Addr rwlock, const RwLockT rwlock_type,
492                               const Bool took_lock)
493 {
494    const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
495    struct rwlock_info* p;
496    struct rwlock_thread_info* q;
497 
498    p = DRD_(rwlock_get)(rwlock);
499 
500    if (DRD_(s_trace_rwlock))
501    {
502       VG_(message)(Vg_UserMsg,
503                    "[%d] post_rwlock_wrlock 0x%lx\n",
504                    drd_tid,
505                    rwlock);
506    }
507 
508    if (! p || ! took_lock)
509       return;
510 
511    q = DRD_(lookup_or_insert_node)(p->thread_info,
512                                    DRD_(thread_get_running_tid)());
513    tl_assert(q->writer_nesting_count == 0);
514    q->writer_nesting_count++;
515    tl_assert(q->writer_nesting_count == 1);
516    DRD_(thread_new_segment)(drd_tid);
517    DRD_(s_rwlock_segment_creation_count)++;
518    DRD_(rwlock_combine_other_vc)(p, drd_tid, True);
519    p->acquiry_time_ms = VG_(read_millisecond_timer)();
520    p->acquired_at     = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
521 }
522 
523 /**
524  * Update rwlock_info state when unlocking the pthread_rwlock_t rwlock.
525  *
526  * @param rwlock Pointer to pthread_rwlock_t data structure in the client space.
527  *
528  * @return New value of the rwlock recursion count.
529  *
530  * @note This function must be called before pthread_rwlock_unlock() is called,
531  *   or a race condition is triggered !
532  */
DRD_(rwlock_pre_unlock)533 void DRD_(rwlock_pre_unlock)(const Addr rwlock, const RwLockT rwlock_type)
534 {
535    const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
536    const ThreadId vg_tid = VG_(get_running_tid)();
537    struct rwlock_info* p;
538    struct rwlock_thread_info* q;
539 
540    if (DRD_(s_trace_rwlock))
541    {
542       VG_(message)(Vg_UserMsg,
543                    "[%d] rwlock_unlock      0x%lx\n",
544                    drd_tid,
545                    rwlock);
546    }
547 
548    p = DRD_(rwlock_get)(rwlock);
549    if (p == 0)
550    {
551       GenericErrInfo GEI = {
552 	 .tid = DRD_(thread_get_running_tid)(),
553 	 .addr = rwlock,
554       };
555       VG_(maybe_record_error)(VG_(get_running_tid)(),
556                               GenericErr,
557                               VG_(get_IP)(VG_(get_running_tid)()),
558                               "Not a reader-writer lock",
559                               &GEI);
560       return;
561    }
562 
563    drd_rwlock_check_type(p, rwlock_type);
564 
565    if (! DRD_(rwlock_is_locked_by)(p, drd_tid))
566    {
567       RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
568       VG_(maybe_record_error)(vg_tid,
569                               RwlockErr,
570                               VG_(get_IP)(vg_tid),
571                               "Reader-writer lock not locked by calling thread",
572                               &REI);
573       return;
574    }
575    q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
576    tl_assert(q);
577    if (q->reader_nesting_count > 0)
578    {
579       q->reader_nesting_count--;
580       if (q->reader_nesting_count == 0 && DRD_(s_shared_threshold_ms) > 0)
581       {
582          Long held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
583          if (held > DRD_(s_shared_threshold_ms))
584          {
585             HoldtimeErrInfo HEI
586                = { DRD_(thread_get_running_tid)(),
587                    rwlock, p->acquired_at, held, DRD_(s_shared_threshold_ms) };
588             VG_(maybe_record_error)(vg_tid,
589                                     HoldtimeErr,
590                                     VG_(get_IP)(vg_tid),
591                                     "rwlock",
592                                     &HEI);
593          }
594       }
595       if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
596       {
597          /*
598           * This pthread_rwlock_unlock() call really unlocks the rwlock. Save
599           * the current vector clock of the thread such that it is available
600           * when this rwlock is locked again.
601           */
602          DRD_(thread_get_latest_segment)(&q->latest_rdlocked_segment, drd_tid);
603          DRD_(thread_new_segment)(drd_tid);
604          DRD_(s_rwlock_segment_creation_count)++;
605       }
606    }
607    else if (q->writer_nesting_count > 0)
608    {
609       q->writer_nesting_count--;
610       if (q->writer_nesting_count == 0 && DRD_(s_exclusive_threshold_ms) > 0)
611       {
612          Long held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
613          if (held > DRD_(s_exclusive_threshold_ms))
614          {
615             HoldtimeErrInfo HEI
616                = { DRD_(thread_get_running_tid)(),
617                    rwlock, p->acquired_at, held,
618                    DRD_(s_exclusive_threshold_ms) };
619             VG_(maybe_record_error)(vg_tid,
620                                     HoldtimeErr,
621                                     VG_(get_IP)(vg_tid),
622                                     "rwlock",
623                                     &HEI);
624          }
625       }
626       if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
627       {
628          /*
629           * This pthread_rwlock_unlock() call really unlocks the rwlock. Save
630           * the current vector clock of the thread such that it is available
631           * when this rwlock is locked again.
632           */
633          DRD_(thread_get_latest_segment)(&q->latest_wrlocked_segment, drd_tid);
634          DRD_(thread_new_segment)(drd_tid);
635          DRD_(s_rwlock_segment_creation_count)++;
636       }
637    }
638    else
639    {
640       tl_assert(False);
641    }
642 }
643 
644 /** Called when thread tid stops to exist. */
rwlock_delete_thread(struct rwlock_info * const p,const DrdThreadId tid)645 static void rwlock_delete_thread(struct rwlock_info* const p,
646                                  const DrdThreadId tid)
647 {
648    struct rwlock_thread_info* q;
649 
650    if (DRD_(rwlock_is_locked_by)(p, tid))
651    {
652       RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
653       VG_(maybe_record_error)(VG_(get_running_tid)(),
654                               RwlockErr,
655                               VG_(get_IP)(VG_(get_running_tid)()),
656                               "Reader-writer lock still locked at thread exit",
657                               &REI);
658       q = DRD_(lookup_or_insert_node)(p->thread_info, tid);
659       q->reader_nesting_count = 0;
660       q->writer_nesting_count = 0;
661    }
662 }
663 
DRD_(get_rwlock_segment_creation_count)664 ULong DRD_(get_rwlock_segment_creation_count)(void)
665 {
666    return DRD_(s_rwlock_segment_creation_count);
667 }
668