Lines matching references to req_szB

74       SizeT       req_szB;  member
97 tl_assert(b1->req_szB > 0); in interval_tree_Cmp()
98 tl_assert(b2->req_szB > 0); in interval_tree_Cmp()
99 if (b1->payload + b1->req_szB <= b2->payload) return -1; in interval_tree_Cmp()
100 if (b2->payload + b2->req_szB <= b1->payload) return 1; in interval_tree_Cmp()
116 && a < fbc_cache0->payload + fbc_cache0->req_szB)) { in find_Block_containing()
123 && a < fbc_cache1->payload + fbc_cache1->req_szB)) { in find_Block_containing()
133 fake.req_szB = 1; in find_Block_containing()
159 fake.req_szB = 1; in delete_Block_starting_at()
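
The interval_tree_Cmp() lines above (97-100) define the ordering the interval tree of live blocks relies on (the listing appears to come from Valgrind's DHAT tool): two blocks compare as equal exactly when their payload ranges [payload, payload+req_szB) overlap. That is why find_Block_containing() and delete_Block_starting_at() (lines 116-159) can probe the tree with a fake one-byte block at the address of interest. A minimal standalone sketch of the comparison, using simplified stand-in types rather than the real ones:

    /* Sketch of the ordering in interval_tree_Cmp() (lines 97-100).
       Addr and Block here are simplified stand-ins, not the tool's types. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned long Addr;

    typedef struct {
       Addr   payload;   /* start of the user-visible block */
       size_t req_szB;   /* requested size; never 0 once in the tree */
    } Block;

    static int interval_cmp(const Block* b1, const Block* b2)
    {
       assert(b1->req_szB > 0);
       assert(b2->req_szB > 0);
       if (b1->payload + b1->req_szB <= b2->payload) return -1; /* b1 wholly below b2 */
       if (b2->payload + b2->req_szB <= b1->payload) return  1; /* b1 wholly above b2 */
       return 0;                                                /* ranges overlap */
    }

    int main(void)
    {
       Block blk  = { 0x1000, 64 };   /* covers [0x1000, 0x1040) */
       Block fake = { 0x1020, 1 };    /* one-byte probe, as on lines 133/159 */
       printf("cmp = %d\n", interval_cmp(&fake, &blk));  /* 0: 0x1020 lies inside blk */
       return 0;
    }
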
260 api->cur_bytes_live += bk->req_szB; in intro_Block()
268 api->tot_bytes += bk->req_szB; in intro_Block()
272 g_tot_bytes += bk->req_szB; in intro_Block()
275 g_cur_bytes_live += bk->req_szB; in intro_Block()
309 bk->ap, api->cur_bytes_live, (ULong)bk->req_szB); in retire_Block()
314 tl_assert(api->cur_bytes_live >= bk->req_szB); in retire_Block()
316 api->cur_bytes_live -= bk->req_szB; in retire_Block()
326 tl_assert(g_cur_bytes_live >= bk->req_szB); in retire_Block()
327 g_cur_bytes_live -= bk->req_szB; in retire_Block()
342 api->xsize = bk->req_szB; in retire_Block()
354 if (bk->req_szB != api->xsize) { in retire_Block()
356 api, api->xsize, bk->req_szB); in retire_Block()
378 tl_assert(api->xsize == bk->req_szB); in retire_Block()
393 VG_(printf)("block retiring, histo %lu: ", bk->req_szB); in retire_Block()
395 for (i = 0; i < bk->req_szB; i++) in retire_Block()
399 VG_(printf)("block retiring, no histo %lu\n", bk->req_szB); in retire_Block()
451 void* new_block ( ThreadId tid, void* p, SizeT req_szB, SizeT req_alignB, in new_block() argument
457 if ((SSizeT)req_szB < 0) return NULL; in new_block()
459 if (req_szB == 0) in new_block()
460 req_szB = 1; /* can't allow zero-sized blocks in the interval tree */ in new_block()
464 p = VG_(cli_malloc)( req_alignB, req_szB ); in new_block()
468 if (is_zeroed) VG_(memset)(p, 0, req_szB); in new_block()
470 tl_assert(actual_szB >= req_szB); in new_block()
479 bk->req_szB = req_szB; in new_block()
486 if (req_szB <= HISTOGRAM_SIZE_LIMIT) { in new_block()
487 bk->histoW = VG_(malloc)("dh.new_block.2", req_szB * sizeof(UShort)); in new_block()
488 VG_(memset)(bk->histoW, 0, req_szB * sizeof(UShort)); in new_block()
497 if (0) VG_(printf)("ALLOC %lu -> %p\n", req_szB, p); in new_block()
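
In new_block() (lines 457-497) a negative request is rejected, a zero-byte request is bumped to one byte so the block can be stored in the interval tree, and a per-byte UShort histogram is allocated only when req_szB is at most HISTOGRAM_SIZE_LIMIT. A standalone sketch of that size handling; the limit value and the plain malloc/calloc/memset calls are placeholders for the real constant and the VG_(cli_malloc)/VG_(malloc)/VG_(memset) calls:

    /* Sketch of the size handling in new_block() (lines 457-488).
       HISTOGRAM_SIZE_LIMIT and the libc allocator calls are placeholders. */
    #include <stdlib.h>
    #include <string.h>

    #define HISTOGRAM_SIZE_LIMIT 1024   /* placeholder value */

    typedef struct {
       size_t          req_szB;
       unsigned short* histoW;          /* one counter per payload byte, or NULL */
    } Block;

    static void* new_block_sketch(size_t req_szB, Block* bk, int is_zeroed)
    {
       if ((long long)req_szB < 0) return NULL;   /* line 457's signed-size check */
       if (req_szB == 0)
          req_szB = 1;  /* can't allow zero-sized blocks in the interval tree (line 460) */

       void* p = malloc(req_szB);
       if (!p) return NULL;
       if (is_zeroed) memset(p, 0, req_szB);      /* line 468 */

       bk->req_szB = req_szB;
       bk->histoW  = NULL;
       if (req_szB <= HISTOGRAM_SIZE_LIMIT)       /* lines 486-488 */
          bk->histoW = calloc(req_szB, sizeof(unsigned short));
       return p;
    }

    int main(void)
    {
       Block bk = { 0, NULL };
       void* p  = new_block_sketch(0, &bk, 1);    /* zero-byte, zeroed request */
       int ok   = p && bk.req_szB == 1 && bk.histoW != NULL;
       free(bk.histoW);
       free(p);
       return ok ? 0 : 1;
    }
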
513 tl_assert(bk->req_szB > 0); in die_block()
516 tl_assert( (Addr)p < bk->payload + bk->req_szB ); in die_block()
551 tl_assert(bk->req_szB > 0); in renew_block()
554 tl_assert( (Addr)p_old < bk->payload + bk->req_szB ); in renew_block()
568 if (new_req_szB <= bk->req_szB) { in renew_block()
572 (Long)new_req_szB - (Long)bk->req_szB); in renew_block()
573 bk->req_szB = new_req_szB; in renew_block()
587 VG_(memcpy)(p_new, p_old, bk->req_szB); in renew_block()
599 (Long)new_req_szB - (Long)bk->req_szB); in renew_block()
601 bk->req_szB = new_req_szB; in renew_block()
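
renew_block() (lines 551-601) handles a resize in two ways: a request no larger than the current req_szB is satisfied in place, while a grow copies the old req_szB bytes into a new payload; in both cases a signed size delta (the (Long) casts on lines 572 and 599) feeds the accounting before req_szB is updated. A simplified sketch of that branch; the accounting hook and allocator calls are stand-ins, and whatever other bookkeeping the real code performs around the move is omitted:

    /* Sketch of the shrink-vs-grow branch in renew_block() (lines 568-601).
       account_delta() is a hypothetical stand-in for the accounting driven
       by the signed size delta. */
    #include <stdlib.h>
    #include <string.h>

    typedef long long Long;

    typedef struct {
       void*  payload;
       size_t req_szB;
    } Block;

    static void account_delta(Long delta_bytes) { (void)delta_bytes; }

    static void* renew_block_sketch(Block* bk, size_t new_req_szB)
    {
       if (new_req_szB <= bk->req_szB) {
          /* Shrink or same size: payload stays put (lines 568-573). */
          account_delta((Long)new_req_szB - (Long)bk->req_szB);
          bk->req_szB = new_req_szB;
          return bk->payload;
       }
       /* Grow: copy the old req_szB bytes to a larger payload (lines 587-601). */
       void* p_new = malloc(new_req_szB);
       if (!p_new) return NULL;
       memcpy(p_new, bk->payload, bk->req_szB);
       free(bk->payload);
       account_delta((Long)new_req_szB - (Long)bk->req_szB);
       bk->payload = p_new;
       bk->req_szB = new_req_szB;
       return p_new;
    }

    int main(void)
    {
       Block bk = { malloc(16), 16 };
       if (!bk.payload) return 1;
       memset(bk.payload, 0xAB, 16);
       void* p = renew_block_sketch(&bk, 64);     /* grow: contents preserved */
       int ok  = p && ((unsigned char*)p)[15] == 0xAB;
       renew_block_sketch(&bk, 8);                /* shrink in place */
       ok = ok && bk.req_szB == 8;
       free(bk.payload);
       return ok ? 0 : 1;
    }
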
675 return bk ? bk->req_szB : 0; in dh_malloc_usable_size()
688 tl_assert(offMin < bk->req_szB); in inc_histo_for_block()
690 if (offMax1 > bk->req_szB) in inc_histo_for_block()
691 offMax1 = bk->req_szB; in inc_histo_for_block()
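
Finally, inc_histo_for_block() (lines 688-691) clamps the accessed byte range to the block: the start offset is asserted to fall inside the block, and the end offset is capped at req_szB so the per-byte counters are never indexed past the end. A sketch of that clamping; the increment loop and the saturation check are assumptions, since the listing shows only the assert and the cap:

    /* Sketch of the range clamping in inc_histo_for_block() (lines 688-691).
       The loop body is assumed, not shown in the listing. */
    #include <assert.h>
    #include <stddef.h>

    typedef struct {
       size_t          req_szB;
       unsigned short* histoW;   /* req_szB counters, one per payload byte */
    } Block;

    static void inc_histo_sketch(Block* bk, size_t offMin, size_t offMax1)
    {
       assert(offMin < bk->req_szB);     /* line 688 */
       if (offMax1 > bk->req_szB)
          offMax1 = bk->req_szB;         /* lines 690-691 */
       for (size_t i = offMin; i < offMax1; i++)
          if (bk->histoW[i] < 0xFFFF)    /* saturate rather than wrap (assumption) */
             bk->histoW[i]++;
    }

    int main(void)
    {
       unsigned short counts[8] = { 0 };
       Block bk = { 8, counts };
       inc_histo_sketch(&bk, 6, 12);     /* access running past the block end */
       return (counts[6] == 1 && counts[7] == 1) ? 0 : 1;
    }
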