/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

static size_t accumulate_large_allocs(arena_t* arena) {
  size_t total_bytes = 0;

  /* Accumulate the large allocation stats.
   * Do not include stats.allocated_large; it is only updated by
   * arena_stats_merge and would double-count the data accumulated below.
   */
  for (unsigned j = 0; j < NSIZES - NBINS; j++) {
    /* Read ndalloc before nmalloc: both counters only increase, so this
     * ordering guarantees nmalloc >= ndalloc and keeps the subtraction
     * below from wrapping.
     */
    uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats,
                                            &arena->stats.lstats[j].ndalloc);
    uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats,
                                            &arena->stats.lstats[j].nmalloc);
    size_t allocs = (size_t)(nmalloc - ndalloc);
    total_bytes += sz_index2size(NBINS + j) * allocs;
  }
  return total_bytes;
}

static size_t accumulate_small_allocs(arena_t* arena) {
  size_t total_bytes = 0;
  for (unsigned j = 0; j < NBINS; j++) {
    bin_t* bin = &arena->bins[j];

    /* NOTE: This includes allocations cached on every thread. */
    malloc_mutex_lock(TSDN_NULL, &bin->lock);
    total_bytes += bin_infos[j].reg_size * bin->stats.curregs;
    malloc_mutex_unlock(TSDN_NULL, &bin->lock);
  }
  return total_bytes;
}

/* Only use bin locks since the stats are now all atomic and can be read
 * without taking the stats lock.
 */
struct mallinfo je_mallinfo() {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  for (unsigned i = 0; i < narenas_auto; i++) {
    arena_t* arena = atomic_load_p(&arenas[i], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);

      mi.uordblks += accumulate_small_allocs(arena);
      mi.uordblks += accumulate_large_allocs(arena);
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  /* hblkhd holds the total mapped space and uordblks the bytes in live
   * allocations; all remaining mapped space is reported as free.
   */
  mi.fordblks = mi.hblkhd - mi.uordblks;
  mi.usmblks = mi.hblkhd;
  return mi;
}
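
/* Illustrative usage sketch, not part of the original file: a caller could
 * summarize heap usage from je_mallinfo() as below. The helper name
 * log_heap_usage is a hypothetical assumption; the field meanings follow
 * the assignments above, and malloc_printf is jemalloc's internal printf.
 */
static void log_heap_usage(void) {
  struct mallinfo mi = je_mallinfo();
  malloc_printf("heap: mapped %zu, allocated %zu, free %zu\n",
                (size_t)mi.hblkhd, (size_t)mi.uordblks, (size_t)mi.fordblks);
}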

size_t je_mallinfo_narenas() {
  return narenas_auto;
}

size_t je_mallinfo_nbins() {
  return NBINS;
}

struct mallinfo je_mallinfo_arena_info(size_t aidx) {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  if (aidx < narenas_auto) {
    arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      mi.hblkhd = atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
      mi.ordblks = accumulate_large_allocs(arena);
      mi.fsmblks = accumulate_small_allocs(arena);
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  return mi;
}
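
/* Illustrative sketch, not part of the original file: walking every arena
 * with the accessors above. The helper name dump_arena_stats is a
 * hypothetical assumption. Per je_mallinfo_arena_info(), hblkhd holds the
 * arena's mapped bytes, ordblks its large-allocation bytes, and fsmblks its
 * small-allocation bytes.
 */
static void dump_arena_stats(void) {
  for (size_t a = 0; a < je_mallinfo_narenas(); a++) {
    struct mallinfo mi = je_mallinfo_arena_info(a);
    malloc_printf("arena %zu: mapped %zu, large %zu, small %zu\n",
                  a, (size_t)mi.hblkhd, (size_t)mi.ordblks,
                  (size_t)mi.fsmblks);
  }
}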

struct mallinfo je_mallinfo_bin_info(size_t aidx, size_t bidx) {
  struct mallinfo mi;
  memset(&mi, 0, sizeof(mi));

  malloc_mutex_lock(TSDN_NULL, &arenas_lock);
  if (aidx < narenas_auto && bidx < NBINS) {
    arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
    if (arena != NULL) {
      bin_t* bin = &arena->bins[bidx];

      malloc_mutex_lock(TSDN_NULL, &bin->lock);
      /* ordblks is in bytes; uordblks and fordblks are repurposed to carry
       * this bin's cumulative allocation and deallocation counts.
       */
      mi.ordblks = bin_infos[bidx].reg_size * bin->stats.curregs;
      mi.uordblks = (size_t) bin->stats.nmalloc;
      mi.fordblks = (size_t) bin->stats.ndalloc;
      malloc_mutex_unlock(TSDN_NULL, &bin->lock);
    }
  }
  malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
  return mi;
}
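
/* Illustrative sketch, not part of the original file: walking the bins of
 * one arena. The helper name dump_bin_stats is a hypothetical assumption.
 * Note the repurposed fields per je_mallinfo_bin_info() above: ordblks is
 * in bytes, while uordblks and fordblks carry cumulative malloc/free
 * counts, not bytes.
 */
static void dump_bin_stats(size_t aidx) {
  for (size_t b = 0; b < je_mallinfo_nbins(); b++) {
    struct mallinfo mi = je_mallinfo_bin_info(aidx, b);
    malloc_printf("bin %zu: allocated %zu bytes, nmalloc %zu, ndalloc %zu\n",
                  b, (size_t)mi.ordblks, (size_t)mi.uordblks,
                  (size_t)mi.fordblks);
  }
}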