Lines Matching refs:slabs

Cross-reference matches for the identifier slabs in Mesa's pb_slab suballocator (pb_slab.c). Each entry shows the source line number, the matching line, and either the enclosing function or the kind of reference (member, argument).

49    struct list_head slabs;  member
54 pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry) in pb_slab_reclaim() argument
64 struct pb_slab_group *group = &slabs->groups[entry->group_index]; in pb_slab_reclaim()
65 list_addtail(&slab->head, &group->slabs); in pb_slab_reclaim()
70 slabs->slab_free(slabs->priv, slab); in pb_slab_reclaim()
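Taken together, the pb_slab_reclaim() matches above show the reclaim step: the entry's parent slab is re-linked onto its group's slab list via list_addtail() so the freed entry becomes allocatable again, and once a slab has no entries left in use it is handed back through the slab_free callback. Below is a minimal, hypothetical model of that logic; none of the types are Mesa's, they only mirror the behavior of the matched lines.

#include <stdio.h>
#include <stdlib.h>

struct mini_slab {
   unsigned num_free;    /* entries currently free in this slab */
   unsigned num_entries; /* total entries the slab was carved into */
};

/* Stand-in for the slabs->slab_free() callback. */
static void mini_slab_free(struct mini_slab *slab)
{
   printf("slab fully free, returning %u entries to the allocator\n",
          slab->num_entries);
   free(slab);
}

/* Mirrors pb_slab_reclaim(): give one entry back to its slab; once every
 * entry is free, release the whole slab through the free callback. In the
 * real code the slab is also re-added to group->slabs at this point so
 * future allocations can reuse the freed entry. */
static void mini_reclaim(struct mini_slab *slab)
{
   slab->num_free++;
   if (slab->num_free == slab->num_entries)
      mini_slab_free(slab);
}

int main(void)
{
   struct mini_slab *slab = calloc(1, sizeof(*slab));
   if (!slab)
      return 1;
   slab->num_entries = 2;
   mini_reclaim(slab); /* one of two entries returns: slab is kept */
   mini_reclaim(slab); /* last entry returns: slab is freed */
   return 0;
}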
77 pb_slabs_reclaim_locked(struct pb_slabs *slabs) in pb_slabs_reclaim_locked() argument
81 LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs->reclaim, head) { in pb_slabs_reclaim_locked()
82 if (slabs->can_reclaim(slabs->priv, entry)) { in pb_slabs_reclaim_locked()
83 pb_slab_reclaim(slabs, entry); in pb_slabs_reclaim_locked()
99 pb_slabs_reclaim_all_locked(struct pb_slabs *slabs) in pb_slabs_reclaim_all_locked() argument
102 LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs->reclaim, head) { in pb_slabs_reclaim_all_locked()
103 if (slabs->can_reclaim(slabs->priv, entry)) { in pb_slabs_reclaim_all_locked()
104 pb_slab_reclaim(slabs, entry); in pb_slabs_reclaim_all_locked()
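Both reclaim walkers iterate slabs->reclaim with LIST_FOR_EACH_ENTRY_SAFE because pb_slab_reclaim() unlinks the current entry from the list as a side effect; the _SAFE variant caches the next node before the loop body runs. A bare-bones illustration of that pattern, using a stand-in list rather than Mesa's util/list.h:

#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_del(struct node *n)
{
   n->prev->next = n->next;
   n->next->prev = n->prev;
}

int main(void)
{
   struct node head, a, b;
   /* build head <-> a <-> b <-> head */
   head.next = &a; a.prev = &head;
   a.next = &b;    b.prev = &a;
   b.next = &head; head.prev = &b;

   /* Safe iteration: capture 'next' before possibly unlinking 'cur',
    * which is exactly the guarantee LIST_FOR_EACH_ENTRY_SAFE provides. */
   for (struct node *cur = head.next, *next = cur->next;
        cur != &head;
        cur = next, next = cur->next) {
      list_del(cur); /* would corrupt a non-SAFE loop's cursor */
   }

   printf("list empty: %s\n", head.next == &head ? "yes" : "no");
   return 0;
}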
119 pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bool reclaim_all) in pb_slab_alloc_reclaimed() argument
121 unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size)); in pb_slab_alloc_reclaimed()
132 if (slabs->allow_three_fourths_allocations && size <= entry_size * 3 / 4) { in pb_slab_alloc_reclaimed()
137 assert(order < slabs->min_order + slabs->num_orders); in pb_slab_alloc_reclaimed()
138 assert(heap < slabs->num_heaps); in pb_slab_alloc_reclaimed()
140 group_index = (heap * slabs->num_orders + (order - slabs->min_order)) * in pb_slab_alloc_reclaimed()
141 (1 + slabs->allow_three_fourths_allocations) + three_fourths; in pb_slab_alloc_reclaimed()
142 group = &slabs->groups[group_index]; in pb_slab_alloc_reclaimed()
144 simple_mtx_lock(&slabs->mutex); in pb_slab_alloc_reclaimed()
149 if (list_is_empty(&group->slabs) || in pb_slab_alloc_reclaimed()
150 list_is_empty(&list_entry(group->slabs.next, struct pb_slab, head)->free)) { in pb_slab_alloc_reclaimed()
152 pb_slabs_reclaim_all_locked(slabs); in pb_slab_alloc_reclaimed()
154 pb_slabs_reclaim_locked(slabs); in pb_slab_alloc_reclaimed()
158 while (!list_is_empty(&group->slabs)) { in pb_slab_alloc_reclaimed()
159 slab = list_entry(group->slabs.next, struct pb_slab, head); in pb_slab_alloc_reclaimed()
166 if (list_is_empty(&group->slabs)) { in pb_slab_alloc_reclaimed()
174 simple_mtx_unlock(&slabs->mutex); in pb_slab_alloc_reclaimed()
175 slab = slabs->slab_alloc(slabs->priv, heap, entry_size, group_index); in pb_slab_alloc_reclaimed()
178 simple_mtx_lock(&slabs->mutex); in pb_slab_alloc_reclaimed()
180 list_add(&slab->head, &group->slabs); in pb_slab_alloc_reclaimed()
187 simple_mtx_unlock(&slabs->mutex); in pb_slab_alloc_reclaimed()
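The pb_slab_alloc_reclaimed() matches show the bucketing scheme: the request is rounded up to a power of two no smaller than min_order, optionally demoted to a three-fourths bucket when it fits in 3/4 of the entry size, and the resulting (heap, order, three_fourths) triple is flattened into an index into the groups array. When the chosen group has no slab with a free entry, the reclaim list is drained (fully when reclaim_all is set), and failing that a new slab is allocated through the slab_alloc callback with the mutex dropped. The following is a standalone, hypothetical recomputation of the index math; MIN_ORDER, NUM_ORDERS, and NUM_HEAPS are made-up example values.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MIN_ORDER  8   /* smallest bucket: 256 bytes (assumption) */
#define NUM_ORDERS 5   /* buckets up to 4096 bytes (assumption) */
#define NUM_HEAPS  2
#define ALLOW_3_4  true

/* Equivalent of util_logbase2_ceil() for the sizes used here. */
static unsigned log2_ceil(unsigned v)
{
   unsigned order = 0;
   while ((1u << order) < v)
      order++;
   return order;
}

static unsigned group_index(unsigned size, unsigned heap)
{
   unsigned order = log2_ceil(size);
   if (order < MIN_ORDER)
      order = MIN_ORDER;               /* MAX2(min_order, ...) */

   unsigned entry_size = 1u << order;
   bool three_fourths = false;

   /* If the request also fits in 3/4 of the bucket, use the dedicated
    * three-fourths group to reduce internal fragmentation. */
   if (ALLOW_3_4 && size <= entry_size * 3 / 4)
      three_fourths = true;

   assert(order < MIN_ORDER + NUM_ORDERS);
   assert(heap < NUM_HEAPS);

   /* Same flattening as the matched lines above. */
   return (heap * NUM_ORDERS + (order - MIN_ORDER)) *
          (1 + ALLOW_3_4) + three_fourths;
}

int main(void)
{
   printf("256B on heap 0 -> group %u\n", group_index(256, 0)); /* 0 */
   printf("384B on heap 1 -> group %u\n", group_index(384, 1)); /* 13 */
   return 0;
}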
193 pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap) in pb_slab_alloc() argument
195 return pb_slab_alloc_reclaimed(slabs, size, heap, false); in pb_slab_alloc()
205 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry) in pb_slab_free() argument
207 simple_mtx_lock(&slabs->mutex); in pb_slab_free()
208 list_addtail(&entry->head, &slabs->reclaim); in pb_slab_free()
209 simple_mtx_unlock(&slabs->mutex); in pb_slab_free()
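Note that pb_slab_free() never returns the entry to its slab directly: it only parks the entry on slabs->reclaim under the mutex, and the entry becomes reusable once a later reclaim pass sees can_reclaim() return true. In GPU drivers that callback typically polls a fence recording the buffer's last use. A hypothetical sketch of such a callback; the fence and entry types below are stand-ins, not a real driver API:

#include <stdbool.h>
#include <stddef.h>

struct mini_fence { bool signaled; };

struct mini_entry {
   struct mini_fence *fence; /* last GPU use of this entry's buffer */
};

/* Plays the role of the can_reclaim callback installed via pb_slabs_init():
 * called with slabs->priv and the candidate entry. A real driver would poll
 * its fence or syncobj here without blocking. */
static bool mini_can_reclaim(void *priv, struct mini_entry *entry)
{
   (void)priv;
   return !entry->fence || entry->fence->signaled;
}

int main(void)
{
   struct mini_fence fence = { .signaled = false };
   struct mini_entry entry = { .fence = &fence };
   /* Entry stays on the reclaim list while its fence is pending... */
   bool busy = !mini_can_reclaim(NULL, &entry);
   fence.signaled = true;
   /* ...and becomes reusable on the next reclaim pass. */
   bool reusable = mini_can_reclaim(NULL, &entry);
   return busy && reusable ? 0 : 1;
}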
219 pb_slabs_reclaim(struct pb_slabs *slabs) in pb_slabs_reclaim() argument
221 simple_mtx_lock(&slabs->mutex); in pb_slabs_reclaim()
222 pb_slabs_reclaim_locked(slabs); in pb_slabs_reclaim()
223 simple_mtx_unlock(&slabs->mutex); in pb_slabs_reclaim()
234 pb_slabs_init(struct pb_slabs *slabs, in pb_slabs_init() argument
248 slabs->min_order = min_order; in pb_slabs_init()
249 slabs->num_orders = max_order - min_order + 1; in pb_slabs_init()
250 slabs->num_heaps = num_heaps; in pb_slabs_init()
251 slabs->allow_three_fourths_allocations = allow_three_fourth_allocations; in pb_slabs_init()
253 slabs->priv = priv; in pb_slabs_init()
254 slabs->can_reclaim = can_reclaim; in pb_slabs_init()
255 slabs->slab_alloc = slab_alloc; in pb_slabs_init()
256 slabs->slab_free = slab_free; in pb_slabs_init()
258 list_inithead(&slabs->reclaim); in pb_slabs_init()
260 num_groups = slabs->num_orders * slabs->num_heaps * in pb_slabs_init()
262 slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups)); in pb_slabs_init()
263 if (!slabs->groups) in pb_slabs_init()
267 struct pb_slab_group *group = &slabs->groups[i]; in pb_slabs_init()
268 list_inithead(&group->slabs); in pb_slabs_init()
271 (void) simple_mtx_init(&slabs->mutex, mtx_plain); in pb_slabs_init()
283 pb_slabs_deinit(struct pb_slabs *slabs) in pb_slabs_deinit() argument
288 while (!list_is_empty(&slabs->reclaim)) { in pb_slabs_deinit()
290 list_entry(slabs->reclaim.next, struct pb_slab_entry, head); in pb_slabs_deinit()
291 pb_slab_reclaim(slabs, entry); in pb_slabs_deinit()
294 FREE(slabs->groups); in pb_slabs_deinit()
295 simple_mtx_destroy(&slabs->mutex); in pb_slabs_deinit()
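Finally, the pb_slabs_init()/pb_slabs_deinit() matches outline the lifecycle: init records the order range, heap count, and callbacks, sizes the flat groups array (one group per heap/order pair, doubled when three-fourths buckets are enabled), and initializes each group's slab list along with the reclaim list and mutex; deinit drains the reclaim list (every entry must be reclaimable by then) before freeing the groups and destroying the mutex. A self-contained, hypothetical mirror of that setup and teardown, with stand-in list and group types:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mini_list { struct mini_list *prev, *next; };
struct mini_group { struct mini_list slabs; };

static void list_inithead(struct mini_list *head)
{
   head->prev = head->next = head; /* empty circular list */
}

int main(void)
{
   const unsigned min_order = 8, max_order = 12, num_heaps = 2;
   const bool allow_three_fourths = true;

   /* num_orders = max_order - min_order + 1, as in pb_slabs_init() */
   unsigned num_orders = max_order - min_order + 1;

   /* One group per (heap, order) pair, doubled when the three-fourths
    * buckets are enabled: the same multiplicity as the group_index math. */
   unsigned num_groups = num_orders * num_heaps *
                         (1 + allow_three_fourths);

   struct mini_group *groups = calloc(num_groups, sizeof(*groups));
   if (!groups)
      return 1;

   for (unsigned i = 0; i < num_groups; i++)
      list_inithead(&groups[i].slabs);

   printf("%u groups initialized\n", num_groups);

   /* Teardown: the real pb_slabs_deinit() first drains slabs->reclaim,
    * then frees the groups and destroys the mutex. */
   free(groups);
   return 0;
}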