
Lines Matching full:pa

193  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
197 * - new PA: buddy += N; PA = N
198 * - use inode PA: on-disk += N; PA -= N
199 * - discard inode PA: buddy -= on-disk - PA; PA = 0
200 * - use locality group PA: on-disk += N; PA -= N
201 * - discard locality group PA: buddy -= PA; PA = 0
202 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
204 * bits from PA, only from on-disk bitmap
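
The accounting rules above are easier to follow on a concrete bitmap. Here is a minimal userspace C sketch (a toy model, not kernel code; all names and sizes are invented) of the new/use/discard transitions for one inode PA:

    #include <stdio.h>
    #include <string.h>

    #define NBLK 16
    /* toy stand-ins for the on-disk bitmap, the in-core buddy bitmap,
     * and a single inode PA covering [pa_start, pa_start + pa_len) */
    static char ondisk[NBLK], buddy[NBLK];
    static int pa_start, pa_len, pa_free;

    static void new_pa(int start, int len)
    {
            /* new PA: buddy += N; PA = N */
            pa_start = start; pa_len = len; pa_free = len;
            memset(buddy + start, 1, len);
    }

    static void use_inode_pa(int start, int len)
    {
            /* use inode PA: on-disk += N; PA -= N */
            memset(ondisk + start, 1, len);
            pa_free -= len;
    }

    static void discard_inode_pa(void)
    {
            /* discard inode PA: free from the buddy every bit of the PA
             * range that is not set on disk -- only the on-disk bitmap
             * knows which bits were really used ('buddy -= on-disk - PA') */
            for (int i = pa_start; i < pa_start + pa_len; i++)
                    if (!ondisk[i])
                            buddy[i] = 0;
            pa_free = 0;
    }

    int main(void)
    {
            new_pa(4, 8);       /* preallocate blocks 4..11 */
            use_inode_pa(4, 3); /* the file really uses 4..6 */
            discard_inode_pa(); /* blocks 7..11 go back to the buddy */
            for (int i = 0; i < NBLK; i++)
                    printf("%d:%d/%d ", i, ondisk[i], buddy[i]);
            printf("\npa_free=%d\n", pa_free);
            return 0;
    }
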
214 * bit set and PA claims the same block, it's OK. IOW, one can set a bit in
215 * the on-disk bitmap if the buddy has the same bit set and/or a PA covers the corresponding
220 * - new PA
221 * blocks for PA are allocated in the buddy, buddy must be referenced
222 * until PA is linked to allocation group to avoid concurrent buddy init
223 * - use inode PA
224 * we need to make sure that either on-disk bitmap or PA has uptodate data
225 * given (3) we care that PA-=N operation doesn't interfere with init
226 * - discard inode PA
228 * - use locality group PA
229 * again PA-=N must be serialized with init
230 * - discard locality group PA
232 * - new PA vs.
233 * - use inode PA
235 * - discard inode PA
236 * discard process must wait until PA isn't used by another process
237 * - use locality group PA
239 * - discard locality group PA
240 * discard process must wait until PA isn't used by another process
241 * - use inode PA
242 * - use inode PA
244 * - discard inode PA
245 * discard process must wait until PA isn't used by another process
246 * - use locality group PA
248 * - discard locality group PA
249 * discard process must wait until PA isn't used by another process
252 * - PA is referenced, and while it is referenced no discard is possible
253 * - PA is referenced until its blocks are marked in the on-disk bitmap
254 * - PA changes only after the on-disk bitmap is updated
259 * a special case: when we've used the PA to emptiness, there is no need to modify the buddy
274 * find proper PA (per-inode or group)
278 * release PA
290 * remove PA from object (inode or locality group)
302 * - per-pa lock (pa)
305 * - new pa
309 * - find and use pa:
310 * pa
312 * - release consumed pa:
313 * pa
319 * pa
323 * pa
328 * pa
692 struct ext4_prealloc_space *pa; in __mb_check_buddy() local
693 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in __mb_check_buddy()
694 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); in __mb_check_buddy()
696 for (i = 0; i < pa->pa_len; i++) in __mb_check_buddy()
3043 struct ext4_prealloc_space *pa; in ext4_mb_cleanup_pa() local
3048 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_cleanup_pa()
3049 list_del(&pa->pa_group_list); in ext4_mb_cleanup_pa()
3051 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_cleanup_pa()
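
list_entry(), used throughout these fragments, is the kernel's container_of(): it maps a pointer to an embedded list node back to the enclosing structure. A self-contained userspace sketch of the same idiom (simplified; struct pspace is an invented stand-in):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* simplified container_of(): recover the enclosing struct from a
     * pointer to one of its members (the real macro lives in the kernel's
     * <linux/kernel.h>; list_entry() in <linux/list.h> is an alias) */
    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pspace {                 /* stand-in for ext4_prealloc_space */
            int pa_len;
            struct list_head pa_group_list;
    };

    int main(void)
    {
            struct pspace pa = { .pa_len = 8 };
            struct list_head *cur = &pa.pa_group_list;

            /* same shape as:
             * list_entry(cur, struct ext4_prealloc_space, pa_group_list) */
            struct pspace *p = list_entry(cur, struct pspace, pa_group_list);
            printf("pa_len=%d\n", p->pa_len);
            return 0;
    }
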
3545 struct ext4_prealloc_space *pa; in ext4_mb_normalize_request() local
3652 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_normalize_request()
3655 if (pa->pa_deleted) in ext4_mb_normalize_request()
3657 spin_lock(&pa->pa_lock); in ext4_mb_normalize_request()
3658 if (pa->pa_deleted) { in ext4_mb_normalize_request()
3659 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3663 pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa); in ext4_mb_normalize_request()
3665 /* PA must not overlap original request */ in ext4_mb_normalize_request()
3667 ac->ac_o_ex.fe_logical < pa->pa_lstart)); in ext4_mb_normalize_request()
3670 if (pa->pa_lstart >= end || pa_end <= start) { in ext4_mb_normalize_request()
3671 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3674 BUG_ON(pa->pa_lstart <= start && pa_end >= end); in ext4_mb_normalize_request()
3676 /* adjust start or end to be adjacent to this pa */ in ext4_mb_normalize_request()
3680 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { in ext4_mb_normalize_request()
3681 BUG_ON(pa->pa_lstart > end); in ext4_mb_normalize_request()
3682 end = pa->pa_lstart; in ext4_mb_normalize_request()
3684 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3691 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_normalize_request()
3694 spin_lock(&pa->pa_lock); in ext4_mb_normalize_request()
3695 if (pa->pa_deleted == 0) { in ext4_mb_normalize_request()
3696 pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa); in ext4_mb_normalize_request()
3697 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); in ext4_mb_normalize_request()
3699 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3768 * Called on failure; free up any blocks from the inode PA for this
3775 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_discard_allocated_blocks() local
3779 if (pa == NULL) { in ext4_discard_allocated_blocks()
3799 if (pa->pa_type == MB_INODE_PA) in ext4_discard_allocated_blocks()
3800 pa->pa_free += ac->ac_b_ex.fe_len; in ext4_discard_allocated_blocks()
3807 struct ext4_prealloc_space *pa) in ext4_mb_use_inode_pa() argument
3815 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); in ext4_mb_use_inode_pa()
3816 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), in ext4_mb_use_inode_pa()
3823 ac->ac_pa = pa; in ext4_mb_use_inode_pa()
3825 BUG_ON(start < pa->pa_pstart); in ext4_mb_use_inode_pa()
3826 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); in ext4_mb_use_inode_pa()
3827 BUG_ON(pa->pa_free < len); in ext4_mb_use_inode_pa()
3829 pa->pa_free -= len; in ext4_mb_use_inode_pa()
3831 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); in ext4_mb_use_inode_pa()
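
The start/end computation in ext4_mb_use_inode_pa() can be checked with concrete numbers. A hedged userspace sketch (invented values; clusters are assumed equal to blocks, so the EXT4_C2B() conversions are omitted) of how a logical offset inside the PA maps to a physical block and how the length is clamped:

    #include <stdio.h>

    static unsigned long long min_u64(unsigned long long a, unsigned long long b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned long long pa_pstart = 1000, pa_len = 64;  /* physical */
            unsigned long long pa_lstart = 200;                /* logical  */
            unsigned long long fe_logical = 212, req_len = 16; /* request  */

            /* same arithmetic as ext4_mb_use_inode_pa() */
            unsigned long long start = pa_pstart + (fe_logical - pa_lstart);
            unsigned long long end = min_u64(pa_pstart + pa_len, start + req_len);
            unsigned long long len = end - start;

            /* -> start=1012, end=1028, len=16: the request fits in the PA */
            printf("start=%llu end=%llu len=%llu\n", start, end, len);
            return 0;
    }
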
3838 struct ext4_prealloc_space *pa) in ext4_mb_use_group_pa() argument
3842 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, in ext4_mb_use_group_pa()
3847 ac->ac_pa = pa; in ext4_mb_use_group_pa()
3851 * instead we correct pa later, after blocks are marked in ext4_mb_use_group_pa()
3853 * Other CPUs are prevented from allocating from this pa by lg_mutex in ext4_mb_use_group_pa()
3855 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", in ext4_mb_use_group_pa()
3856 pa->pa_lstart-len, len, pa); in ext4_mb_use_group_pa()
3867 struct ext4_prealloc_space *pa, in ext4_mb_check_group_pa() argument
3873 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
3874 return pa; in ext4_mb_check_group_pa()
3877 new_distance = abs(goal_block - pa->pa_pstart); in ext4_mb_check_group_pa()
3884 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
3885 return pa; in ext4_mb_check_group_pa()
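
ext4_mb_check_group_pa() keeps whichever candidate PA starts closest to the goal block. A minimal sketch of that heuristic (userspace C; the pa_count reference handling visible above is elided):

    #include <stdio.h>
    #include <stdlib.h>

    struct pa { long long pa_pstart; };

    /* keep the candidate whose physical start is nearest the goal */
    static struct pa *check_group_pa(long long goal, struct pa *pa,
                                     struct pa *cpa)
    {
            if (cpa == NULL)
                    return pa;
            if (llabs(goal - cpa->pa_pstart) <= llabs(goal - pa->pa_pstart))
                    return cpa;
            return pa;
    }

    int main(void)
    {
            struct pa a = { 900 }, b = { 1010 };
            struct pa *best = check_group_pa(1000, &a, NULL);
            best = check_group_pa(1000, &b, best); /* b is 10 away vs. 100 */
            printf("best starts at %lld\n", best->pa_pstart);
            return 0;
    }
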
3898 struct ext4_prealloc_space *pa, *cpa = NULL; in ext4_mb_use_preallocated() local
3907 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_use_preallocated()
3911 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || in ext4_mb_use_preallocated()
3912 ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa)) in ext4_mb_use_preallocated()
3917 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > in ext4_mb_use_preallocated()
3922 spin_lock(&pa->pa_lock); in ext4_mb_use_preallocated()
3923 if (pa->pa_deleted == 0 && pa->pa_free) { in ext4_mb_use_preallocated()
3924 atomic_inc(&pa->pa_count); in ext4_mb_use_preallocated()
3925 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_use_preallocated()
3926 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
3931 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
3955 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], in ext4_mb_use_preallocated()
3957 spin_lock(&pa->pa_lock); in ext4_mb_use_preallocated()
3958 if (pa->pa_deleted == 0 && in ext4_mb_use_preallocated()
3959 pa->pa_free >= ac->ac_o_ex.fe_len) { in ext4_mb_use_preallocated()
3962 pa, cpa); in ext4_mb_use_preallocated()
3964 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
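
The pattern in the loops above -- take pa_lock, verify the PA is neither deleted nor empty, then grab a reference before unlocking -- is what keeps a concurrent discard from freeing a PA that is about to be used. A hedged userspace sketch of that check-then-reference dance (pthreads plus C11 atomics; heavily simplified, consumes one block instead of a whole extent):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct pa {
            pthread_mutex_t pa_lock;
            atomic_int pa_count;
            int pa_free;
            int pa_deleted;
    };

    /* mirrors the shape of the loop in ext4_mb_use_preallocated():
     * only a live, non-empty PA may be referenced and consumed */
    static bool try_use_pa(struct pa *pa)
    {
            bool ok = false;

            pthread_mutex_lock(&pa->pa_lock);
            if (pa->pa_deleted == 0 && pa->pa_free > 0) {
                    atomic_fetch_add(&pa->pa_count, 1); /* pin vs. discard */
                    pa->pa_free--;                      /* consume a block */
                    ok = true;
            }
            pthread_mutex_unlock(&pa->pa_lock);
            return ok;
    }

    int main(void)
    {
            struct pa pa = { PTHREAD_MUTEX_INITIALIZER, 1, 4, 0 };
            bool used = try_use_pa(&pa);
            printf("used=%d free=%d\n", used, pa.pa_free);
            return 0;
    }
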
4012 struct ext4_prealloc_space *pa; in ext4_mb_generate_from_pa() local
4031 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_generate_from_pa()
4032 spin_lock(&pa->pa_lock); in ext4_mb_generate_from_pa()
4033 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_generate_from_pa()
4035 len = pa->pa_len; in ext4_mb_generate_from_pa()
4036 spin_unlock(&pa->pa_lock); in ext4_mb_generate_from_pa()
4047 struct ext4_prealloc_space *pa) in ext4_mb_mark_pa_deleted() argument
4051 if (pa->pa_deleted) { in ext4_mb_mark_pa_deleted()
4052 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", in ext4_mb_mark_pa_deleted()
4053 pa->pa_type, pa->pa_pstart, pa->pa_lstart, in ext4_mb_mark_pa_deleted()
4054 pa->pa_len); in ext4_mb_mark_pa_deleted()
4058 pa->pa_deleted = 1; in ext4_mb_mark_pa_deleted()
4060 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_mark_pa_deleted()
4061 ei = EXT4_I(pa->pa_inode); in ext4_mb_mark_pa_deleted()
4068 struct ext4_prealloc_space *pa; in ext4_mb_pa_callback() local
4069 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); in ext4_mb_pa_callback()
4071 BUG_ON(atomic_read(&pa->pa_count)); in ext4_mb_pa_callback()
4072 BUG_ON(pa->pa_deleted == 0); in ext4_mb_pa_callback()
4073 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_callback()
4081 struct super_block *sb, struct ext4_prealloc_space *pa) in ext4_mb_put_pa() argument
4087 spin_lock(&pa->pa_lock); in ext4_mb_put_pa()
4088 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { in ext4_mb_put_pa()
4089 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
4093 if (pa->pa_deleted == 1) { in ext4_mb_put_pa()
4094 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
4098 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_put_pa()
4099 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
4101 grp_blk = pa->pa_pstart; in ext4_mb_put_pa()
4104 * next group when pa is used up in ext4_mb_put_pa()
4106 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_put_pa()
4115 * find block B in PA in ext4_mb_put_pa()
4118 * drop PA from group in ext4_mb_put_pa()
4122 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" in ext4_mb_put_pa()
4126 list_del(&pa->pa_group_list); in ext4_mb_put_pa()
4129 spin_lock(pa->pa_obj_lock); in ext4_mb_put_pa()
4130 list_del_rcu(&pa->pa_inode_list); in ext4_mb_put_pa()
4131 spin_unlock(pa->pa_obj_lock); in ext4_mb_put_pa()
4133 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_put_pa()
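
ext4_mb_put_pa() only tears a PA down when the caller held the last reference and the PA is fully consumed (pa_free == 0); otherwise it stays on its lists for reuse. A sketch of that drop condition (userspace C; pa_lock is elided and the RCU-deferred free is replaced by a direct free, purely for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pa {
            atomic_int pa_count;
            int pa_free;
            int pa_deleted;
    };

    /* drop one reference; destroy only if it was the last one and the
     * preallocation is fully used up, as in ext4_mb_put_pa() */
    static bool put_pa(struct pa *pa)
    {
            if (atomic_fetch_sub(&pa->pa_count, 1) != 1 || pa->pa_free != 0)
                    return false;        /* still referenced or reusable */
            if (pa->pa_deleted)
                    return false;        /* someone else is freeing it */
            pa->pa_deleted = 1;
            free(pa);                    /* the kernel defers this via RCU */
            return true;
    }

    int main(void)
    {
            struct pa *pa = malloc(sizeof(*pa));
            atomic_init(&pa->pa_count, 1);
            pa->pa_free = 0;
            pa->pa_deleted = 0;
            printf("freed=%d\n", put_pa(pa));
            return 0;
    }
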
4144 struct ext4_prealloc_space *pa; in ext4_mb_new_inode_pa() local
4154 pa = ac->ac_pa; in ext4_mb_new_inode_pa()
4203 pa->pa_lstart = ac->ac_b_ex.fe_logical; in ext4_mb_new_inode_pa()
4204 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_inode_pa()
4205 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
4206 pa->pa_free = pa->pa_len; in ext4_mb_new_inode_pa()
4207 spin_lock_init(&pa->pa_lock); in ext4_mb_new_inode_pa()
4208 INIT_LIST_HEAD(&pa->pa_inode_list); in ext4_mb_new_inode_pa()
4209 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_inode_pa()
4210 pa->pa_deleted = 0; in ext4_mb_new_inode_pa()
4211 pa->pa_type = MB_INODE_PA; in ext4_mb_new_inode_pa()
4213 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_inode_pa()
4214 pa->pa_len, pa->pa_lstart); in ext4_mb_new_inode_pa()
4215 trace_ext4_mb_new_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
4217 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
4218 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); in ext4_mb_new_inode_pa()
4225 pa->pa_obj_lock = &ei->i_prealloc_lock; in ext4_mb_new_inode_pa()
4226 pa->pa_inode = ac->ac_inode; in ext4_mb_new_inode_pa()
4228 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_inode_pa()
4230 spin_lock(pa->pa_obj_lock); in ext4_mb_new_inode_pa()
4231 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); in ext4_mb_new_inode_pa()
4232 spin_unlock(pa->pa_obj_lock); in ext4_mb_new_inode_pa()
4244 struct ext4_prealloc_space *pa; in ext4_mb_new_group_pa() local
4253 pa = ac->ac_pa; in ext4_mb_new_group_pa()
4259 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_group_pa()
4260 pa->pa_lstart = pa->pa_pstart; in ext4_mb_new_group_pa()
4261 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_group_pa()
4262 pa->pa_free = pa->pa_len; in ext4_mb_new_group_pa()
4263 spin_lock_init(&pa->pa_lock); in ext4_mb_new_group_pa()
4264 INIT_LIST_HEAD(&pa->pa_inode_list); in ext4_mb_new_group_pa()
4265 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_group_pa()
4266 pa->pa_deleted = 0; in ext4_mb_new_group_pa()
4267 pa->pa_type = MB_GROUP_PA; in ext4_mb_new_group_pa()
4269 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_group_pa()
4270 pa->pa_len, pa->pa_lstart); in ext4_mb_new_group_pa()
4271 trace_ext4_mb_new_group_pa(ac, pa); in ext4_mb_new_group_pa()
4273 ext4_mb_use_group_pa(ac, pa); in ext4_mb_new_group_pa()
4274 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); in ext4_mb_new_group_pa()
4282 pa->pa_obj_lock = &lg->lg_prealloc_lock; in ext4_mb_new_group_pa()
4283 pa->pa_inode = NULL; in ext4_mb_new_group_pa()
4285 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_group_pa()
4288 * We will later add the new pa to the right bucket in ext4_mb_new_group_pa()
4304 * @pa must be unlinked from inode and group lists, so that
4311 struct ext4_prealloc_space *pa) in ext4_mb_release_inode_pa() argument
4322 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_inode_pa()
4323 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_inode_pa()
4324 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); in ext4_mb_release_inode_pa()
4325 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); in ext4_mb_release_inode_pa()
4326 end = bit + pa->pa_len; in ext4_mb_release_inode_pa()
4339 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + in ext4_mb_release_inode_pa()
4342 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); in ext4_mb_release_inode_pa()
4345 if (free != pa->pa_free) { in ext4_mb_release_inode_pa()
4347 "pa %p: logic %lu, phys. %lu, len %d", in ext4_mb_release_inode_pa()
4348 pa, (unsigned long) pa->pa_lstart, in ext4_mb_release_inode_pa()
4349 (unsigned long) pa->pa_pstart, in ext4_mb_release_inode_pa()
4350 pa->pa_len); in ext4_mb_release_inode_pa()
4352 free, pa->pa_free); in ext4_mb_release_inode_pa()
4354 * pa is already deleted so we use the value obtained in ext4_mb_release_inode_pa()
4365 struct ext4_prealloc_space *pa) in ext4_mb_release_group_pa() argument
4371 trace_ext4_mb_release_group_pa(sb, pa); in ext4_mb_release_group_pa()
4372 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_group_pa()
4373 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_group_pa()
4374 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { in ext4_mb_release_group_pa()
4376 e4b->bd_group, group, pa->pa_pstart); in ext4_mb_release_group_pa()
4379 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); in ext4_mb_release_group_pa()
4380 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); in ext4_mb_release_group_pa()
4381 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); in ext4_mb_release_group_pa()
4401 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_group_preallocations() local
4432 list_for_each_entry_safe(pa, tmp, in ext4_mb_discard_group_preallocations()
4434 spin_lock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4435 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_group_preallocations()
4436 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4440 if (pa->pa_deleted) { in ext4_mb_discard_group_preallocations()
4441 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4446 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_group_preallocations()
4452 free += pa->pa_free; in ext4_mb_discard_group_preallocations()
4454 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4456 list_del(&pa->pa_group_list); in ext4_mb_discard_group_preallocations()
4457 list_add(&pa->u.pa_tmp_list, &list); in ext4_mb_discard_group_preallocations()
4461 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_mb_discard_group_preallocations()
4464 spin_lock(pa->pa_obj_lock); in ext4_mb_discard_group_preallocations()
4465 list_del_rcu(&pa->pa_inode_list); in ext4_mb_discard_group_preallocations()
4466 spin_unlock(pa->pa_obj_lock); in ext4_mb_discard_group_preallocations()
4468 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_discard_group_preallocations()
4469 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_group_preallocations()
4471 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_mb_discard_group_preallocations()
4473 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_group_preallocations()
4474 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_group_preallocations()
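
The discard path above is two-phase: candidates are marked deleted and collected onto a private list while the group is locked, and the actual block release happens afterwards, with the lock dropped. A compact sketch of that collect-then-process shape (userspace C; singly linked toy lists stand in for the kernel's list_head, and locking is elided):

    #include <stdio.h>

    struct pa {
            int pa_deleted, pa_count, pa_free;
            struct pa *next;      /* group list */
            struct pa *tmp_next;  /* stand-in for u.pa_tmp_list */
    };

    int main(void)
    {
            /* toy group list: one busy PA, one idle PA */
            struct pa busy = { 0, 1, 4, NULL, NULL };
            struct pa idle = { 0, 0, 4, &busy, NULL };
            struct pa *group = &idle, *victims = NULL;

            /* phase 1: under the group lock in the kernel -- skip busy
             * PAs, mark the rest deleted, collect them on a private list */
            for (struct pa *p = group; p; p = p->next) {
                    if (p->pa_count || p->pa_deleted)
                            continue;
                    p->pa_deleted = 1;
                    p->tmp_next = victims;
                    victims = p;
            }

            /* phase 2: locks dropped -- release each victim's blocks */
            for (struct pa *p = victims; p; p = p->tmp_next)
                    printf("releasing %d blocks\n", p->pa_free);
            return 0;
    }
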
4500 struct ext4_prealloc_space *pa, *tmp; in ext4_discard_preallocations() local
4525 /* first, collect all pa's in the inode */ in ext4_discard_preallocations()
4528 pa = list_entry(ei->i_prealloc_list.prev, in ext4_discard_preallocations()
4530 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); in ext4_discard_preallocations()
4531 spin_lock(&pa->pa_lock); in ext4_discard_preallocations()
4532 if (atomic_read(&pa->pa_count)) { in ext4_discard_preallocations()
4535 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4538 "uh-oh! used pa while discarding"); in ext4_discard_preallocations()
4544 if (pa->pa_deleted == 0) { in ext4_discard_preallocations()
4545 ext4_mb_mark_pa_deleted(sb, pa); in ext4_discard_preallocations()
4546 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4547 list_del_rcu(&pa->pa_inode_list); in ext4_discard_preallocations()
4548 list_add(&pa->u.pa_tmp_list, &list); in ext4_discard_preallocations()
4553 /* someone is deleting pa right now */ in ext4_discard_preallocations()
4554 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4558 * doesn't mean pa is already unlinked from in ext4_discard_preallocations()
4562 * pa from inode's list may access already in ext4_discard_preallocations()
4574 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_discard_preallocations()
4575 BUG_ON(pa->pa_type != MB_INODE_PA); in ext4_discard_preallocations()
4576 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_discard_preallocations()
4596 list_del(&pa->pa_group_list); in ext4_discard_preallocations()
4597 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_discard_preallocations()
4603 list_del(&pa->u.pa_tmp_list); in ext4_discard_preallocations()
4604 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_discard_preallocations()
4610 struct ext4_prealloc_space *pa; in ext4_mb_pa_alloc() local
4613 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); in ext4_mb_pa_alloc()
4614 if (!pa) in ext4_mb_pa_alloc()
4616 atomic_set(&pa->pa_count, 1); in ext4_mb_pa_alloc()
4617 ac->ac_pa = pa; in ext4_mb_pa_alloc()
4623 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_pa_free() local
4625 BUG_ON(!pa); in ext4_mb_pa_free()
4627 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); in ext4_mb_pa_free()
4628 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_free()
4643 struct ext4_prealloc_space *pa; in ext4_mb_show_pa() local
4651 pa = list_entry(cur, struct ext4_prealloc_space, in ext4_mb_show_pa()
4653 spin_lock(&pa->pa_lock); in ext4_mb_show_pa()
4654 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_show_pa()
4656 spin_unlock(&pa->pa_lock); in ext4_mb_show_pa()
4657 mb_debug(sb, "PA:%u:%d:%d\n", i, start, in ext4_mb_show_pa()
4658 pa->pa_len); in ext4_mb_show_pa()
4825 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_lg_preallocations() local
4832 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], in ext4_mb_discard_lg_preallocations()
4835 spin_lock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4836 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_lg_preallocations()
4838 * This is the pa that we just used in ext4_mb_discard_lg_preallocations()
4842 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4845 if (pa->pa_deleted) { in ext4_mb_discard_lg_preallocations()
4846 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4850 BUG_ON(pa->pa_type != MB_GROUP_PA); in ext4_mb_discard_lg_preallocations()
4853 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_lg_preallocations()
4854 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4856 list_del_rcu(&pa->pa_inode_list); in ext4_mb_discard_lg_preallocations()
4857 list_add(&pa->u.pa_tmp_list, &discard_list); in ext4_mb_discard_lg_preallocations()
4872 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { in ext4_mb_discard_lg_preallocations()
4875 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_mb_discard_lg_preallocations()
4884 list_del(&pa->pa_group_list); in ext4_mb_discard_lg_preallocations()
4885 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_lg_preallocations()
4889 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_lg_preallocations()
4890 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_lg_preallocations()
4908 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; in ext4_mb_add_n_trim() local
4910 order = fls(pa->pa_free) - 1; in ext4_mb_add_n_trim()
4924 if (!added && pa->pa_free < tmp_pa->pa_free) { in ext4_mb_add_n_trim()
4926 list_add_tail_rcu(&pa->pa_inode_list, in ext4_mb_add_n_trim()
4938 list_add_tail_rcu(&pa->pa_inode_list, in ext4_mb_add_n_trim()
4952 * if per-inode prealloc list is too long, trim some PA
4976 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_release_context() local
4977 if (pa) { in ext4_mb_release_context()
4978 if (pa->pa_type == MB_GROUP_PA) { in ext4_mb_release_context()
4980 spin_lock(&pa->pa_lock); in ext4_mb_release_context()
4981 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
4982 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
4983 pa->pa_free -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
4984 pa->pa_len -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
4985 spin_unlock(&pa->pa_lock); in ext4_mb_release_context()
4988 * We want to add the pa to the right bucket. in ext4_mb_release_context()
4993 if (likely(pa->pa_free)) { in ext4_mb_release_context()
4994 spin_lock(pa->pa_obj_lock); in ext4_mb_release_context()
4995 list_del_rcu(&pa->pa_inode_list); in ext4_mb_release_context()
4996 spin_unlock(pa->pa_obj_lock); in ext4_mb_release_context()
5001 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_release_context()
5004 * to trim the least recently used PA. in ext4_mb_release_context()
5006 spin_lock(pa->pa_obj_lock); in ext4_mb_release_context()
5007 list_move(&pa->pa_inode_list, &ei->i_prealloc_list); in ext4_mb_release_context()
5008 spin_unlock(pa->pa_obj_lock); in ext4_mb_release_context()
5011 ext4_mb_put_pa(ac, ac->ac_sb, pa); in ext4_mb_release_context()
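
For a group PA, the MB_GROUP_PA branch above advances the PA window past the blocks just consumed. A minimal check of that arithmetic with invented numbers (clusters assumed equal to blocks, so EXT4_C2B() is omitted):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long pa_pstart = 2048, pa_lstart = 2048;
            int pa_len = 32, pa_free = 32;
            int used = 8;                       /* ac_b_ex.fe_len */

            /* same updates as the MB_GROUP_PA branch above */
            pa_pstart += used;
            pa_lstart += used;
            pa_free -= used;
            pa_len -= used;

            /* window now covers 2056..2079: 24 blocks remain */
            printf("pstart=%llu free=%d len=%d\n", pa_pstart, pa_free, pa_len);
            return 0;
    }
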
5170 * pa allocated above is added to grp->bb_prealloc_list only in ext4_mb_new_blocks()
5174 * So we have to free this pa here itself. in ext4_mb_new_blocks()
5199 * If block allocation fails then the pa allocated above in ext4_mb_new_blocks()