
Lines Matching full:pa

192  *  blocks: how many blocks are marked used/free in the on-disk bitmap, buddy and PA.
196 * - new PA: buddy += N; PA = N
197 * - use inode PA: on-disk += N; PA -= N
198 * - discard inode PA: buddy -= on-disk - PA; PA = 0
199 * - use locality group PA: on-disk += N; PA -= N
200 * - discard locality group PA: buddy -= PA; PA = 0
201 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
202 *       is used in the real operation, because we can't know the actual used
203 *       bits from PA, only from the on-disk bitmap
213 * bit set and a PA claims the same block, it's OK. IOW, one can set a bit in the
214 * on-disk bitmap if the buddy has the same bit set and/or a PA covers the corresponding
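The accounting rules above can be exercised as a tiny userspace model. The sketch below is illustrative only: struct pa_model and its helpers are invented for this example and exist nowhere in mballoc.c. Per the note, the discard step derives the unused remainder from the on-disk bitmap rather than trusting the PA counter, mirroring the free != pa->pa_free consistency check that ext4_mb_release_inode_pa() performs further down this listing.

#include <assert.h>
#include <stdio.h>

/* Illustrative userspace model of the PA accounting rules above;
 * none of these names exist in mballoc.c. One PA over `len` clusters. */
struct pa_model {
        int len;     /* total clusters covered by the PA             */
        int on_disk; /* bits set in the on-disk bitmap in that range */
        int buddy;   /* bits set in the in-core buddy in that range  */
        int pa;      /* clusters still unconsumed in the PA          */
};

static void new_pa(struct pa_model *m, int len)
{
        m->len = len;
        m->buddy = len;   /* "new PA: buddy += N; PA = N" */
        m->pa = len;
        m->on_disk = 0;
}

static void use_inode_pa(struct pa_model *m, int n)
{
        m->on_disk += n;  /* "use inode PA: on-disk += N; PA -= N" */
        m->pa -= n;
}

static void discard_inode_pa(struct pa_model *m)
{
        /* The discard path derives the unused remainder from the
         * on-disk bitmap rather than trusting the PA counter; as a
         * count, the clear bits in the range are len - on_disk. */
        int unused = m->len - m->on_disk;

        assert(unused == m->pa); /* the free != pa_free check at release */
        m->buddy -= unused;      /* "discard inode PA: ...; PA = 0"      */
        m->pa = 0;
}

int main(void)
{
        struct pa_model m;

        new_pa(&m, 16);
        use_inode_pa(&m, 5);
        discard_inode_pa(&m);
        printf("on_disk=%d buddy=%d pa=%d\n", m.on_disk, m.buddy, m.pa);
        return 0;
}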
219 * - new PA
220 * blocks for PA are allocated in the buddy, buddy must be referenced
221 * until PA is linked to allocation group to avoid concurrent buddy init
222 * - use inode PA
223 * we need to make sure that either on-disk bitmap or PA has uptodate data
224 * given (3), we care that the PA-=N operation doesn't interfere with init
225 * - discard inode PA
227 * - use locality group PA
228 * again, PA-=N must be serialized with init
229 * - discard locality group PA
231 * - new PA vs.
232 * - use inode PA
234 * - discard inode PA
235 * the discard process must wait until the PA is no longer in use by another process
236 * - use locality group PA
238 * - discard locality group PA
239 * the discard process must wait until the PA is no longer in use by another process
240 * - use inode PA
241 * - use inode PA
243 * - discard inode PA
244 * the discard process must wait until the PA is no longer in use by another process
245 * - use locality group PA
247 * - discard locality group PA
248 * the discard process must wait until the PA is no longer in use by another process
251 * - a PA is referenced, and while it is, no discard is possible (see the sketch below)
252 * - a PA stays referenced until its blocks are marked in the on-disk bitmap
253 * - a PA changes only after the on-disk bitmap does
258 * a special case: when we've used a PA down to empty, there is no need to modify the buddy
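A minimal userspace sketch of the first consequence, that a referenced PA cannot be discarded. A pthread mutex stands in for the kernel's spin_lock(&pa->pa_lock), and the names only echo the real fields; this is a model under those assumptions, not the mballoc.c code.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Userspace sketch of "a referenced PA cannot be discarded". */
struct pa {
        pthread_mutex_t pa_lock;
        int pa_count;   /* references held by in-flight allocations */
        int pa_deleted; /* set once; the PA is dead afterwards      */
};

/* Allocation side: only a live PA can gain a reference. */
static bool pa_try_use(struct pa *p)
{
        bool ok;

        pthread_mutex_lock(&p->pa_lock);
        ok = !p->pa_deleted;
        if (ok)
                p->pa_count++;
        pthread_mutex_unlock(&p->pa_lock);
        return ok;
}

/* Discard side: a referenced PA is skipped, never freed, matching the
 * atomic_read(&pa->pa_count) checks in the discard loops further down. */
static bool pa_try_discard(struct pa *p)
{
        bool ok;

        pthread_mutex_lock(&p->pa_lock);
        ok = (p->pa_count == 0 && !p->pa_deleted);
        if (ok)
                p->pa_deleted = 1;
        pthread_mutex_unlock(&p->pa_lock);
        return ok;
}

int main(void)
{
        struct pa p = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        pa_try_use(&p);              /* an allocation takes a reference */
        assert(!pa_try_discard(&p)); /* referenced: discard must skip   */

        pthread_mutex_lock(&p.pa_lock);
        p.pa_count--;                /* allocation done, drop the ref   */
        pthread_mutex_unlock(&p.pa_lock);

        assert(pa_try_discard(&p));  /* idle now: discard succeeds      */
        return 0;
}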
273 * find proper PA (per-inode or group)
277 * release PA
289 * remove PA from object (inode or locality group)
301 * - per-pa lock (pa)
304 * - new pa
308 * - find and use pa:
309 * pa
311 * - release consumed pa:
312 * pa
318 * pa
322 * pa
327 * pa
689 struct ext4_prealloc_space *pa; in __mb_check_buddy() local
690 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in __mb_check_buddy()
691 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); in __mb_check_buddy()
693 for (i = 0; i < pa->pa_len; i++) in __mb_check_buddy()
2949 struct ext4_prealloc_space *pa; in ext4_mb_cleanup_pa() local
2954 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_cleanup_pa()
2955 list_del(&pa->pa_group_list); in ext4_mb_cleanup_pa()
2957 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_cleanup_pa()
3422 struct ext4_prealloc_space *pa; in ext4_mb_normalize_request() local
3516 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_normalize_request()
3519 if (pa->pa_deleted) in ext4_mb_normalize_request()
3521 spin_lock(&pa->pa_lock); in ext4_mb_normalize_request()
3522 if (pa->pa_deleted) { in ext4_mb_normalize_request()
3523 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3527 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), in ext4_mb_normalize_request()
3528 pa->pa_len); in ext4_mb_normalize_request()
3530 /* PA must not overlap original request */ in ext4_mb_normalize_request()
3532 ac->ac_o_ex.fe_logical < pa->pa_lstart)); in ext4_mb_normalize_request()
3535 if (pa->pa_lstart >= end || pa_end <= start) { in ext4_mb_normalize_request()
3536 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3539 BUG_ON(pa->pa_lstart <= start && pa_end >= end); in ext4_mb_normalize_request()
3541 /* adjust start or end to be adjacent to this pa */ in ext4_mb_normalize_request()
3545 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { in ext4_mb_normalize_request()
3546 BUG_ON(pa->pa_lstart > end); in ext4_mb_normalize_request()
3547 end = pa->pa_lstart; in ext4_mb_normalize_request()
3549 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3556 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_normalize_request()
3559 spin_lock(&pa->pa_lock); in ext4_mb_normalize_request()
3560 if (pa->pa_deleted == 0) { in ext4_mb_normalize_request()
3561 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), in ext4_mb_normalize_request()
3562 pa->pa_len); in ext4_mb_normalize_request()
3563 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); in ext4_mb_normalize_request()
3565 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
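The loop above shrinks the normalized window so it stays clear of every existing PA while keeping the original request inside it. Below is a hedged userspace rendering of that trimming step; struct lpa and trim_window are invented names, and asserts take the place of BUG_ON().

#include <assert.h>
#include <stdio.h>

struct lpa { unsigned pa_lstart, pa_end; }; /* logical range of one PA */

/* Shrink the normalized window [start, end) so it overlaps no PA,
 * while the original logical block `olog` stays inside the window. */
static void trim_window(unsigned *start, unsigned *end, unsigned olog,
                        const struct lpa *pas, int n)
{
        for (int i = 0; i < n; i++) {
                const struct lpa *p = &pas[i];

                /* a PA never covers the original request itself */
                assert(olog < p->pa_lstart || olog >= p->pa_end);

                if (p->pa_lstart >= *end || p->pa_end <= *start)
                        continue;               /* no overlap: skip        */
                if (p->pa_end <= olog) {
                        assert(p->pa_end >= *start);
                        *start = p->pa_end;     /* PA left of the request  */
                } else {
                        assert(p->pa_lstart <= *end);
                        *end = p->pa_lstart;    /* PA right of the request */
                }
        }
}

int main(void)
{
        struct lpa pas[] = { { 10, 50 }, { 200, 300 } };
        unsigned start = 0, end = 512;

        trim_window(&start, &end, 100, pas, 2);
        printf("window [%u, %u)\n", start, end); /* prints [50, 200) */
        return 0;
}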
3630 * Called on failure; free up any blocks from the inode PA for this
3637 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_discard_allocated_blocks() local
3641 if (pa == NULL) { in ext4_discard_allocated_blocks()
3661 if (pa->pa_type == MB_INODE_PA) in ext4_discard_allocated_blocks()
3662 pa->pa_free += ac->ac_b_ex.fe_len; in ext4_discard_allocated_blocks()
3669 struct ext4_prealloc_space *pa) in ext4_mb_use_inode_pa() argument
3677 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); in ext4_mb_use_inode_pa()
3678 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), in ext4_mb_use_inode_pa()
3685 ac->ac_pa = pa; in ext4_mb_use_inode_pa()
3687 BUG_ON(start < pa->pa_pstart); in ext4_mb_use_inode_pa()
3688 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); in ext4_mb_use_inode_pa()
3689 BUG_ON(pa->pa_free < len); in ext4_mb_use_inode_pa()
3690 pa->pa_free -= len; in ext4_mb_use_inode_pa()
3692 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); in ext4_mb_use_inode_pa()
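The start/end computation in ext4_mb_use_inode_pa() is plain interval arithmetic: shift the request's logical offset within the PA onto the PA's physical range, then clip to whichever ends first, the PA or the request. A standalone worked example with made-up numbers; plain integers stand in for ext4_fsblk_t/ext4_lblk_t and clusters are treated as blocks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pa_pstart = 100000; /* PA physical start       */
        uint64_t pa_lstart = 2000;   /* PA logical start        */
        uint64_t pa_len    = 64;     /* PA length               */
        uint64_t logical   = 2010;   /* requested logical block */
        uint64_t req_len   = 8;      /* requested length        */

        /* shift the logical offset inside the PA onto its physical range */
        uint64_t start = pa_pstart + (logical - pa_lstart);
        uint64_t end   = pa_pstart + pa_len;

        if (end > start + req_len)   /* end = min(PA end, request end) */
                end = start + req_len;

        printf("allocate [%llu, %llu), len %llu\n",
               (unsigned long long)start, (unsigned long long)end,
               (unsigned long long)(end - start));
        return 0;
}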
3699 struct ext4_prealloc_space *pa) in ext4_mb_use_group_pa() argument
3703 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, in ext4_mb_use_group_pa()
3708 ac->ac_pa = pa; in ext4_mb_use_group_pa()
3712 * instead we correct pa later, after blocks are marked in ext4_mb_use_group_pa()
3714 * Other CPUs are prevented from allocating from this pa by lg_mutex in ext4_mb_use_group_pa()
3716 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", in ext4_mb_use_group_pa()
3717 pa->pa_lstart-len, len, pa); in ext4_mb_use_group_pa()
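The deferred correction this comment promises happens in ext4_mb_release_context(), visible near the end of this listing: once the blocks are marked, the group PA's window is simply advanced past them. A minimal model of that advance, with invented names and blocks/clusters collapsed into one unit:

#include <stdio.h>

struct group_pa {
        unsigned long long pstart, lstart; /* current window start */
        int len, free;                     /* remaining size       */
};

static void group_pa_consume(struct group_pa *p, int used)
{
        p->pstart += used; /* pa->pa_pstart += EXT4_C2B(sbi, fe_len) */
        p->lstart += used; /* group PAs keep pa_lstart == pa_pstart  */
        p->free   -= used; /* pa->pa_free -= ac->ac_b_ex.fe_len      */
        p->len    -= used; /* pa->pa_len  -= ac->ac_b_ex.fe_len      */
}

int main(void)
{
        struct group_pa g = { 8192, 8192, 512, 512 };

        group_pa_consume(&g, 64); /* an allocation took 64 blocks */
        printf("window now starts at %llu, %d blocks left\n",
               g.pstart, g.free);
        return 0;
}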
3728 struct ext4_prealloc_space *pa, in ext4_mb_check_group_pa() argument
3734 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
3735 return pa; in ext4_mb_check_group_pa()
3738 new_distance = abs(goal_block - pa->pa_pstart); in ext4_mb_check_group_pa()
3745 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
3746 return pa; in ext4_mb_check_group_pa()
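ext4_mb_check_group_pa() keeps whichever candidate PA starts closest to the goal block, moving the held reference to the winner. A userspace sketch of that policy, with the refcount reduced to a plain int and all names invented:

#include <stddef.h>
#include <stdint.h>

struct gpa {
        uint64_t pstart; /* physical start block of the PA */
        int count;       /* reference count                */
};

static uint64_t distance(uint64_t a, uint64_t b)
{
        return a > b ? a - b : b - a;
}

/* Return the better of the current candidate cpa and the new pa,
 * moving the held reference accordingly (cpa may start out NULL). */
static struct gpa *check_group_pa(uint64_t goal, struct gpa *pa,
                                  struct gpa *cpa)
{
        if (cpa == NULL) {
                pa->count++;
                return pa;
        }
        if (distance(goal, cpa->pstart) <= distance(goal, pa->pstart))
                return cpa;  /* existing candidate is at least as close */

        cpa->count--;        /* drop the old candidate's reference */
        pa->count++;
        return pa;
}

int main(void)
{
        struct gpa a = { 1000, 0 }, b = { 4000, 0 };
        struct gpa *best = NULL;

        best = check_group_pa(3500, &a, best); /* first candidate wins   */
        best = check_group_pa(3500, &b, best); /* b is closer: swap refs */
        return best->pstart == 4000 ? 0 : 1;
}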
3759 struct ext4_prealloc_space *pa, *cpa = NULL; in ext4_mb_use_preallocated() local
3768 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_use_preallocated()
3772 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || in ext4_mb_use_preallocated()
3773 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + in ext4_mb_use_preallocated()
3774 EXT4_C2B(sbi, pa->pa_len))) in ext4_mb_use_preallocated()
3779 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > in ext4_mb_use_preallocated()
3784 spin_lock(&pa->pa_lock); in ext4_mb_use_preallocated()
3785 if (pa->pa_deleted == 0 && pa->pa_free) { in ext4_mb_use_preallocated()
3786 atomic_inc(&pa->pa_count); in ext4_mb_use_preallocated()
3787 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_use_preallocated()
3788 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
3793 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
3817 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], in ext4_mb_use_preallocated()
3819 spin_lock(&pa->pa_lock); in ext4_mb_use_preallocated()
3820 if (pa->pa_deleted == 0 && in ext4_mb_use_preallocated()
3821 pa->pa_free >= ac->ac_o_ex.fe_len) { in ext4_mb_use_preallocated()
3824 pa, cpa); in ext4_mb_use_preallocated()
3826 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
3872 struct ext4_prealloc_space *pa; in ext4_mb_generate_from_pa() local
3888 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_generate_from_pa()
3889 spin_lock(&pa->pa_lock); in ext4_mb_generate_from_pa()
3890 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_generate_from_pa()
3892 len = pa->pa_len; in ext4_mb_generate_from_pa()
3893 spin_unlock(&pa->pa_lock); in ext4_mb_generate_from_pa()
3904 struct ext4_prealloc_space *pa) in ext4_mb_mark_pa_deleted() argument
3908 if (pa->pa_deleted) { in ext4_mb_mark_pa_deleted()
3909 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", in ext4_mb_mark_pa_deleted()
3910 pa->pa_type, pa->pa_pstart, pa->pa_lstart, in ext4_mb_mark_pa_deleted()
3911 pa->pa_len); in ext4_mb_mark_pa_deleted()
3915 pa->pa_deleted = 1; in ext4_mb_mark_pa_deleted()
3917 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_mark_pa_deleted()
3918 ei = EXT4_I(pa->pa_inode); in ext4_mb_mark_pa_deleted()
3925 struct ext4_prealloc_space *pa; in ext4_mb_pa_callback() local
3926 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); in ext4_mb_pa_callback()
3928 BUG_ON(atomic_read(&pa->pa_count)); in ext4_mb_pa_callback()
3929 BUG_ON(pa->pa_deleted == 0); in ext4_mb_pa_callback()
3930 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_callback()
3938 struct super_block *sb, struct ext4_prealloc_space *pa) in ext4_mb_put_pa() argument
3944 spin_lock(&pa->pa_lock); in ext4_mb_put_pa()
3945 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { in ext4_mb_put_pa()
3946 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
3950 if (pa->pa_deleted == 1) { in ext4_mb_put_pa()
3951 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
3955 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_put_pa()
3956 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
3958 grp_blk = pa->pa_pstart; in ext4_mb_put_pa()
3961 * next group when pa is used up in ext4_mb_put_pa()
3963 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_put_pa()
3972 * find block B in PA in ext4_mb_put_pa()
3975 * drop PA from group in ext4_mb_put_pa()
3979 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" in ext4_mb_put_pa()
3983 list_del(&pa->pa_group_list); in ext4_mb_put_pa()
3986 spin_lock(pa->pa_obj_lock); in ext4_mb_put_pa()
3987 list_del_rcu(&pa->pa_inode_list); in ext4_mb_put_pa()
3988 spin_unlock(pa->pa_obj_lock); in ext4_mb_put_pa()
3990 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_put_pa()
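ext4_mb_put_pa() realizes the "release consumed pa" locking path from the header comment (pa, then group, then object) and defers the actual free via call_rcu(). The sketch below models that shape under stated assumptions: pthread mutexes stand in for the kernel locks, a reclaim list drained at a quiescent point stands in for RCU, and none of the names are the kernel's.

#include <pthread.h>
#include <stddef.h>

struct mpa {
        pthread_mutex_t pa_lock;
        int pa_count, pa_free, pa_deleted;
        struct mpa *reclaim_next;
};

static pthread_mutex_t group_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mpa *reclaim_list; /* freed once no reader can hold a PA */

static void put_pa(struct mpa *p)
{
        pthread_mutex_lock(&p->pa_lock);
        /* bail unless this was the last ref of a fully consumed, live PA */
        if (--p->pa_count != 0 || p->pa_free != 0 || p->pa_deleted) {
                pthread_mutex_unlock(&p->pa_lock);
                return;
        }
        p->pa_deleted = 1;                 /* ext4_mb_mark_pa_deleted() */
        pthread_mutex_unlock(&p->pa_lock);

        pthread_mutex_lock(&group_lock);   /* "group" lock: unlink from */
        /* ... list_del(&pa->pa_group_list) would go here ...           */
        pthread_mutex_unlock(&group_lock); /* bb_prealloc_list          */

        pthread_mutex_lock(&object_lock);  /* "object" lock: unlink from */
        /* ... list_del_rcu(&pa->pa_inode_list) would go here ...        */
        p->reclaim_next = reclaim_list;    /* call_rcu() stand-in        */
        reclaim_list = p;
        pthread_mutex_unlock(&object_lock);
}

int main(void)
{
        struct mpa p = { PTHREAD_MUTEX_INITIALIZER, 1, 0, 0, NULL };

        put_pa(&p); /* last ref, fully used: queued for deferred free */
        return reclaim_list == &p ? 0 : 1;
}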
4001 struct ext4_prealloc_space *pa; in ext4_mb_new_inode_pa() local
4011 pa = ac->ac_pa; in ext4_mb_new_inode_pa()
4051 pa->pa_lstart = ac->ac_b_ex.fe_logical; in ext4_mb_new_inode_pa()
4052 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_inode_pa()
4053 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
4054 pa->pa_free = pa->pa_len; in ext4_mb_new_inode_pa()
4055 spin_lock_init(&pa->pa_lock); in ext4_mb_new_inode_pa()
4056 INIT_LIST_HEAD(&pa->pa_inode_list); in ext4_mb_new_inode_pa()
4057 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_inode_pa()
4058 pa->pa_deleted = 0; in ext4_mb_new_inode_pa()
4059 pa->pa_type = MB_INODE_PA; in ext4_mb_new_inode_pa()
4061 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_inode_pa()
4062 pa->pa_len, pa->pa_lstart); in ext4_mb_new_inode_pa()
4063 trace_ext4_mb_new_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
4065 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
4066 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); in ext4_mb_new_inode_pa()
4071 pa->pa_obj_lock = &ei->i_prealloc_lock; in ext4_mb_new_inode_pa()
4072 pa->pa_inode = ac->ac_inode; in ext4_mb_new_inode_pa()
4074 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_inode_pa()
4076 spin_lock(pa->pa_obj_lock); in ext4_mb_new_inode_pa()
4077 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); in ext4_mb_new_inode_pa()
4078 spin_unlock(pa->pa_obj_lock); in ext4_mb_new_inode_pa()
4090 struct ext4_prealloc_space *pa; in ext4_mb_new_group_pa() local
4099 pa = ac->ac_pa; in ext4_mb_new_group_pa()
4105 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_group_pa()
4106 pa->pa_lstart = pa->pa_pstart; in ext4_mb_new_group_pa()
4107 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_group_pa()
4108 pa->pa_free = pa->pa_len; in ext4_mb_new_group_pa()
4109 spin_lock_init(&pa->pa_lock); in ext4_mb_new_group_pa()
4110 INIT_LIST_HEAD(&pa->pa_inode_list); in ext4_mb_new_group_pa()
4111 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_group_pa()
4112 pa->pa_deleted = 0; in ext4_mb_new_group_pa()
4113 pa->pa_type = MB_GROUP_PA; in ext4_mb_new_group_pa()
4115 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_group_pa()
4116 pa->pa_len, pa->pa_lstart); in ext4_mb_new_group_pa()
4117 trace_ext4_mb_new_group_pa(ac, pa); in ext4_mb_new_group_pa()
4119 ext4_mb_use_group_pa(ac, pa); in ext4_mb_new_group_pa()
4120 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); in ext4_mb_new_group_pa()
4126 pa->pa_obj_lock = &lg->lg_prealloc_lock; in ext4_mb_new_group_pa()
4127 pa->pa_inode = NULL; in ext4_mb_new_group_pa()
4129 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_group_pa()
4132 * We will later add the new pa to the right bucket in ext4_mb_new_group_pa()
4148 * @pa must be unlinked from inode and group lists, so that
4155 struct ext4_prealloc_space *pa) in ext4_mb_release_inode_pa() argument
4166 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_inode_pa()
4167 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_inode_pa()
4168 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); in ext4_mb_release_inode_pa()
4169 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); in ext4_mb_release_inode_pa()
4170 end = bit + pa->pa_len; in ext4_mb_release_inode_pa()
4183 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + in ext4_mb_release_inode_pa()
4186 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); in ext4_mb_release_inode_pa()
4189 if (free != pa->pa_free) { in ext4_mb_release_inode_pa()
4191 "pa %p: logic %lu, phys. %lu, len %d", in ext4_mb_release_inode_pa()
4192 pa, (unsigned long) pa->pa_lstart, in ext4_mb_release_inode_pa()
4193 (unsigned long) pa->pa_pstart, in ext4_mb_release_inode_pa()
4194 pa->pa_len); in ext4_mb_release_inode_pa()
4196 free, pa->pa_free); in ext4_mb_release_inode_pa()
4198 * pa is already deleted so we use the value obtained in ext4_mb_release_inode_pa()
4209 struct ext4_prealloc_space *pa) in ext4_mb_release_group_pa() argument
4215 trace_ext4_mb_release_group_pa(sb, pa); in ext4_mb_release_group_pa()
4216 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_group_pa()
4217 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_group_pa()
4218 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); in ext4_mb_release_group_pa()
4219 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); in ext4_mb_release_group_pa()
4220 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); in ext4_mb_release_group_pa()
4221 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); in ext4_mb_release_group_pa()
4241 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_group_preallocations() local
4270 list_for_each_entry_safe(pa, tmp, in ext4_mb_discard_group_preallocations()
4272 spin_lock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4273 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_group_preallocations()
4274 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4278 if (pa->pa_deleted) { in ext4_mb_discard_group_preallocations()
4279 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4284 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_group_preallocations()
4290 free += pa->pa_free; in ext4_mb_discard_group_preallocations()
4292 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4294 list_del(&pa->pa_group_list); in ext4_mb_discard_group_preallocations()
4295 list_add(&pa->u.pa_tmp_list, &list); in ext4_mb_discard_group_preallocations()
4299 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_mb_discard_group_preallocations()
4302 spin_lock(pa->pa_obj_lock); in ext4_mb_discard_group_preallocations()
4303 list_del_rcu(&pa->pa_inode_list); in ext4_mb_discard_group_preallocations()
4304 spin_unlock(pa->pa_obj_lock); in ext4_mb_discard_group_preallocations()
4306 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_discard_group_preallocations()
4307 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_group_preallocations()
4309 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_mb_discard_group_preallocations()
4311 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_group_preallocations()
4312 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_group_preallocations()
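ext4_mb_discard_group_preallocations() follows a collect-then-release pattern: idle PAs are marked deleted and unlinked onto a private list while the group is locked, and the block-freeing work happens afterwards without that lock. A condensed userspace sketch of the collect phase; names are illustrative and the per-PA spinlock is folded into the group lock for brevity.

#include <pthread.h>
#include <stddef.h>

struct dpa {
        int count, deleted;
        struct dpa *next; /* group list / temporary list link */
};

static pthread_mutex_t grp_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dpa *group_list;

/* Under the group lock, move every idle PA onto a private list;
 * referenced PAs are skipped, exactly as the loops above do. */
static struct dpa *collect_idle_pas(void)
{
        struct dpa *keep = NULL, *drop = NULL, *p, *next;

        pthread_mutex_lock(&grp_lock);
        for (p = group_list; p; p = next) {
                next = p->next;
                if (p->count || p->deleted) { /* busy: leave in place */
                        p->next = keep;
                        keep = p;
                        continue;
                }
                p->deleted = 1;               /* ext4_mb_mark_pa_deleted() */
                p->next = drop;
                drop = p;
        }
        group_list = keep;
        pthread_mutex_unlock(&grp_lock);
        return drop;
}

/* Phase two walks the returned list and frees blocks back to the buddy
 * with no group-wide lock held, as ext4_mb_release_*_pa() do. */
int main(void)
{
        struct dpa busy = { 1, 0, NULL }, idle = { 0, 0, &busy };

        group_list = &idle;
        return (collect_idle_pas() == &idle && group_list == &busy) ? 0 : 1;
}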
4338 struct ext4_prealloc_space *pa, *tmp; in ext4_discard_preallocations() local
4363 /* first, collect all pa's in the inode */ in ext4_discard_preallocations()
4366 pa = list_entry(ei->i_prealloc_list.prev, in ext4_discard_preallocations()
4368 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); in ext4_discard_preallocations()
4369 spin_lock(&pa->pa_lock); in ext4_discard_preallocations()
4370 if (atomic_read(&pa->pa_count)) { in ext4_discard_preallocations()
4373 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4376 "uh-oh! used pa while discarding"); in ext4_discard_preallocations()
4382 if (pa->pa_deleted == 0) { in ext4_discard_preallocations()
4383 ext4_mb_mark_pa_deleted(sb, pa); in ext4_discard_preallocations()
4384 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4385 list_del_rcu(&pa->pa_inode_list); in ext4_discard_preallocations()
4386 list_add(&pa->u.pa_tmp_list, &list); in ext4_discard_preallocations()
4391 /* someone is deleting pa right now */ in ext4_discard_preallocations()
4392 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4396 * doesn't mean pa is already unlinked from in ext4_discard_preallocations()
4400 * pa from inode's list may access already in ext4_discard_preallocations()
4412 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_discard_preallocations()
4413 BUG_ON(pa->pa_type != MB_INODE_PA); in ext4_discard_preallocations()
4414 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_discard_preallocations()
4434 list_del(&pa->pa_group_list); in ext4_discard_preallocations()
4435 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_discard_preallocations()
4441 list_del(&pa->u.pa_tmp_list); in ext4_discard_preallocations()
4442 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_discard_preallocations()
4448 struct ext4_prealloc_space *pa; in ext4_mb_pa_alloc() local
4451 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); in ext4_mb_pa_alloc()
4452 if (!pa) in ext4_mb_pa_alloc()
4454 atomic_set(&pa->pa_count, 1); in ext4_mb_pa_alloc()
4455 ac->ac_pa = pa; in ext4_mb_pa_alloc()
4461 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_pa_free() local
4463 BUG_ON(!pa); in ext4_mb_pa_free()
4465 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); in ext4_mb_pa_free()
4466 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_free()
4481 struct ext4_prealloc_space *pa; in ext4_mb_show_pa() local
4486 pa = list_entry(cur, struct ext4_prealloc_space, in ext4_mb_show_pa()
4488 spin_lock(&pa->pa_lock); in ext4_mb_show_pa()
4489 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_show_pa()
4491 spin_unlock(&pa->pa_lock); in ext4_mb_show_pa()
4492 mb_debug(sb, "PA:%u:%d:%d\n", i, start, in ext4_mb_show_pa()
4493 pa->pa_len); in ext4_mb_show_pa()
4660 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_lg_preallocations() local
4667 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], in ext4_mb_discard_lg_preallocations()
4670 spin_lock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4671 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_lg_preallocations()
4673 * This is the pa that we just used in ext4_mb_discard_lg_preallocations()
4677 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4680 if (pa->pa_deleted) { in ext4_mb_discard_lg_preallocations()
4681 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4685 BUG_ON(pa->pa_type != MB_GROUP_PA); in ext4_mb_discard_lg_preallocations()
4688 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_lg_preallocations()
4689 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4691 list_del_rcu(&pa->pa_inode_list); in ext4_mb_discard_lg_preallocations()
4692 list_add(&pa->u.pa_tmp_list, &discard_list); in ext4_mb_discard_lg_preallocations()
4707 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { in ext4_mb_discard_lg_preallocations()
4710 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_mb_discard_lg_preallocations()
4719 list_del(&pa->pa_group_list); in ext4_mb_discard_lg_preallocations()
4720 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_lg_preallocations()
4724 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_lg_preallocations()
4725 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_lg_preallocations()
4743 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; in ext4_mb_add_n_trim() local
4745 order = fls(pa->pa_free) - 1; in ext4_mb_add_n_trim()
4759 if (!added && pa->pa_free < tmp_pa->pa_free) { in ext4_mb_add_n_trim()
4761 list_add_tail_rcu(&pa->pa_inode_list, in ext4_mb_add_n_trim()
4773 list_add_tail_rcu(&pa->pa_inode_list, in ext4_mb_add_n_trim()
4787 * if per-inode prealloc list is too long, trim some PA
4811 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_release_context() local
4812 if (pa) { in ext4_mb_release_context()
4813 if (pa->pa_type == MB_GROUP_PA) { in ext4_mb_release_context()
4815 spin_lock(&pa->pa_lock); in ext4_mb_release_context()
4816 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
4817 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
4818 pa->pa_free -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
4819 pa->pa_len -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
4820 spin_unlock(&pa->pa_lock); in ext4_mb_release_context()
4823 * We want to add the pa to the right bucket. in ext4_mb_release_context()
4828 if (likely(pa->pa_free)) { in ext4_mb_release_context()
4829 spin_lock(pa->pa_obj_lock); in ext4_mb_release_context()
4830 list_del_rcu(&pa->pa_inode_list); in ext4_mb_release_context()
4831 spin_unlock(pa->pa_obj_lock); in ext4_mb_release_context()
4836 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_release_context()
4839 * to trim the least recently used PA. in ext4_mb_release_context()
4841 spin_lock(pa->pa_obj_lock); in ext4_mb_release_context()
4842 list_move(&pa->pa_inode_list, &ei->i_prealloc_list); in ext4_mb_release_context()
4843 spin_unlock(pa->pa_obj_lock); in ext4_mb_release_context()
4846 ext4_mb_put_pa(ac, ac->ac_sb, pa); in ext4_mb_release_context()
5004 * pa allocated above is added to grp->bb_prealloc_list only in ext4_mb_new_blocks()
5008 * So we have to free this pa here itself. in ext4_mb_new_blocks()
5032 * If block allocation fails then the pa allocated above in ext4_mb_new_blocks()