Lines Matching full:pa

192  *  blocks: how many blocks are marked used/free in the on-disk bitmap, buddy and PA.
196 * - new PA: buddy += N; PA = N
197 * - use inode PA: on-disk += N; PA -= N
198 * - discard inode PA: buddy -= on-disk - PA; PA = 0
199 * - use locality group PA: on-disk += N; PA -= N
200 * - discard locality group PA: buddy -= PA; PA = 0
201 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
202 * is used in the real operation, because we can't know the actually-used
203 * bits from a PA, only from the on-disk bitmap
213 * bit set and a PA claims the same block, it's OK. IOW, one can set a bit in the
214 * on-disk bitmap if the buddy has the same bit set and/or a PA covers the
215 * corresponding block
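Taken together, the comment's arithmetic is per-bit boolean algebra: '+' means OR over set bits, and the invariant is that the in-core buddy equals the on-disk bitmap OR'ed with all PAs. A minimal standalone model in plain C (hypothetical names, not the kernel's actual structures):

#include <assert.h>
#include <stdbool.h>

/* One block's view in the three trackers (hypothetical model). */
struct blk_state {
	bool on_disk;	/* bit set in the on-disk bitmap */
	bool in_pa;	/* block covered by some PA */
	bool in_buddy;	/* bit set in the in-core buddy */
};

/* Invariant: buddy = on-disk + PA, where '+' is OR of set bits.
 * Rule (3): setting the on-disk bit is fine as long as the buddy bit is
 * already set and/or a PA covers the corresponding block. */
static void check(const struct blk_state *b)
{
	assert(b->in_buddy == (b->on_disk || b->in_pa));
}

int main(void)
{
	struct blk_state b = { false, false, false };

	b.in_pa = b.in_buddy = true;		/* new PA: buddy += N; PA = N */
	check(&b);
	b.on_disk = true; b.in_pa = false;	/* use PA: on-disk += N; PA -= N */
	check(&b);
	return 0;
}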
219 * - new PA
220 * blocks for the PA are allocated in the buddy; the buddy must stay referenced
221 * until the PA is linked to the allocation group, to avoid a concurrent buddy init
222 * - use inode PA
223 * we need to make sure that either the on-disk bitmap or the PA has up-to-date data
224 * given (3), we care that the PA -= N operation doesn't interfere with init
225 * - discard inode PA
227 * - use locality group PA
228 * again, PA -= N must be serialized with init
229 * - discard locality group PA
231 * - new PA vs.
232 * - use inode PA
234 * - discard inode PA
235 * the discard process must wait until the PA is no longer used by another process
236 * - use locality group PA
238 * - discard locality group PA
239 * the discard process must wait until the PA is no longer used by another process
240 * - use inode PA vs.
241 * - use inode PA
243 * - discard inode PA
244 * the discard process must wait until the PA is no longer used by another process
245 * - use locality group PA
247 * - discard locality group PA
248 * the discard process must wait until the PA is no longer used by another process
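In the code below, the "discard must wait" rule is implemented by reference counting rather than blocking: users pin a PA with atomic_inc(&pa->pa_count), and a discarder that sees an elevated count simply skips the PA (and may retry), as in ext4_mb_discard_group_preallocations(). A simplified standalone model:

/* Simplified model of the discard-vs-use rule: a discarder never frees a
 * PA whose reference count is elevated; it skips it and leaves teardown
 * to the last user dropping its reference (see ext4_mb_put_pa below). */
struct pa_model { int count; int deleted; };

static int try_discard(struct pa_model *p)
{
	if (p->count > 0)
		return 0;	/* busy: a user holds a reference, skip */
	if (p->deleted)
		return 0;	/* already being torn down elsewhere */
	p->deleted = 1;		/* safe: no user can be inside the PA now */
	return 1;
}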
251 * - PA is referenced, and while it is, no discard is possible
252 * - PA is referenced until its blocks are marked in the on-disk bitmap
253 * - PA changes only after the on-disk bitmap does
258 * a special case: when we've used a PA to emptiness there is no need to modify the buddy
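That special case is visible in ext4_mb_put_pa() below: teardown happens only when the last reference is dropped and the PA has been used to emptiness, and an empty PA needs no buddy update because every block it covered is already marked in the on-disk bitmap. A hypothetical condensation of that check:

/* Condensed from the ext4_mb_put_pa() fast path below. */
static int should_teardown(int refs_after_drop, int pa_free)
{
	/* keep the PA if someone still references it, or if it still
	 * has free blocks that later allocations may consume */
	return refs_after_drop == 0 && pa_free == 0;
}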
273 * find proper PA (per-inode or group)
277 * release PA
289 * remove PA from object (inode or locality group)
301 * - per-pa lock (pa)
304 * - new pa
308 * - find and use pa:
309 * pa
311 * - release consumed pa:
312 * pa
318 * pa
322 * pa
327 * pa
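The lock classes and per-path orderings listed above can be seen end-to-end in ext4_mb_put_pa() further down; as a reading aid, a condensed (not independently compilable) sketch of the "release consumed pa" path, using only calls that appear in this file:

/* release consumed pa: pa lock -> group lock -> object lock, condensed
 * from ext4_mb_put_pa(); refcounting, error paths and RCU free elided. */
spin_lock(&pa->pa_lock);		/* pa: decide the PA's fate */
ext4_mb_mark_pa_deleted(sb, pa);
spin_unlock(&pa->pa_lock);

ext4_lock_group(sb, grp);		/* group: unlink from group list */
list_del(&pa->pa_group_list);
ext4_unlock_group(sb, grp);

spin_lock(pa->pa_obj_lock);		/* object: unlink from inode/lg list */
list_del_rcu(&pa->pa_inode_list);
spin_unlock(pa->pa_obj_lock);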
689 struct ext4_prealloc_space *pa; in __mb_check_buddy() local
690 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in __mb_check_buddy()
691 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); in __mb_check_buddy()
693 for (i = 0; i < pa->pa_len; i++) in __mb_check_buddy()
2949 struct ext4_prealloc_space *pa; in ext4_mb_cleanup_pa() local
2954 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_cleanup_pa()
2955 list_del(&pa->pa_group_list); in ext4_mb_cleanup_pa()
2957 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_cleanup_pa()
3448 struct ext4_prealloc_space *pa; in ext4_mb_normalize_request() local
3551 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_normalize_request()
3554 if (pa->pa_deleted) in ext4_mb_normalize_request()
3556 spin_lock(&pa->pa_lock); in ext4_mb_normalize_request()
3557 if (pa->pa_deleted) { in ext4_mb_normalize_request()
3558 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3562 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), in ext4_mb_normalize_request()
3563 pa->pa_len); in ext4_mb_normalize_request()
3565 /* PA must not overlap original request */ in ext4_mb_normalize_request()
3567 ac->ac_o_ex.fe_logical < pa->pa_lstart)); in ext4_mb_normalize_request()
3570 if (pa->pa_lstart >= end || pa_end <= start) { in ext4_mb_normalize_request()
3571 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3574 BUG_ON(pa->pa_lstart <= start && pa_end >= end); in ext4_mb_normalize_request()
3576 /* adjust start or end to be adjacent to this pa */ in ext4_mb_normalize_request()
3580 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { in ext4_mb_normalize_request()
3581 BUG_ON(pa->pa_lstart > end); in ext4_mb_normalize_request()
3582 end = pa->pa_lstart; in ext4_mb_normalize_request()
3584 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
3591 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_normalize_request()
3594 spin_lock(&pa->pa_lock); in ext4_mb_normalize_request()
3595 if (pa->pa_deleted == 0) { in ext4_mb_normalize_request()
3596 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), in ext4_mb_normalize_request()
3597 pa->pa_len); in ext4_mb_normalize_request()
3598 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); in ext4_mb_normalize_request()
3600 spin_unlock(&pa->pa_lock); in ext4_mb_normalize_request()
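The fragments above shrink the normalized window [start, end) so that it still covers the original request but never overlaps an existing PA; the BUG_ON()s assert that no PA covers the original block. A standalone sketch of that interval arithmetic (hypothetical helper, plain C):

#include <assert.h>

/* Shrink [start, end) around the original logical block 'orig' so it
 * cannot overlap a PA occupying [pa_start, pa_end), mirroring the
 * window trimming in ext4_mb_normalize_request(). */
static void trim_window(unsigned long *start, unsigned long *end,
			unsigned long orig,
			unsigned long pa_start, unsigned long pa_end)
{
	if (pa_start >= *end || pa_end <= *start)
		return;				/* disjoint: nothing to do */
	assert(orig < pa_start || orig >= pa_end);	/* PA can't cover orig */
	if (pa_end <= orig)
		*start = pa_end;	/* PA lies before the request */
	else
		*end = pa_start;	/* PA lies after the request */
}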
3665 * Called on failure; free up any blocks from the inode PA for this
3672 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_discard_allocated_blocks() local
3676 if (pa == NULL) { in ext4_discard_allocated_blocks()
3696 if (pa->pa_type == MB_INODE_PA) in ext4_discard_allocated_blocks()
3697 pa->pa_free += ac->ac_b_ex.fe_len; in ext4_discard_allocated_blocks()
3704 struct ext4_prealloc_space *pa) in ext4_mb_use_inode_pa() argument
3712 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); in ext4_mb_use_inode_pa()
3713 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), in ext4_mb_use_inode_pa()
3720 ac->ac_pa = pa; in ext4_mb_use_inode_pa()
3722 BUG_ON(start < pa->pa_pstart); in ext4_mb_use_inode_pa()
3723 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); in ext4_mb_use_inode_pa()
3724 BUG_ON(pa->pa_free < len); in ext4_mb_use_inode_pa()
3725 pa->pa_free -= len; in ext4_mb_use_inode_pa()
3727 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); in ext4_mb_use_inode_pa()
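ext4_mb_use_inode_pa() maps the request's logical block to a physical block by applying its offset within the PA's logical range to the PA's physical start. A standalone sketch of that mapping (the cluster-to-block conversion via EXT4_C2B() is omitted):

#include <assert.h>

/* Map a logical block to its physical block inside an inode PA,
 * mirroring: start = pa_pstart + (logical - pa_lstart). */
static unsigned long long pa_map(unsigned long long pa_pstart,
				 unsigned long pa_lstart,
				 unsigned long pa_len,
				 unsigned long logical)
{
	assert(logical >= pa_lstart && logical < pa_lstart + pa_len);
	return pa_pstart + (logical - pa_lstart);
}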
3734 struct ext4_prealloc_space *pa) in ext4_mb_use_group_pa() argument
3738 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, in ext4_mb_use_group_pa()
3743 ac->ac_pa = pa; in ext4_mb_use_group_pa()
3747 * instead we correct pa later, after blocks are marked in ext4_mb_use_group_pa()
3749 * Other CPUs are prevented from allocating from this pa by lg_mutex in ext4_mb_use_group_pa()
3751 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", in ext4_mb_use_group_pa()
3752 pa->pa_lstart-len, len, pa); in ext4_mb_use_group_pa()
3763 struct ext4_prealloc_space *pa, in ext4_mb_check_group_pa() argument
3769 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
3770 return pa; in ext4_mb_check_group_pa()
3773 new_distance = abs(goal_block - pa->pa_pstart); in ext4_mb_check_group_pa()
3780 atomic_inc(&pa->pa_count); in ext4_mb_check_group_pa()
3781 return pa; in ext4_mb_check_group_pa()
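ext4_mb_check_group_pa() keeps whichever candidate group PA starts closest to the goal block, taking a reference on the winner. A trivial standalone rendering of the preference rule (llabs() stands in for the kernel's abs()):

#include <stdlib.h>

/* Return the candidate start closest to the goal block. */
static long long closer_to_goal(long long goal,
				long long cur_start, long long new_start)
{
	return llabs(goal - new_start) < llabs(goal - cur_start)
		? new_start : cur_start;
}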
3794 struct ext4_prealloc_space *pa, *cpa = NULL; in ext4_mb_use_preallocated() local
3803 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { in ext4_mb_use_preallocated()
3807 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || in ext4_mb_use_preallocated()
3808 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + in ext4_mb_use_preallocated()
3809 EXT4_C2B(sbi, pa->pa_len))) in ext4_mb_use_preallocated()
3814 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > in ext4_mb_use_preallocated()
3819 spin_lock(&pa->pa_lock); in ext4_mb_use_preallocated()
3820 if (pa->pa_deleted == 0 && pa->pa_free) { in ext4_mb_use_preallocated()
3821 atomic_inc(&pa->pa_count); in ext4_mb_use_preallocated()
3822 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_use_preallocated()
3823 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
3828 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
3852 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], in ext4_mb_use_preallocated()
3854 spin_lock(&pa->pa_lock); in ext4_mb_use_preallocated()
3855 if (pa->pa_deleted == 0 && in ext4_mb_use_preallocated()
3856 pa->pa_free >= ac->ac_o_ex.fe_len) { in ext4_mb_use_preallocated()
3859 pa, cpa); in ext4_mb_use_preallocated()
3861 spin_unlock(&pa->pa_lock); in ext4_mb_use_preallocated()
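Both searches above follow the same lookup discipline: traverse an RCU-protected list, take the per-PA spinlock, re-check pa_deleted under the lock, and pin the entry with pa_count before dropping the lock. Condensed from the inode-PA scan above (kernel-style, not independently compilable):

rcu_read_lock();
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
	spin_lock(&pa->pa_lock);
	if (pa->pa_deleted == 0 && pa->pa_free) {
		atomic_inc(&pa->pa_count);	/* pin before unlocking */
		ext4_mb_use_inode_pa(ac, pa);
		spin_unlock(&pa->pa_lock);
		break;
	}
	spin_unlock(&pa->pa_lock);
}
rcu_read_unlock();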
3907 struct ext4_prealloc_space *pa; in ext4_mb_generate_from_pa() local
3923 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); in ext4_mb_generate_from_pa()
3924 spin_lock(&pa->pa_lock); in ext4_mb_generate_from_pa()
3925 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_generate_from_pa()
3927 len = pa->pa_len; in ext4_mb_generate_from_pa()
3928 spin_unlock(&pa->pa_lock); in ext4_mb_generate_from_pa()
3939 struct ext4_prealloc_space *pa) in ext4_mb_mark_pa_deleted() argument
3943 if (pa->pa_deleted) { in ext4_mb_mark_pa_deleted()
3944 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", in ext4_mb_mark_pa_deleted()
3945 pa->pa_type, pa->pa_pstart, pa->pa_lstart, in ext4_mb_mark_pa_deleted()
3946 pa->pa_len); in ext4_mb_mark_pa_deleted()
3950 pa->pa_deleted = 1; in ext4_mb_mark_pa_deleted()
3952 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_mark_pa_deleted()
3953 ei = EXT4_I(pa->pa_inode); in ext4_mb_mark_pa_deleted()
3960 struct ext4_prealloc_space *pa; in ext4_mb_pa_callback() local
3961 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); in ext4_mb_pa_callback()
3963 BUG_ON(atomic_read(&pa->pa_count)); in ext4_mb_pa_callback()
3964 BUG_ON(pa->pa_deleted == 0); in ext4_mb_pa_callback()
3965 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_callback()
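ext4_mb_mark_pa_deleted() and ext4_mb_pa_callback() are the two ends of the standard RCU object lifecycle used for PAs: mark deleted under pa_lock so new users refuse the PA, unlink with list_del_rcu() under the object lock, then hand the memory to call_rcu() so in-flight RCU readers finish before the slab free. Condensed from this file:

spin_lock(&pa->pa_lock);
pa->pa_deleted = 1;			/* new users now skip this PA */
spin_unlock(&pa->pa_lock);

spin_lock(pa->pa_obj_lock);
list_del_rcu(&pa->pa_inode_list);	/* RCU readers may still see it */
spin_unlock(pa->pa_obj_lock);

call_rcu(&pa->u.pa_rcu, ext4_mb_pa_callback);	/* freed after grace period */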
3973 struct super_block *sb, struct ext4_prealloc_space *pa) in ext4_mb_put_pa() argument
3979 spin_lock(&pa->pa_lock); in ext4_mb_put_pa()
3980 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { in ext4_mb_put_pa()
3981 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
3985 if (pa->pa_deleted == 1) { in ext4_mb_put_pa()
3986 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
3990 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_put_pa()
3991 spin_unlock(&pa->pa_lock); in ext4_mb_put_pa()
3993 grp_blk = pa->pa_pstart; in ext4_mb_put_pa()
3996 * next group when pa is used up in ext4_mb_put_pa()
3998 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_put_pa()
4007 * find block B in PA in ext4_mb_put_pa()
4010 * drop PA from group in ext4_mb_put_pa()
4014 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" in ext4_mb_put_pa()
4018 list_del(&pa->pa_group_list); in ext4_mb_put_pa()
4021 spin_lock(pa->pa_obj_lock); in ext4_mb_put_pa()
4022 list_del_rcu(&pa->pa_inode_list); in ext4_mb_put_pa()
4023 spin_unlock(pa->pa_obj_lock); in ext4_mb_put_pa()
4025 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_put_pa()
4036 struct ext4_prealloc_space *pa; in ext4_mb_new_inode_pa() local
4046 pa = ac->ac_pa; in ext4_mb_new_inode_pa()
4086 pa->pa_lstart = ac->ac_b_ex.fe_logical; in ext4_mb_new_inode_pa()
4087 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_inode_pa()
4088 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_inode_pa()
4089 pa->pa_free = pa->pa_len; in ext4_mb_new_inode_pa()
4090 spin_lock_init(&pa->pa_lock); in ext4_mb_new_inode_pa()
4091 INIT_LIST_HEAD(&pa->pa_inode_list); in ext4_mb_new_inode_pa()
4092 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_inode_pa()
4093 pa->pa_deleted = 0; in ext4_mb_new_inode_pa()
4094 pa->pa_type = MB_INODE_PA; in ext4_mb_new_inode_pa()
4096 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_inode_pa()
4097 pa->pa_len, pa->pa_lstart); in ext4_mb_new_inode_pa()
4098 trace_ext4_mb_new_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
4100 ext4_mb_use_inode_pa(ac, pa); in ext4_mb_new_inode_pa()
4101 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); in ext4_mb_new_inode_pa()
4106 pa->pa_obj_lock = &ei->i_prealloc_lock; in ext4_mb_new_inode_pa()
4107 pa->pa_inode = ac->ac_inode; in ext4_mb_new_inode_pa()
4109 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_inode_pa()
4111 spin_lock(pa->pa_obj_lock); in ext4_mb_new_inode_pa()
4112 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); in ext4_mb_new_inode_pa()
4113 spin_unlock(pa->pa_obj_lock); in ext4_mb_new_inode_pa()
4125 struct ext4_prealloc_space *pa; in ext4_mb_new_group_pa() local
4134 pa = ac->ac_pa; in ext4_mb_new_group_pa()
4140 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); in ext4_mb_new_group_pa()
4141 pa->pa_lstart = pa->pa_pstart; in ext4_mb_new_group_pa()
4142 pa->pa_len = ac->ac_b_ex.fe_len; in ext4_mb_new_group_pa()
4143 pa->pa_free = pa->pa_len; in ext4_mb_new_group_pa()
4144 spin_lock_init(&pa->pa_lock); in ext4_mb_new_group_pa()
4145 INIT_LIST_HEAD(&pa->pa_inode_list); in ext4_mb_new_group_pa()
4146 INIT_LIST_HEAD(&pa->pa_group_list); in ext4_mb_new_group_pa()
4147 pa->pa_deleted = 0; in ext4_mb_new_group_pa()
4148 pa->pa_type = MB_GROUP_PA; in ext4_mb_new_group_pa()
4150 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, in ext4_mb_new_group_pa()
4151 pa->pa_len, pa->pa_lstart); in ext4_mb_new_group_pa()
4152 trace_ext4_mb_new_group_pa(ac, pa); in ext4_mb_new_group_pa()
4154 ext4_mb_use_group_pa(ac, pa); in ext4_mb_new_group_pa()
4155 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); in ext4_mb_new_group_pa()
4161 pa->pa_obj_lock = &lg->lg_prealloc_lock; in ext4_mb_new_group_pa()
4162 pa->pa_inode = NULL; in ext4_mb_new_group_pa()
4164 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); in ext4_mb_new_group_pa()
4167 * We will later add the new pa to the right bucket in ext4_mb_new_group_pa()
4183 * @pa must be unlinked from inode and group lists, so that
4190 struct ext4_prealloc_space *pa) in ext4_mb_release_inode_pa() argument
4201 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_inode_pa()
4202 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_inode_pa()
4203 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); in ext4_mb_release_inode_pa()
4204 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); in ext4_mb_release_inode_pa()
4205 end = bit + pa->pa_len; in ext4_mb_release_inode_pa()
4218 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + in ext4_mb_release_inode_pa()
4221 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); in ext4_mb_release_inode_pa()
4224 if (free != pa->pa_free) { in ext4_mb_release_inode_pa()
4226 "pa %p: logic %lu, phys. %lu, len %d", in ext4_mb_release_inode_pa()
4227 pa, (unsigned long) pa->pa_lstart, in ext4_mb_release_inode_pa()
4228 (unsigned long) pa->pa_pstart, in ext4_mb_release_inode_pa()
4229 pa->pa_len); in ext4_mb_release_inode_pa()
4231 free, pa->pa_free); in ext4_mb_release_inode_pa()
4233 * pa is already deleted so we use the value obtained in ext4_mb_release_inode_pa()
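The elided loop in ext4_mb_release_inode_pa() walks the PA's bit range [bit, end) in runs (mb_find_next_zero_bit()/mb_find_next_bit() in the kernel): bits still set on disk were really allocated, and runs of clear bits are returned to the buddy and counted, which is what the 'free != pa->pa_free' consistency check above compares. A standalone model of the counting (hypothetical flat bitmap):

/* Count the bits in [bit, end) that are clear in the on-disk bitmap;
 * these are the unused blocks the PA gives back to the buddy. */
static int count_free(const unsigned char *map, int bit, int end)
{
	int free = 0;

	for (int i = bit; i < end; i++)
		if (!(map[i / 8] & (1u << (i % 8))))
			free++;		/* clear bit: never used on disk */
	return free;			/* compared against pa->pa_free */
}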
4244 struct ext4_prealloc_space *pa) in ext4_mb_release_group_pa() argument
4250 trace_ext4_mb_release_group_pa(sb, pa); in ext4_mb_release_group_pa()
4251 BUG_ON(pa->pa_deleted == 0); in ext4_mb_release_group_pa()
4252 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); in ext4_mb_release_group_pa()
4253 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); in ext4_mb_release_group_pa()
4254 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); in ext4_mb_release_group_pa()
4255 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); in ext4_mb_release_group_pa()
4256 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); in ext4_mb_release_group_pa()
4276 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_group_preallocations() local
4305 list_for_each_entry_safe(pa, tmp, in ext4_mb_discard_group_preallocations()
4307 spin_lock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4308 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_group_preallocations()
4309 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4313 if (pa->pa_deleted) { in ext4_mb_discard_group_preallocations()
4314 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4319 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_group_preallocations()
4325 free += pa->pa_free; in ext4_mb_discard_group_preallocations()
4327 spin_unlock(&pa->pa_lock); in ext4_mb_discard_group_preallocations()
4329 list_del(&pa->pa_group_list); in ext4_mb_discard_group_preallocations()
4330 list_add(&pa->u.pa_tmp_list, &list); in ext4_mb_discard_group_preallocations()
4334 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_mb_discard_group_preallocations()
4337 spin_lock(pa->pa_obj_lock); in ext4_mb_discard_group_preallocations()
4338 list_del_rcu(&pa->pa_inode_list); in ext4_mb_discard_group_preallocations()
4339 spin_unlock(pa->pa_obj_lock); in ext4_mb_discard_group_preallocations()
4341 if (pa->pa_type == MB_GROUP_PA) in ext4_mb_discard_group_preallocations()
4342 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_group_preallocations()
4344 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_mb_discard_group_preallocations()
4346 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_group_preallocations()
4347 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_group_preallocations()
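ext4_mb_discard_group_preallocations() is deliberately two-phase: under the group lock it only marks victims deleted and moves them onto a private list (skipping PAs with an elevated pa_count), and a second pass over that private list does the heavier release and RCU free. A condensed outline (inode-PA branch shown; not independently compilable):

ext4_lock_group(sb, group);
list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) {
	spin_lock(&pa->pa_lock);
	if (atomic_read(&pa->pa_count) || pa->pa_deleted) {
		spin_unlock(&pa->pa_lock);
		continue;		/* busy or already gone: skip */
	}
	ext4_mb_mark_pa_deleted(sb, pa);
	spin_unlock(&pa->pa_lock);
	list_del(&pa->pa_group_list);
	list_add(&pa->u.pa_tmp_list, &list);	/* phase 2 input */
}

list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
	ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
	list_del(&pa->u.pa_tmp_list);
	call_rcu(&pa->u.pa_rcu, ext4_mb_pa_callback);
}
ext4_unlock_group(sb, group);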
4373 struct ext4_prealloc_space *pa, *tmp; in ext4_discard_preallocations() local
4398 /* first, collect all pa's in the inode */ in ext4_discard_preallocations()
4401 pa = list_entry(ei->i_prealloc_list.prev, in ext4_discard_preallocations()
4403 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); in ext4_discard_preallocations()
4404 spin_lock(&pa->pa_lock); in ext4_discard_preallocations()
4405 if (atomic_read(&pa->pa_count)) { in ext4_discard_preallocations()
4408 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4411 "uh-oh! used pa while discarding"); in ext4_discard_preallocations()
4417 if (pa->pa_deleted == 0) { in ext4_discard_preallocations()
4418 ext4_mb_mark_pa_deleted(sb, pa); in ext4_discard_preallocations()
4419 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4420 list_del_rcu(&pa->pa_inode_list); in ext4_discard_preallocations()
4421 list_add(&pa->u.pa_tmp_list, &list); in ext4_discard_preallocations()
4426 /* someone is deleting pa right now */ in ext4_discard_preallocations()
4427 spin_unlock(&pa->pa_lock); in ext4_discard_preallocations()
4431 * doesn't mean pa is already unlinked from in ext4_discard_preallocations()
4435 * pa from inode's list may access already in ext4_discard_preallocations()
4447 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { in ext4_discard_preallocations()
4448 BUG_ON(pa->pa_type != MB_INODE_PA); in ext4_discard_preallocations()
4449 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_discard_preallocations()
4469 list_del(&pa->pa_group_list); in ext4_discard_preallocations()
4470 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); in ext4_discard_preallocations()
4476 list_del(&pa->u.pa_tmp_list); in ext4_discard_preallocations()
4477 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_discard_preallocations()
4483 struct ext4_prealloc_space *pa; in ext4_mb_pa_alloc() local
4486 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); in ext4_mb_pa_alloc()
4487 if (!pa) in ext4_mb_pa_alloc()
4489 atomic_set(&pa->pa_count, 1); in ext4_mb_pa_alloc()
4490 ac->ac_pa = pa; in ext4_mb_pa_alloc()
4496 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_pa_free() local
4498 BUG_ON(!pa); in ext4_mb_pa_free()
4500 WARN_ON(!atomic_dec_and_test(&pa->pa_count)); in ext4_mb_pa_free()
4501 kmem_cache_free(ext4_pspace_cachep, pa); in ext4_mb_pa_free()
4516 struct ext4_prealloc_space *pa; in ext4_mb_show_pa() local
4521 pa = list_entry(cur, struct ext4_prealloc_space, in ext4_mb_show_pa()
4523 spin_lock(&pa->pa_lock); in ext4_mb_show_pa()
4524 ext4_get_group_no_and_offset(sb, pa->pa_pstart, in ext4_mb_show_pa()
4526 spin_unlock(&pa->pa_lock); in ext4_mb_show_pa()
4527 mb_debug(sb, "PA:%u:%d:%d\n", i, start, in ext4_mb_show_pa()
4528 pa->pa_len); in ext4_mb_show_pa()
4695 struct ext4_prealloc_space *pa, *tmp; in ext4_mb_discard_lg_preallocations() local
4702 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], in ext4_mb_discard_lg_preallocations()
4705 spin_lock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4706 if (atomic_read(&pa->pa_count)) { in ext4_mb_discard_lg_preallocations()
4708 * This is the pa that we just used in ext4_mb_discard_lg_preallocations()
4712 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4715 if (pa->pa_deleted) { in ext4_mb_discard_lg_preallocations()
4716 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4720 BUG_ON(pa->pa_type != MB_GROUP_PA); in ext4_mb_discard_lg_preallocations()
4723 ext4_mb_mark_pa_deleted(sb, pa); in ext4_mb_discard_lg_preallocations()
4724 spin_unlock(&pa->pa_lock); in ext4_mb_discard_lg_preallocations()
4726 list_del_rcu(&pa->pa_inode_list); in ext4_mb_discard_lg_preallocations()
4727 list_add(&pa->u.pa_tmp_list, &discard_list); in ext4_mb_discard_lg_preallocations()
4742 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { in ext4_mb_discard_lg_preallocations()
4745 group = ext4_get_group_number(sb, pa->pa_pstart); in ext4_mb_discard_lg_preallocations()
4754 list_del(&pa->pa_group_list); in ext4_mb_discard_lg_preallocations()
4755 ext4_mb_release_group_pa(&e4b, pa); in ext4_mb_discard_lg_preallocations()
4759 list_del(&pa->u.pa_tmp_list); in ext4_mb_discard_lg_preallocations()
4760 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); in ext4_mb_discard_lg_preallocations()
4778 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; in ext4_mb_add_n_trim() local
4780 order = fls(pa->pa_free) - 1; in ext4_mb_add_n_trim()
4794 if (!added && pa->pa_free < tmp_pa->pa_free) { in ext4_mb_add_n_trim()
4796 list_add_tail_rcu(&pa->pa_inode_list, in ext4_mb_add_n_trim()
4808 list_add_tail_rcu(&pa->pa_inode_list, in ext4_mb_add_n_trim()
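lg_prealloc_list[] is an array of lists bucketed by size: a PA lands in the bucket for the order (highest set bit) of its free count, and within a bucket entries are kept sorted by pa_free. A standalone illustration of the bucket choice (fls_() is a stand-in for the kernel's fls()):

#include <stdio.h>

/* 1-based index of the highest set bit; 0 if none (like kernel fls()). */
static int fls_(unsigned int x)
{
	int r = 0;

	while (x) { r++; x >>= 1; }
	return r;
}

/* Bucket index used by ext4_mb_add_n_trim(): order = fls(pa_free) - 1. */
static int pa_order(unsigned int pa_free)
{
	return fls_(pa_free) - 1;
}

int main(void)
{
	printf("%d %d %d\n", pa_order(1), pa_order(12), pa_order(64));	/* 0 3 6 */
	return 0;
}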
4822 * if per-inode prealloc list is too long, trim some PA
4846 struct ext4_prealloc_space *pa = ac->ac_pa; in ext4_mb_release_context() local
4847 if (pa) { in ext4_mb_release_context()
4848 if (pa->pa_type == MB_GROUP_PA) { in ext4_mb_release_context()
4850 spin_lock(&pa->pa_lock); in ext4_mb_release_context()
4851 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
4852 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); in ext4_mb_release_context()
4853 pa->pa_free -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
4854 pa->pa_len -= ac->ac_b_ex.fe_len; in ext4_mb_release_context()
4855 spin_unlock(&pa->pa_lock); in ext4_mb_release_context()
4858 * We want to add the pa to the right bucket. in ext4_mb_release_context()
4863 if (likely(pa->pa_free)) { in ext4_mb_release_context()
4864 spin_lock(pa->pa_obj_lock); in ext4_mb_release_context()
4865 list_del_rcu(&pa->pa_inode_list); in ext4_mb_release_context()
4866 spin_unlock(pa->pa_obj_lock); in ext4_mb_release_context()
4871 if (pa->pa_type == MB_INODE_PA) { in ext4_mb_release_context()
4874 * to trim the least recently used PA. in ext4_mb_release_context()
4876 spin_lock(pa->pa_obj_lock); in ext4_mb_release_context()
4877 list_move(&pa->pa_inode_list, &ei->i_prealloc_list); in ext4_mb_release_context()
4878 spin_unlock(pa->pa_obj_lock); in ext4_mb_release_context()
4881 ext4_mb_put_pa(ac, ac->ac_sb, pa); in ext4_mb_release_context()
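For a group PA, "use" amounts to sliding a window: ext4_mb_release_context() advances pa_pstart/pa_lstart past the blocks just consumed and shrinks pa_free/pa_len, which is what lets ext4_mb_use_group_pa() always hand out the head of the window. A standalone sketch (cluster conversion via EXT4_C2B() omitted):

/* Slide a group PA's window forward after 'used' blocks are consumed
 * from its head, mirroring the pa_pstart/pa_lstart/pa_free/pa_len
 * updates in ext4_mb_release_context(). */
struct gpa_model { unsigned long long pstart, lstart; int len, free; };

static void gpa_consume(struct gpa_model *pa, int used)
{
	pa->pstart += used;
	pa->lstart += used;
	pa->free   -= used;
	pa->len    -= used;
}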
5040 * pa allocated above is added to grp->bb_prealloc_list only in ext4_mb_new_blocks()
5044 * So we have to free this pa here itself. in ext4_mb_new_blocks()
5069 * If block allocation fails then the pa allocated above in ext4_mb_new_blocks()