Lines Matching refs:tmp_pa

4249 struct ext4_prealloc_space *tmp_pa;
4257 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4259 tmp_pa_start = tmp_pa->pa_lstart;
4260 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4262 spin_lock(&tmp_pa->pa_lock);
4263 if (tmp_pa->pa_deleted == 0)
4265 spin_unlock(&tmp_pa->pa_lock);
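
The 4249-4265 hits above all follow one pattern: take pa_lock, ignore the preallocation if pa_deleted is set, and otherwise compare its logical range against the request. A minimal userspace sketch of that pattern follows; struct pa_stub, pa_assert_no_overlap and their fields are invented for illustration, while the kernel uses struct ext4_prealloc_space, spin_lock() and pa_logical_end().

#include <pthread.h>
#include <stdbool.h>
#include <assert.h>

struct pa_stub {                     /* stand-in for struct ext4_prealloc_space */
	pthread_mutex_t lock;        /* plays the role of pa_lock               */
	bool deleted;                /* plays the role of pa_deleted            */
	unsigned long lstart;        /* pa_lstart: logical start, never changes */
	unsigned long len;           /* pa_len, treated as blocks here          */
};

/*
 * Assert that a live preallocation does not intersect the requested
 * logical range [start, end).  Mirrors the lock -> pa_deleted test ->
 * unlock sequence in the hits above; a deleted PA is simply ignored.
 */
static void pa_assert_no_overlap(struct pa_stub *pa,
				 unsigned long start, unsigned long end)
{
	pthread_mutex_lock(&pa->lock);
	if (!pa->deleted) {
		unsigned long pa_end = pa->lstart + pa->len;  /* like pa_logical_end() */

		assert(pa_end <= start || end <= pa->lstart); /* ranges must be disjoint */
	}
	pthread_mutex_unlock(&pa->lock);
}
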
4286 struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
4305 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4307 tmp_pa_start = tmp_pa->pa_lstart;
4308 tmp_pa_end = pa_logical_end(sbi, tmp_pa);
4311 spin_lock(&tmp_pa->pa_lock);
4312 if (tmp_pa->pa_deleted == 0)
4315 spin_unlock(&tmp_pa->pa_lock);
4322 if (tmp_pa) {
4323 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4326 left_pa = tmp_pa;
4336 right_pa = tmp_pa;
4355 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4357 left_pa = tmp_pa;
4358 spin_lock(&tmp_pa->pa_lock);
4359 if (tmp_pa->pa_deleted == 0) {
4360 spin_unlock(&tmp_pa->pa_lock);
4363 spin_unlock(&tmp_pa->pa_lock);
4375 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4377 right_pa = tmp_pa;
4378 spin_lock(&tmp_pa->pa_lock);
4379 if (tmp_pa->pa_deleted == 0) {
4380 spin_unlock(&tmp_pa->pa_lock);
4383 spin_unlock(&tmp_pa->pa_lock);
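
The 4286-4383 hits walk preallocations ordered by pa_lstart and classify each live one as the left or right neighbour of the requested logical start. A simplified sketch over a sorted array follows; pa_stub and pa_pick_neighbours are invented names, and the kernel instead walks an rbtree with rb_prev()/rb_next() while holding pa_lock for the pa_deleted check.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct pa_stub {                     /* stand-in for struct ext4_prealloc_space */
	pthread_mutex_t lock;        /* pa_lock     */
	bool deleted;                /* pa_deleted  */
	unsigned long lstart;        /* pa_lstart   */
};

/*
 * Pick the closest live entries on either side of @goal from an array
 * sorted by lstart, skipping entries already marked deleted.
 */
static void pa_pick_neighbours(struct pa_stub **sorted, size_t n,
			       unsigned long goal,
			       struct pa_stub **left, struct pa_stub **right)
{
	*left = *right = NULL;

	for (size_t i = 0; i < n; i++) {
		struct pa_stub *pa = sorted[i];
		bool live;

		pthread_mutex_lock(&pa->lock);
		live = !pa->deleted;            /* deleted PAs are skipped */
		pthread_mutex_unlock(&pa->lock);
		if (!live)
			continue;

		if (pa->lstart < goal) {
			*left = pa;             /* best live PA so far on the left   */
		} else {
			*right = pa;            /* first live PA at or right of goal */
			break;
		}
	}
}
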
4791 struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL;
4816 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4820 tmp_pa->pa_lstart, iter)) {
4821 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4827 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4830 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4832 tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4835 tmp_pa = rb_entry(tmp, struct ext4_prealloc_space,
4847 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4852 * valid tmp_pa which is guaranteed to be non deleted.
4854 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4862 tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
4864 spin_lock(&tmp_pa->pa_lock);
4865 if (tmp_pa->pa_deleted == 0) {
4875 spin_unlock(&tmp_pa->pa_lock);
4879 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4880 BUG_ON(tmp_pa->pa_deleted == 1);
4887 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4888 spin_unlock(&tmp_pa->pa_lock);
4894 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4900 spin_unlock(&tmp_pa->pa_lock);
4904 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4905 atomic_inc(&tmp_pa->pa_count);
4906 ext4_mb_use_inode_pa(ac, tmp_pa);
4907 spin_unlock(&tmp_pa->pa_lock);
4938 WARN_ON_ONCE(tmp_pa->pa_free == 0);
4940 spin_unlock(&tmp_pa->pa_lock);
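
The 4791-4940 hits locate the preallocation with the largest pa_lstart at or before the goal, step left past deleted entries, and only use the result if the goal actually falls inside its logical range and pa_free is non-zero (the kernel applies further checks such as ext4_mb_pa_goal_check and the physical-range test before ext4_mb_use_inode_pa). A simplified sketch under the same assumptions, using a sorted array in place of the rbtree and invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct pa_stub {                     /* stand-in for struct ext4_prealloc_space */
	pthread_mutex_t lock;        /* pa_lock     */
	bool deleted;                /* pa_deleted  */
	unsigned long lstart;        /* pa_lstart   */
	unsigned long len;           /* pa_len, treated as blocks here */
	unsigned long free;          /* pa_free     */
};

/*
 * Find the nearest live PA whose range could cover @goal: start at the
 * last entry with lstart <= goal, step left past deleted entries, and
 * only return the PA if @goal lies inside [lstart, lstart + len) and it
 * still has free blocks.
 */
static struct pa_stub *pa_lookup(struct pa_stub **sorted, size_t n,
				 unsigned long goal)
{
	struct pa_stub *hit = NULL;

	for (size_t i = n; i-- > 0; ) {          /* walk right to left */
		struct pa_stub *pa = sorted[i];
		bool live;

		if (pa->lstart > goal)
			continue;                /* still right of goal */

		pthread_mutex_lock(&pa->lock);
		live = !pa->deleted;
		if (live && goal < pa->lstart + pa->len && pa->free > 0)
			hit = pa;                /* goal inside a live PA with space */
		pthread_mutex_unlock(&pa->lock);

		if (live)
			break;                   /* nearest live candidate examined */
	}
	return hit;
}
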
4964 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4966 spin_lock(&tmp_pa->pa_lock);
4967 if (tmp_pa->pa_deleted == 0 &&
4968 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4971 tmp_pa, cpa);
4973 spin_unlock(&tmp_pa->pa_lock);
5932 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5940 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5943 spin_lock(&tmp_pa->pa_lock);
5944 if (tmp_pa->pa_deleted) {
5945 spin_unlock(&tmp_pa->pa_lock);
5948 if (!added && pa->pa_free < tmp_pa->pa_free) {
5951 &tmp_pa->pa_node.lg_list);
5958 spin_unlock(&tmp_pa->pa_lock);
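
The 5932-5958 hits insert a new preallocation into a locality-group list kept ordered by pa_free, locking each existing entry and skipping those already marked deleted: the new PA goes in front of the first live entry that has more free blocks. A simplified, RCU-free sketch of that ordered insert; pa_stub and pa_list_insert are invented names, and the kernel uses list_add_tail_rcu under lg_prealloc_lock.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct pa_stub {                      /* stand-in for struct ext4_prealloc_space */
	struct pa_stub *next;         /* plays the role of pa_node.lg_list */
	pthread_mutex_t lock;         /* pa_lock    */
	bool deleted;                 /* pa_deleted */
	unsigned long free;           /* pa_free    */
};

/*
 * Insert @pa into a singly linked list kept ordered by free count,
 * ignoring entries already marked deleted: @pa is placed in front of
 * the first live entry with more free blocks, or at the end.
 */
static void pa_list_insert(struct pa_stub **head, struct pa_stub *pa)
{
	struct pa_stub **link = head;

	for (struct pa_stub *cur = *head; cur; cur = cur->next) {
		bool bigger;

		pthread_mutex_lock(&cur->lock);
		bigger = !cur->deleted && pa->free < cur->free;
		pthread_mutex_unlock(&cur->lock);

		if (bigger)
			break;                /* insert in front of cur */
		link = &cur->next;
	}
	pa->next = *link;
	*link = pa;
}
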