Lines matching refs: delayed_refs
349 static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs, in btrfs_delayed_ref_lock() argument
352 lockdep_assert_held(&delayed_refs->lock); in btrfs_delayed_ref_lock()
357 spin_unlock(&delayed_refs->lock); in btrfs_delayed_ref_lock()
360 spin_lock(&delayed_refs->lock); in btrfs_delayed_ref_lock()
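The lines above (349-360) show the locking handshake in btrfs_delayed_ref_lock(): the per-head lock can sleep, so the delayed_refs spinlock has to be dropped around the blocking acquisition and retaken afterwards. A minimal sketch of that pattern follows; the fields head->mutex and head->refs and the helper btrfs_put_delayed_ref_head() come from the upstream headers rather than this listing, and head_is_still_tracked() is a hypothetical stand-in for the "did the head survive while we slept" check.

static bool delayed_ref_lock_sketch(struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);

	/* Fast path: the head mutex is free, no need to drop the spinlock. */
	if (mutex_trylock(&head->mutex))
		return true;

	/* Pin the head so it cannot be freed while we sleep on its mutex. */
	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);

	/* Someone may have run and deleted the head while the lock was dropped. */
	if (!head_is_still_tracked(delayed_refs, head)) {	/* hypothetical check */
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return false;
	}
	btrfs_put_delayed_ref_head(head);
	return true;
}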
371 struct btrfs_delayed_ref_root *delayed_refs, in drop_delayed_ref() argument
385 struct btrfs_delayed_ref_root *delayed_refs, in merge_ref() argument
414 drop_delayed_ref(fs_info, delayed_refs, head, next); in merge_ref()
417 drop_delayed_ref(fs_info, delayed_refs, head, ref); in merge_ref()
432 struct btrfs_delayed_ref_root *delayed_refs, in btrfs_merge_delayed_refs() argument
455 if (merge_ref(fs_info, delayed_refs, head, ref, seq)) in btrfs_merge_delayed_refs()
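Lines 371-455 are the merge path: btrfs_merge_delayed_refs() walks the refs attached to a head and merge_ref() folds later entries into an earlier one, with drop_delayed_ref() removing whatever becomes redundant (lines 414 and 417). A rough sketch of the cancellation rule, assuming the upstream fields ref->action and ref->ref_mod and simplifying the sign handling for the case where ref is an add:

static void merge_pair_sketch(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head,
			      struct btrfs_delayed_ref_node *ref,
			      struct btrfs_delayed_ref_node *next)
{
	/* Same direction accumulates, opposite direction cancels. */
	if (next->action == ref->action)
		ref->ref_mod += next->ref_mod;
	else
		ref->ref_mod -= next->ref_mod;
	drop_delayed_ref(fs_info, delayed_refs, head, next);

	/* A ref whose net effect reached zero has nothing left to apply. */
	if (ref->ref_mod == 0)
		drop_delayed_ref(fs_info, delayed_refs, head, ref);
}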
477 struct btrfs_delayed_ref_root *delayed_refs) in btrfs_select_ref_head() argument
485 spin_lock(&delayed_refs->lock); in btrfs_select_ref_head()
487 start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits); in btrfs_select_ref_head()
488 xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) { in btrfs_select_ref_head()
495 if (delayed_refs->run_delayed_start == 0) { in btrfs_select_ref_head()
496 spin_unlock(&delayed_refs->lock); in btrfs_select_ref_head()
499 delayed_refs->run_delayed_start = 0; in btrfs_select_ref_head()
504 WARN_ON(delayed_refs->num_heads_ready == 0); in btrfs_select_ref_head()
505 delayed_refs->num_heads_ready--; in btrfs_select_ref_head()
506 delayed_refs->run_delayed_start = head->bytenr + in btrfs_select_ref_head()
509 locked = btrfs_delayed_ref_lock(delayed_refs, head); in btrfs_select_ref_head()
510 spin_unlock(&delayed_refs->lock); in btrfs_select_ref_head()
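Lines 477-510 show how a ref head is picked for running: head_refs is an xarray keyed by bytenr >> fs_info->sectorsize_bits, iteration starts at the run_delayed_start cursor, wraps back to index 0 once if nothing is found past the cursor, num_heads_ready is decremented, the cursor is advanced past the chosen extent, and the head is locked via btrfs_delayed_ref_lock() before the spinlock is released. A condensed sketch under those assumptions; the head->processing flag and the -EAGAIN retry convention are taken from the upstream code, not from the listing:

static struct btrfs_delayed_ref_head *
select_ref_head_sketch(struct btrfs_fs_info *fs_info,
		       struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;
	unsigned long start_index, found_index;

	spin_lock(&delayed_refs->lock);
again:
	start_index = delayed_refs->run_delayed_start >> fs_info->sectorsize_bits;
	xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
		if (!head->processing)
			break;			/* first head nobody is running yet */
	}
	if (!head) {
		/* Nothing past the cursor: give up if we already wrapped once. */
		if (delayed_refs->run_delayed_start == 0) {
			spin_unlock(&delayed_refs->lock);
			return NULL;
		}
		delayed_refs->run_delayed_start = 0;
		goto again;
	}

	head->processing = true;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	/* Advance the cursor past this extent for the next caller. */
	delayed_refs->run_delayed_start = head->bytenr + head->num_bytes;

	if (!btrfs_delayed_ref_lock(delayed_refs, head)) {
		/* The head was freed while the spinlock was dropped; retry. */
		spin_unlock(&delayed_refs->lock);
		return ERR_PTR(-EAGAIN);
	}
	spin_unlock(&delayed_refs->lock);
	return head;
}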
523 void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs, in btrfs_unselect_ref_head() argument
526 spin_lock(&delayed_refs->lock); in btrfs_unselect_ref_head()
528 delayed_refs->num_heads_ready++; in btrfs_unselect_ref_head()
529 spin_unlock(&delayed_refs->lock); in btrfs_unselect_ref_head()
534 struct btrfs_delayed_ref_root *delayed_refs, in btrfs_delete_ref_head() argument
539 lockdep_assert_held(&delayed_refs->lock); in btrfs_delete_ref_head()
542 xa_erase(&delayed_refs->head_refs, index); in btrfs_delete_ref_head()
544 delayed_refs->num_heads--; in btrfs_delete_ref_head()
546 delayed_refs->num_heads_ready--; in btrfs_delete_ref_head()
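Lines 523-546 are the two exits for a selected head: unselecting puts it back by re-marking it ready and restoring num_heads_ready under the spinlock, while deleting erases its slot from the head_refs xarray and drops both counters. A sketch of both, again keying the xarray by bytenr >> sectorsize_bits; the head->processing flag is the same assumption used in the selection sketch above:

/* Put an unprocessed head back: mark it selectable again and fix the counter. */
static void unselect_head_sketch(struct btrfs_delayed_ref_root *delayed_refs,
				 struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = false;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
}

/* Remove a head for good: erase the xarray slot and drop both counters. */
static void delete_head_sketch(struct btrfs_fs_info *fs_info,
			       struct btrfs_delayed_ref_root *delayed_refs,
			       struct btrfs_delayed_ref_head *head)
{
	const unsigned long index = head->bytenr >> fs_info->sectorsize_bits;

	lockdep_assert_held(&delayed_refs->lock);

	xa_erase(&delayed_refs->head_refs, index);
	delayed_refs->num_heads--;
	if (!head->processing)		/* a never-selected head still counts as ready */
		delayed_refs->num_heads_ready--;
}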
586 struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs; in insert_delayed_ref()
638 struct btrfs_delayed_ref_root *delayed_refs = in update_existing_head_ref() local
639 &trans->transaction->delayed_refs; in update_existing_head_ref()
713 delayed_refs->pending_csums -= existing->num_bytes; in update_existing_head_ref()
717 delayed_refs->pending_csums += existing->num_bytes; in update_existing_head_ref()
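Lines 638-717 show update_existing_head_ref() merging a new update into a head that is already queued; the only delayed_refs-level bookkeeping is pending_csums, adjusted at lines 713 and 717. A sketch of when that counter moves, assuming the upstream fields total_ref_mod and is_data (not visible in the listing): pending_csums tracks bytes of data extents whose net delayed effect is a drop, so it changes only when the accumulated ref mod crosses zero.

static void merge_head_update_sketch(struct btrfs_delayed_ref_root *delayed_refs,
				     struct btrfs_delayed_ref_head *existing,
				     int ref_mod_delta)
{
	const int old_ref_mod = existing->total_ref_mod;

	existing->total_ref_mod += ref_mod_delta;

	/* Only data extents have csum items that would need deleting on a drop. */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;	/* drop cancelled */
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;	/* became a drop */
	}
}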
815 struct btrfs_delayed_ref_root *delayed_refs; in add_delayed_ref_head() local
819 delayed_refs = &trans->transaction->delayed_refs; in add_delayed_ref_head()
820 lockdep_assert_held(&delayed_refs->lock); in add_delayed_ref_head()
825 xa_release(&delayed_refs->dirty_extents, index); in add_delayed_ref_head()
838 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord, in add_delayed_ref_head()
842 xa_release(&delayed_refs->dirty_extents, index); in add_delayed_ref_head()
854 existing = xa_load(&delayed_refs->head_refs, index); in add_delayed_ref_head()
864 existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC); in add_delayed_ref_head()
884 delayed_refs->pending_csums += head_ref->num_bytes; in add_delayed_ref_head()
888 delayed_refs->num_heads++; in add_delayed_ref_head()
889 delayed_refs->num_heads_ready++; in add_delayed_ref_head()
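Lines 815-889 are add_delayed_ref_head(): with delayed_refs->lock held it either merges into an existing head found by xa_load() or fills the xarray slot reserved earlier, which is why the GFP_ATOMIC xa_store() at line 864 is expected to succeed; the dirty_extents reservation is released (lines 825 and 842) when no qgroup record ends up being inserted. A sketch of the insert-or-merge step; the is_data/ref_mod condition guarding pending_csums is an assumption based on the upstream code:

static struct btrfs_delayed_ref_head *
add_head_sketch(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info,
		struct btrfs_delayed_ref_root *delayed_refs,
		struct btrfs_delayed_ref_head *head_ref)
{
	const unsigned long index = head_ref->bytenr >> fs_info->sectorsize_bits;
	struct btrfs_delayed_ref_head *existing;

	lockdep_assert_held(&delayed_refs->lock);

	existing = xa_load(&delayed_refs->head_refs, index);
	if (existing) {
		/* Already queued: fold the update in and reuse the old head. */
		update_existing_head_ref(trans, existing, head_ref);
		return existing;
	}

	/* The slot was reserved with xa_reserve(), so GFP_ATOMIC cannot fail here. */
	existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
	WARN_ON(xa_is_err(existing));

	if (head_ref->is_data && head_ref->ref_mod < 0)
		delayed_refs->pending_csums += head_ref->num_bytes;
	delayed_refs->num_heads++;
	delayed_refs->num_heads_ready++;
	return head_ref;
}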
995 struct btrfs_delayed_ref_root *delayed_refs; in add_delayed_ref() local
1014 delayed_refs = &trans->transaction->delayed_refs; in add_delayed_ref()
1022 if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) { in add_delayed_ref()
1029 ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS); in add_delayed_ref()
1032 xa_release(&delayed_refs->dirty_extents, index); in add_delayed_ref()
1040 spin_lock(&delayed_refs->lock); in add_delayed_ref()
1049 xa_release(&delayed_refs->head_refs, index); in add_delayed_ref()
1050 spin_unlock(&delayed_refs->lock); in add_delayed_ref()
1057 spin_unlock(&delayed_refs->lock); in add_delayed_ref()
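Lines 995-1057 show the allocation discipline in add_delayed_ref(): slots in both xarrays are reserved with GFP_NOFS while sleeping is still allowed (lines 1022 and 1029), the actual stores then happen under delayed_refs->lock, and every failure path releases exactly the reservations it no longer needs. A sketch of that reserve/insert/unwind shape; insert_under_lock() is a hypothetical stand-in for the real head and ref insertion at lines 1040-1057, and the sketch ignores that the dirty_extents reservation is only needed when qgroups are active.

static int add_delayed_ref_sketch(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_delayed_ref_root *delayed_refs,
				  u64 bytenr)
{
	const unsigned long index = bytenr >> fs_info->sectorsize_bits;
	int ret;

	/* Reserve both slots while sleeping allocations are still allowed. */
	ret = xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS);
	if (ret)
		return ret;
	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
	if (ret) {
		xa_release(&delayed_refs->dirty_extents, index);
		return ret;
	}

	/* Fill the slots under the spinlock; stores into reserved slots cannot fail. */
	spin_lock(&delayed_refs->lock);
	ret = insert_under_lock(trans, delayed_refs, index);	/* hypothetical helper */
	if (ret)
		/* The head was never stored, so give its reserved slot back. */
		xa_release(&delayed_refs->head_refs, index);
	spin_unlock(&delayed_refs->lock);

	return ret;
}

btrfs_add_delayed_extent_op() (lines 1115-1149) follows the same discipline for the head_refs slot alone.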
1115 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_add_delayed_extent_op() local
1132 delayed_refs = &trans->transaction->delayed_refs; in btrfs_add_delayed_extent_op()
1134 ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS); in btrfs_add_delayed_extent_op()
1140 spin_lock(&delayed_refs->lock); in btrfs_add_delayed_extent_op()
1144 xa_release(&delayed_refs->head_refs, index); in btrfs_add_delayed_extent_op()
1145 spin_unlock(&delayed_refs->lock); in btrfs_add_delayed_extent_op()
1149 spin_unlock(&delayed_refs->lock); in btrfs_add_delayed_extent_op()
1173 struct btrfs_delayed_ref_root *delayed_refs, in btrfs_find_delayed_ref_head() argument
1178 lockdep_assert_held(&delayed_refs->lock); in btrfs_find_delayed_ref_head()
1180 return xa_load(&delayed_refs->head_refs, index); in btrfs_find_delayed_ref_head()
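Lines 1173-1180 confirm the lookup convention used throughout: a head is addressed by its sector number, so btrfs_find_delayed_ref_head() is little more than an xa_load() with the same bytenr >> sectorsize_bits key, called with the spinlock held. A sketch:

static struct btrfs_delayed_ref_head *
find_head_sketch(const struct btrfs_fs_info *fs_info,
		 struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	const unsigned long index = bytenr >> fs_info->sectorsize_bits;

	lockdep_assert_held(&delayed_refs->lock);
	return xa_load(&delayed_refs->head_refs, index);
}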
1252 struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs; in btrfs_destroy_delayed_refs() local
1256 spin_lock(&delayed_refs->lock); in btrfs_destroy_delayed_refs()
1262 head = find_first_ref_head(delayed_refs); in btrfs_destroy_delayed_refs()
1266 if (!btrfs_delayed_ref_lock(delayed_refs, head)) in btrfs_destroy_delayed_refs()
1274 drop_delayed_ref(fs_info, delayed_refs, head, ref); in btrfs_destroy_delayed_refs()
1279 btrfs_delete_ref_head(fs_info, delayed_refs, head); in btrfs_destroy_delayed_refs()
1281 spin_unlock(&delayed_refs->lock); in btrfs_destroy_delayed_refs()
1316 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); in btrfs_destroy_delayed_refs()
1319 spin_lock(&delayed_refs->lock); in btrfs_destroy_delayed_refs()
1325 spin_unlock(&delayed_refs->lock); in btrfs_destroy_delayed_refs()
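Lines 1252-1325 are the transaction-abort teardown: with the spinlock held, find_first_ref_head() picks the next remaining head, btrfs_delayed_ref_lock() pins it (the head is skipped if it disappears while the spinlock is dropped), every attached ref is removed with drop_delayed_ref(), the head itself goes through btrfs_delete_ref_head(), and the accounting cleanup runs outside the spinlock before the loop re-acquires it. A condensed sketch of that loop; first_ref_node() is a hypothetical helper standing in for the upstream walk of the head's ref tree:

static void destroy_delayed_refs_sketch(struct btrfs_fs_info *fs_info,
					struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

	spin_lock(&delayed_refs->lock);
	while ((head = find_first_ref_head(delayed_refs)) != NULL) {
		struct btrfs_delayed_ref_node *ref;

		if (!btrfs_delayed_ref_lock(delayed_refs, head))
			continue;	/* head vanished while we waited on its mutex */

		/* first_ref_node() is hypothetical; upstream iterates head->ref_tree. */
		while ((ref = first_ref_node(head)) != NULL)
			drop_delayed_ref(fs_info, delayed_refs, head, ref);

		btrfs_delete_ref_head(fs_info, delayed_refs, head);
		spin_unlock(&delayed_refs->lock);

		/* Space and accounting cleanup may sleep, so it runs without the spinlock. */
		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);

		spin_lock(&delayed_refs->lock);
	}
	spin_unlock(&delayed_refs->lock);
}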