author | Qu Wenruo <wqu@suse.com> | 2017-12-12 15:34:25 +0800 |
---|---|---|
committer | David Sterba <dsterba@suse.com> | 2018-03-31 01:41:13 +0200 |
commit | dba213242fbcfc5495004ab76ca27c35ce1bf304 (patch) | |
tree | 44eef478bbedfc70a530166189d22ddf88ef35f5 /fs/btrfs | |
parent | f59c0347d4be22dad5812b5b14bf94ac0efd371a (diff) | |
btrfs: qgroup: Make qgroup_reserve and its callers use separate reservation types
Since most callers of qgroup_reserve() already operate on a well-defined reservation type,
converting qgroup_reserve() to take that type explicitly is straightforward.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
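For context on what "separate reservation type" means here: earlier patches in this series replace the single `qg->reserved` counter with a small per-type array plus accessor helpers, and this patch threads the type through qgroup_reserve(). The snippet below is a simplified sketch of that bookkeeping, not the verbatim kernel code; the qgroup_rsv_total()/qgroup_rsv_add() call signatures match what is visible in the diff, while the enum values and the `rsv` member layout are assumptions based on the rest of the series.

```c
#include <linux/types.h>

/*
 * Simplified sketch of the per-type reservation bookkeeping this patch
 * builds on (not the verbatim kernel code). Each qgroup tracks one
 * reserved-bytes counter per reservation type instead of a single
 * qg->reserved field.
 */
enum btrfs_qgroup_rsv_type {
	BTRFS_QGROUP_RSV_DATA = 0,
	BTRFS_QGROUP_RSV_META,
	BTRFS_QGROUP_RSV_LAST,
};

struct btrfs_qgroup_rsv {
	u64 values[BTRFS_QGROUP_RSV_LAST];
};

struct btrfs_qgroup {
	/* ... other members elided ... */
	struct btrfs_qgroup_rsv rsv;	/* replaces the old u64 reserved */
};

/* Sum over all types; used by qgroup_check_limits() in the diff below. */
static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];
	return ret;
}

/* Account a new reservation against one specific type. */
static void qgroup_rsv_add(struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	qgroup->rsv.values[type] += num_bytes;
}
```

With that in place, qgroup_check_limits() compares qgroup_rsv_total() plus the would-be reservation against max_rfer/max_excl, and qgroup_reserve() only needs the extra type argument to know which per-type counter to bump.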
Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/qgroup.c | 20 |
1 file changed, 9 insertions, 11 deletions
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 252af87340cc..58a8b6930960 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2391,17 +2391,18 @@ out:
 static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
 {
 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
-	    qg->reserved + (s64)qg->rfer + num_bytes > qg->max_rfer)
+	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
 		return false;
 
 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
-	    qg->reserved + (s64)qg->excl + num_bytes > qg->max_excl)
+	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
 		return false;
 
 	return true;
 }
 
-static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
+static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
+			  enum btrfs_qgroup_rsv_type type)
 {
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *qgroup;
@@ -2453,7 +2454,7 @@ retry:
 			 * Commit the tree and retry, since we may have
 			 * deletions which would free up space.
 			 */
-			if (!retried && qg->reserved > 0) {
+			if (!retried && qgroup_rsv_total(qg) > 0) {
 				struct btrfs_trans_handle *trans;
 
 				spin_unlock(&fs_info->qgroup_lock);
@@ -2493,7 +2494,7 @@ retry:
 		qg = unode_aux_to_qgroup(unode);
 
 		trace_qgroup_update_reserve(fs_info, qg, num_bytes);
-		qg->reserved += num_bytes;
+		qgroup_rsv_add(qg, num_bytes, type);
 	}
 
 out:
@@ -2540,10 +2541,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
 		qg = unode_aux_to_qgroup(unode);
 
 		trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes);
-		if (qg->reserved < num_bytes)
-			report_reserved_underflow(fs_info, qg, num_bytes);
-		else
-			qg->reserved -= num_bytes;
+		qgroup_rsv_release(qg, num_bytes, type);
 
 		list_for_each_entry(glist, &qg->groups, next_group) {
 			ret = ulist_add(fs_info->qgroup_ulist,
@@ -2931,7 +2929,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
 			to_reserve, QGROUP_RESERVE);
 	if (ret < 0)
 		goto cleanup;
-	ret = qgroup_reserve(root, to_reserve, true);
+	ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
 	if (ret < 0)
 		goto cleanup;
 
@@ -3084,7 +3082,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
 
 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
 	trace_qgroup_meta_reserve(root, (s64)num_bytes);
-	ret = qgroup_reserve(root, num_bytes, enforce);
+	ret = qgroup_reserve(root, num_bytes, enforce, BTRFS_QGROUP_RSV_META);
 	if (ret < 0)
 		return ret;
 	atomic64_add(num_bytes, &root->qgroup_meta_rsv);
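Note that the hunk in btrfs_qgroup_free_refroot() also drops the open-coded underflow check (report_reserved_underflow()); with typed reservations, that responsibility moves into the release helper. Below is a hedged sketch of qgroup_rsv_release(), assuming it warns and clamps on underflow rather than wrapping; only the three-argument call signature is confirmed by the diff above.

```c
/*
 * Sketch of the matching release helper used in btrfs_qgroup_free_refroot()
 * above (same simplified layout as the earlier sketch). Assumption: on
 * underflow it warns and clamps the per-type counter to zero instead of
 * wrapping, which is why the caller no longer needs its own
 * report_reserved_underflow() call.
 */
static void qgroup_rsv_release(struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
	WARN_ON(1);	/* underflow: more bytes freed than were reserved */
	qgroup->rsv.values[type] = 0;
}
```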