author    | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-14 13:35:29 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-11-14 13:35:29 -0800
commit    | 5cea7647e64657138138a3794ae172ee0fc175da (patch)
tree      | 38adc54cba508db574e190e9d9aa601c36a8fd7c /fs/btrfs/sysfs.c
parent    | 808eb24e0e0939b487bf90e3888a9636f1c83acb (diff)
parent    | d28e649a5c58b779b303c252c66ee84a0f2c3b32 (diff)
Merge branch 'for-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs updates from David Sterba:
"There are some new user features and the usual load of invisible
enhancements or cleanups.
New features:
- extend mount options to specify the zlib compression level, e.g.
  -o compress=zlib:9 (a userspace sketch follows the quoted message)
- v2 of the "extent to inode mapping" ioctl (LOGICAL_INO_V2), addressing
  a use case where we want to retrieve more, if less precise, results
  and do the postprocessing in userspace, aiding defragmentation or
  deduplication tools (see the ioctl sketch after the quoted message)
- flesh out the compression heuristics logic: sample the data and try
  to guess compressibility by looking for repeated patterns, counting
  unique byte values and their distribution, and calculating Shannon
  entropy (sketched after the quoted message); this will need more
  benchmarking and possibly fine tuning, but the base should be good
  enough
- enable indexing for btrfs as lower filesystem in overlayfs
- speed up page cache readahead during send on large files
Internal enhancements:
- more sanity checks of b-tree items when reading them from disk
- more EINVAL/EUCLEAN fixups, missing BLK_STS_* conversion, other
errno or error handling fixes
- remove some homegrown IO-related logic that has been obsoleted by
  core block layer changes (batching, plug/unplug, own counters)
- add ref-verify, an optional debugging feature to verify extent
  reference accounting
- simplify the code handling outstanding extents and make it clearer
  where and how the accounting is done
- make delalloc reservations per-inode, simplify the code and make
the logic more straightforward
- extensive cleanup of delayed refs code
Notable fixes:
- fix send ioctl on 32bit with 64bit kernel"
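A minimal sketch of the new compression-level mount option in use, via
mount(2); the device and mount point are placeholders:

```c
/* Equivalent to: mount -o compress=zlib:9 /dev/sdb1 /mnt/data */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* the last argument carries filesystem-specific options */
	if (mount("/dev/sdb1", "/mnt/data", "btrfs", 0, "compress=zlib:9")) {
		perror("mount");
		return 1;
	}
	return 0;
}
```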
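The LOGICAL_INO_V2 ioctl maps a logical byte address back to the inodes
referencing it. A userspace sketch, assuming the UAPI additions from
this merge (BTRFS_IOC_LOGICAL_INO_V2 and the
BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET flag in <linux/btrfs.h>); the path
and logical address are placeholders:

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_logical_ino_args args = { 0 };
	struct btrfs_data_container *inodes;
	__u64 bufsize = 64 * 1024;	/* v2 also accepts larger buffers */
	unsigned int i;
	int fd;

	inodes = calloc(1, bufsize);
	fd = open("/mnt/data", O_RDONLY);	/* any fd on the filesystem */
	if (!inodes || fd < 0)
		return 1;

	args.logical = 12345678;	/* placeholder logical byte address */
	args.size = bufsize;
	/* report every extent ref, ignoring per-ref offsets */
	args.flags = BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
	args.inodes = (uintptr_t)inodes;

	if (ioctl(fd, BTRFS_IOC_LOGICAL_INO_V2, &args) < 0) {
		perror("BTRFS_IOC_LOGICAL_INO_V2");
		return 1;
	}

	/* results come back as (inum, offset, root) triplets */
	for (i = 0; i < inodes->elem_cnt; i += 3)
		printf("inum %llu offset %llu root %llu\n",
		       (unsigned long long)inodes->val[i],
		       (unsigned long long)inodes->val[i + 1],
		       (unsigned long long)inodes->val[i + 2]);
	return 0;
}
```

This is the "more but less precise results" trade-off from the message:
with IGNORE_OFFSET the kernel can report shared extents once per
reference without resolving exact file offsets, leaving the filtering
to the tool.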
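The entropy step of the heuristic can be illustrated in a few lines.
This is a floating-point sketch of the idea only, not the kernel code
(which works on sampled pages and avoids floating point by using an
integer log2 approximation):

```c
#include <math.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Shannon entropy of a buffer, in bits per byte: 0.0 for a single
 * repeated byte value, 8.0 for uniformly random data.  Low entropy
 * suggests the data will compress well.
 */
static double shannon_entropy(const uint8_t *buf, size_t len)
{
	uint32_t hist[256] = { 0 };
	double entropy = 0.0;
	size_t i;

	/* byte-value histogram over the sample */
	for (i = 0; i < len; i++)
		hist[buf[i]]++;

	for (i = 0; i < 256; i++) {
		if (hist[i]) {
			double p = (double)hist[i] / (double)len;
			entropy -= p * log2(p);
		}
	}
	return entropy;
}
```

A heuristic would compare this against a threshold (e.g. skip
compression when the sample is close to 8 bits/byte); the exact cutoff
is the part the message says still needs benchmarking.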
* 'for-4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (102 commits)
btrfs: Fix bug for misused dev_t when lookup in dev state hash table.
Btrfs: heuristic: add Shannon entropy calculation
Btrfs: heuristic: add byte core set calculation
Btrfs: heuristic: add byte set calculation
Btrfs: heuristic: add detection of repeated data patterns
Btrfs: heuristic: implement sampling logic
Btrfs: heuristic: add bucket and sample counters and other defines
Btrfs: compression: separate heuristic/compression workspaces
btrfs: move btrfs_truncate_block out of trans handle
btrfs: don't call btrfs_start_delalloc_roots in flushoncommit
btrfs: track refs in a rb_tree instead of a list
btrfs: add a comp_refs() helper
btrfs: switch args for comp_*_refs
btrfs: make the delalloc block rsv per inode
btrfs: add tracepoints for outstanding extents mods
Btrfs: rework outstanding_extents
btrfs: increase output size for LOGICAL_INO_V2 ioctl
btrfs: add a flags argument to LOGICAL_INO and call it LOGICAL_INO_V2
btrfs: add a flag to iterate_inodes_from_logical to find all extent refs for uncompressed extents
btrfs: send: remove unused code
...
Diffstat (limited to 'fs/btrfs/sysfs.c')
-rw-r--r-- | fs/btrfs/sysfs.c | 63
1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 883881b16c86..a28bba801264 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -247,7 +247,7 @@ static ssize_t global_rsv_size_show(struct kobject *kobj,
 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
 	return btrfs_show_u64(&block_rsv->size, &block_rsv->lock, buf);
 }
-BTRFS_ATTR(global_rsv_size, global_rsv_size_show);
+BTRFS_ATTR(allocation, global_rsv_size, global_rsv_size_show);
 
 static ssize_t global_rsv_reserved_show(struct kobject *kobj,
 					struct kobj_attribute *a, char *buf)
@@ -256,15 +256,15 @@ static ssize_t global_rsv_reserved_show(struct kobject *kobj,
 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
 	return btrfs_show_u64(&block_rsv->reserved, &block_rsv->lock, buf);
 }
-BTRFS_ATTR(global_rsv_reserved, global_rsv_reserved_show);
+BTRFS_ATTR(allocation, global_rsv_reserved, global_rsv_reserved_show);
 
 #define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
 #define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)
 
 static ssize_t raid_bytes_show(struct kobject *kobj,
 			       struct kobj_attribute *attr, char *buf);
-BTRFS_RAID_ATTR(total_bytes, raid_bytes_show);
-BTRFS_RAID_ATTR(used_bytes, raid_bytes_show);
+BTRFS_ATTR(raid, total_bytes, raid_bytes_show);
+BTRFS_ATTR(raid, used_bytes, raid_bytes_show);
 
 static ssize_t raid_bytes_show(struct kobject *kobj,
 			       struct kobj_attribute *attr, char *buf)
@@ -277,7 +277,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
 
 	down_read(&sinfo->groups_sem);
 	list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
-		if (&attr->attr == BTRFS_RAID_ATTR_PTR(total_bytes))
+		if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
 			val += block_group->key.offset;
 		else
 			val += btrfs_block_group_used(&block_group->item);
@@ -287,8 +287,8 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
 }
 
 static struct attribute *raid_attributes[] = {
-	BTRFS_RAID_ATTR_PTR(total_bytes),
-	BTRFS_RAID_ATTR_PTR(used_bytes),
+	BTRFS_ATTR_PTR(raid, total_bytes),
+	BTRFS_ATTR_PTR(raid, used_bytes),
 	NULL
 };
 
@@ -311,7 +311,7 @@ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj,	\
 	struct btrfs_space_info *sinfo = to_space_info(kobj);		\
 	return btrfs_show_u64(&sinfo->field, &sinfo->lock, buf);	\
 }									\
-BTRFS_ATTR(field, btrfs_space_info_show_##field)
+BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
 
 static ssize_t btrfs_space_info_show_total_bytes_pinned(struct kobject *kobj,
 							struct kobj_attribute *a,
@@ -331,19 +331,20 @@ SPACE_INFO_ATTR(bytes_may_use);
 SPACE_INFO_ATTR(bytes_readonly);
 SPACE_INFO_ATTR(disk_used);
 SPACE_INFO_ATTR(disk_total);
-BTRFS_ATTR(total_bytes_pinned, btrfs_space_info_show_total_bytes_pinned);
+BTRFS_ATTR(space_info, total_bytes_pinned,
+	   btrfs_space_info_show_total_bytes_pinned);
 
 static struct attribute *space_info_attrs[] = {
-	BTRFS_ATTR_PTR(flags),
-	BTRFS_ATTR_PTR(total_bytes),
-	BTRFS_ATTR_PTR(bytes_used),
-	BTRFS_ATTR_PTR(bytes_pinned),
-	BTRFS_ATTR_PTR(bytes_reserved),
-	BTRFS_ATTR_PTR(bytes_may_use),
-	BTRFS_ATTR_PTR(bytes_readonly),
-	BTRFS_ATTR_PTR(disk_used),
-	BTRFS_ATTR_PTR(disk_total),
-	BTRFS_ATTR_PTR(total_bytes_pinned),
+	BTRFS_ATTR_PTR(space_info, flags),
+	BTRFS_ATTR_PTR(space_info, total_bytes),
+	BTRFS_ATTR_PTR(space_info, bytes_used),
+	BTRFS_ATTR_PTR(space_info, bytes_pinned),
+	BTRFS_ATTR_PTR(space_info, bytes_reserved),
+	BTRFS_ATTR_PTR(space_info, bytes_may_use),
+	BTRFS_ATTR_PTR(space_info, bytes_readonly),
+	BTRFS_ATTR_PTR(space_info, disk_used),
+	BTRFS_ATTR_PTR(space_info, disk_total),
+	BTRFS_ATTR_PTR(space_info, total_bytes_pinned),
 	NULL,
 };
 
@@ -361,8 +362,8 @@ struct kobj_type space_info_ktype = {
 };
 
 static const struct attribute *allocation_attrs[] = {
-	BTRFS_ATTR_PTR(global_rsv_reserved),
-	BTRFS_ATTR_PTR(global_rsv_size),
+	BTRFS_ATTR_PTR(allocation, global_rsv_reserved),
+	BTRFS_ATTR_PTR(allocation, global_rsv_size),
 	NULL,
 };
 
@@ -415,7 +416,7 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
 
 	return len;
 }
-BTRFS_ATTR_RW(label, btrfs_label_show, btrfs_label_store);
+BTRFS_ATTR_RW(, label, btrfs_label_show, btrfs_label_store);
 
 static ssize_t btrfs_nodesize_show(struct kobject *kobj,
 				struct kobj_attribute *a, char *buf)
@@ -425,7 +426,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
 
 	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
 }
-BTRFS_ATTR(nodesize, btrfs_nodesize_show);
+BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
 
 static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
 				struct kobj_attribute *a, char *buf)
@@ -436,7 +437,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
 	return snprintf(buf, PAGE_SIZE, "%u\n",
 			fs_info->super_copy->sectorsize);
 }
-BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
+BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
 
 static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
 				struct kobj_attribute *a, char *buf)
@@ -447,7 +448,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
 	return snprintf(buf, PAGE_SIZE, "%u\n",
 			fs_info->super_copy->sectorsize);
 }
-BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
+BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
 
 static ssize_t quota_override_show(struct kobject *kobj,
 				struct kobj_attribute *a, char *buf)
@@ -487,14 +488,14 @@ static ssize_t quota_override_store(struct kobject *kobj,
 
 	return len;
 }
-BTRFS_ATTR_RW(quota_override, quota_override_show, quota_override_store);
+BTRFS_ATTR_RW(, quota_override, quota_override_show, quota_override_store);
 
 static const struct attribute *btrfs_attrs[] = {
-	BTRFS_ATTR_PTR(label),
-	BTRFS_ATTR_PTR(nodesize),
-	BTRFS_ATTR_PTR(sectorsize),
-	BTRFS_ATTR_PTR(clone_alignment),
-	BTRFS_ATTR_PTR(quota_override),
+	BTRFS_ATTR_PTR(, label),
+	BTRFS_ATTR_PTR(, nodesize),
+	BTRFS_ATTR_PTR(, sectorsize),
+	BTRFS_ATTR_PTR(, clone_alignment),
+	BTRFS_ATTR_PTR(, quota_override),
 	NULL,
 };
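The pattern in the diff replaces the per-family macros (BTRFS_RAID_ATTR
and friends) with a single BTRFS_ATTR/BTRFS_ATTR_PTR pair that takes an
attribute-family prefix, with an empty prefix denoting the top-level
filesystem attributes. A sketch of the macro shape this implies; the
real definitions live in fs/btrfs/sysfs.h and the exact spellings here
are assumptions inferred from the call sites:

```c
/*
 * The prefix is token-pasted into the variable name, so the same
 * attribute name ("total_bytes") can exist in several families
 * without the identifiers colliding.
 */
#define BTRFS_ATTR(_prefix, _name, _show)				\
	static struct kobj_attribute btrfs_attr_##_prefix##_##_name =	\
			__INIT_KOBJ_ATTR(_name, 0444, _show, NULL)

#define BTRFS_ATTR_PTR(_prefix, _name)					\
	(&btrfs_attr_##_prefix##_##_name.attr)

/*
 * BTRFS_ATTR(raid, total_bytes, ...) and
 * BTRFS_ATTR(space_info, total_bytes, ...) thus expand to distinct
 * variables, while the sysfs-visible name stays "total_bytes".
 */
```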