From 92c423118105e1c8c1587367a26eeb3277bda89a Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 2 Mar 2011 16:50:21 +0800 Subject: Btrfs: Remove unused btrfs_block_group_free_space() We've already recorded the value in block_group->free_space. Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 15 --------------- fs/btrfs/free-space-cache.h | 1 - 2 files changed, 16 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 11d2e9cea09e..3af64c6ea9df 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1685,21 +1685,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) -{ - struct btrfs_free_space *info; - struct rb_node *n; - u64 ret = 0; - - for (n = rb_first(&block_group->free_space_offset); n; - n = rb_next(n)) { - info = rb_entry(n, struct btrfs_free_space, offset_index); - ret += info->bytes; - } - - return ret; -} - /* * for a given cluster, put all of its extents back into the free * space cache. If the block group passed doesn't match the block group diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 65c3b935289f..12b2b5165f8a 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -55,7 +55,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_group_cache *block_group, -- cgit v1.2.3 From f38b6e754d8cc4605ac21d9c1094d569d88b163b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 14 Mar 2011 13:40:51 +0800 Subject: Btrfs: Use bitmap_set/clear() No functional change.
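For reference, the equivalence the conversion relies on can be checked with a small standalone userspace program: bitmap_set(map, start, count) does the same work as calling set_bit() for every bit in [start, start + count). The helpers below are hand-rolled stand-ins written for this sketch, not the kernel API:

#include <assert.h>
#include <limits.h>
#include <string.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define NBITS 128

/* stand-in for the kernel's set_bit(): set one bit in a word array */
static void set_bit_slow(unsigned long *map, unsigned long i)
{
        map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

/* stand-in for the kernel's bitmap_set(): set a whole range at once */
static void bitmap_set_range(unsigned long *map, unsigned long start,
                             unsigned long count)
{
        while (count--)
                set_bit_slow(map, start++);
}

int main(void)
{
        unsigned long a[NBITS / BITS_PER_LONG] = { 0 };
        unsigned long b[NBITS / BITS_PER_LONG] = { 0 };
        unsigned long i;

        /* what bitmap_set_bits() used to do: a loop over set_bit() */
        for (i = 10; i < 10 + 42; i++)
                set_bit_slow(a, i);

        /* what it does after this patch: one ranged call */
        bitmap_set_range(b, 10, 42);

        assert(memcmp(a, b, sizeof(a)) == 0);
        return 0;
}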
Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 3af64c6ea9df..0e23bbabbba2 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1134,15 +1134,13 @@ static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *info, u64 offset, u64 bytes) { - unsigned long start, end; - unsigned long i; + unsigned long start, count; start = offset_to_bit(info->offset, block_group->sectorsize, offset); - end = start + bytes_to_bits(bytes, block_group->sectorsize); - BUG_ON(end > BITS_PER_BITMAP); + count = bytes_to_bits(bytes, block_group->sectorsize); + BUG_ON(start + count > BITS_PER_BITMAP); - for (i = start; i < end; i++) - clear_bit(i, info->bitmap); + bitmap_clear(info->bitmap, start, count); info->bytes -= bytes; block_group->free_space -= bytes; @@ -1152,15 +1150,13 @@ static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *info, u64 offset, u64 bytes) { - unsigned long start, end; - unsigned long i; + unsigned long start, count; start = offset_to_bit(info->offset, block_group->sectorsize, offset); - end = start + bytes_to_bits(bytes, block_group->sectorsize); - BUG_ON(end > BITS_PER_BITMAP); + count = bytes_to_bits(bytes, block_group->sectorsize); + BUG_ON(start + count > BITS_PER_BITMAP); - for (i = start; i < end; i++) - set_bit(i, info->bitmap); + bitmap_set(info->bitmap, start, count); info->bytes += bytes; block_group->free_space += bytes; -- cgit v1.2.3 From 34d52cb6c50b5a43901709998f59fb1c5a43dc4a Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 29 Mar 2011 13:46:06 +0800 Subject: Btrfs: Make free space cache code generic So we can re-use the code to cache free inode numbers. The change is quite straightforward. Two new structures are introduced. - struct btrfs_free_space_ctl We move those variables that are used for caching free space from struct btrfs_block_group_cache to this new struct. - struct btrfs_free_space_op We do block group specific work (e.g. calculation of extents threshold) through functions registered in this struct. And then we can remove references to struct btrfs_block_group_cache. 
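The shape of the new indirection is easy to see in a standalone sketch. The struct and callback names below mirror the patch; the bodies, the numbers, and the bg_ helpers are invented placeholders, not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

struct free_space_ctl;

/* policy hooks, mirroring the names introduced by this patch */
struct free_space_op {
        void (*recalc_thresholds)(struct free_space_ctl *ctl);
        bool (*use_bitmap)(struct free_space_ctl *ctl, unsigned long bytes);
};

/* the generic cache state, detached from any block group */
struct free_space_ctl {
        int extents_thresh;
        int free_extents;
        const struct free_space_op *op;
        void *private;          /* e.g. the owning block group */
};

/* block-group flavour of the hooks; the numbers are placeholders */
static void bg_recalc(struct free_space_ctl *ctl)
{
        ctl->extents_thresh = (32 * 1024 / 2) / 16;
}

static bool bg_use_bitmap(struct free_space_ctl *ctl, unsigned long bytes)
{
        /* small extents go to a bitmap once the extent budget is spent */
        return bytes <= 4 * 4096 && ctl->free_extents >= ctl->extents_thresh;
}

static const struct free_space_op bg_op = {
        .recalc_thresholds = bg_recalc,
        .use_bitmap        = bg_use_bitmap,
};

int main(void)
{
        struct free_space_ctl ctl = { .free_extents = 2000, .op = &bg_op };

        /* generic code only ever calls through ctl->op */
        ctl.op->recalc_thresholds(&ctl);
        printf("use bitmap for a 4K extent: %d\n",
               ctl.op->use_bitmap(&ctl, 4096));
        return 0;
}

The same generic code can later be pointed at a different op table and private pointer, which is exactly what the free inode number cache does.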
Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 7 +- fs/btrfs/extent-tree.c | 37 ++-- fs/btrfs/free-space-cache.c | 430 ++++++++++++++++++++++++-------------------- fs/btrfs/free-space-cache.h | 20 +++ 4 files changed, 271 insertions(+), 223 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2e61fe1b6b8c..d17e4a3b8bf7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -830,9 +830,6 @@ struct btrfs_block_group_cache { u64 bytes_super; u64 flags; u64 sectorsize; - int extents_thresh; - int free_extents; - int total_bitmaps; unsigned int ro:1; unsigned int dirty:1; unsigned int iref:1; @@ -847,9 +844,7 @@ struct btrfs_block_group_cache { struct btrfs_space_info *space_info; /* free space cache stuff */ - spinlock_t tree_lock; - struct rb_root free_space_offset; - u64 free_space; + struct btrfs_free_space_ctl *free_space_ctl; /* block group cache stuff */ struct rb_node cache_node; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 31f33ba56fe8..904eae10ec65 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache) WARN_ON(cache->pinned > 0); WARN_ON(cache->reserved > 0); WARN_ON(cache->reserved_pinned > 0); + kfree(cache->free_space_ctl); kfree(cache); } } @@ -4893,7 +4894,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, return 0; wait_event(caching_ctl->wait, block_group_cache_done(cache) || - (cache->free_space >= num_bytes)); + (cache->free_space_ctl->free_space >= num_bytes)); put_caching_control(caching_ctl); return 0; @@ -8551,10 +8552,16 @@ int btrfs_read_block_groups(struct btrfs_root *root) ret = -ENOMEM; goto error; } + cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + ret = -ENOMEM; + goto error; + } atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); - spin_lock_init(&cache->tree_lock); cache->fs_info = info; INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); @@ -8562,14 +8569,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (need_clear) cache->disk_cache_state = BTRFS_DC_CLEAR; - /* - * we only want to have 32k of ram per block group for keeping - * track of free space, and if we pass 1/2 of that we want to - * start converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); - read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(cache->item)); @@ -8580,6 +8579,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; + btrfs_init_free_space_ctl(cache); + /* * We need to exclude the super stripes now so that the space * info has super bytes accounted for, otherwise we'll think @@ -8666,6 +8667,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) return -ENOMEM; + cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), + GFP_NOFS); + if (!cache->free_space_ctl) { + kfree(cache); + return -ENOMEM; + } cache->key.objectid = chunk_offset; cache->key.offset = size; @@ -8673,19 +8680,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->sectorsize = root->sectorsize; cache->fs_info = root->fs_info; - /* - * we only want to have 32k of ram per block group for keeping track - * of free space, and if we pass 1/2 of that we want to 
start - * converting things over to using bitmaps - */ - cache->extents_thresh = ((1024 * 32) / 2) / - sizeof(struct btrfs_free_space); atomic_set(&cache->count, 1); spin_lock_init(&cache->lock); - spin_lock_init(&cache->tree_lock); INIT_LIST_HEAD(&cache->list); INIT_LIST_HEAD(&cache->cluster_list); + btrfs_init_free_space_ctl(cache); + btrfs_set_block_group_used(&cache->item, bytes_used); btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); cache->flags = type; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 0e23bbabbba2..d4fb4f077a79 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -29,9 +29,7 @@ #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) -static void recalculate_thresholds(struct btrfs_block_group_cache - *block_group); -static int link_free_space(struct btrfs_block_group_cache *block_group, +static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info); struct inode *lookup_free_space_inode(struct btrfs_root *root, @@ -212,6 +210,7 @@ static int readahead_cache(struct inode *inode) int load_free_space_cache(struct btrfs_fs_info *fs_info, struct btrfs_block_group_cache *block_group) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_root *root = fs_info->tree_root; struct inode *inode; struct btrfs_free_space_header *header; @@ -417,9 +416,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, } if (entry->type == BTRFS_FREE_SPACE_EXTENT) { - spin_lock(&block_group->tree_lock); - ret = link_free_space(block_group, e); - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + spin_unlock(&ctl->tree_lock); BUG_ON(ret); } else { e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); @@ -431,11 +430,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, page_cache_release(page); goto free_cache; } - spin_lock(&block_group->tree_lock); - ret = link_free_space(block_group, e); - block_group->total_bitmaps++; - recalculate_thresholds(block_group); - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + ret = link_free_space(ctl, e); + ctl->total_bitmaps++; + ctl->op->recalc_thresholds(ctl); + spin_unlock(&ctl->tree_lock); list_add_tail(&e->list, &bitmaps); } @@ -471,16 +470,16 @@ next: index++; } - spin_lock(&block_group->tree_lock); - if (block_group->free_space != (block_group->key.offset - used - - block_group->bytes_super)) { - spin_unlock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); + if (ctl->free_space != (block_group->key.offset - used - + block_group->bytes_super)) { + spin_unlock(&ctl->tree_lock); printk(KERN_ERR "block group %llu has an wrong amount of free " "space\n", block_group->key.objectid); ret = 0; goto free_cache; } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); ret = 1; out: @@ -503,6 +502,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space_header *header; struct extent_buffer *leaf; struct inode *inode; @@ -546,7 +546,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, return 0; } - node = rb_first(&block_group->free_space_offset); + node = rb_first(&ctl->free_space_offset); if (!node) { iput(inode); return 0; @@ -851,30 +851,30 @@ out_free: return ret; } -static inline unsigned long offset_to_bit(u64 bitmap_start, u64 
sectorsize, +static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, u64 offset) { BUG_ON(offset < bitmap_start); offset -= bitmap_start; - return (unsigned long)(div64_u64(offset, sectorsize)); + return (unsigned long)(div_u64(offset, unit)); } -static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) +static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) { - return (unsigned long)(div64_u64(bytes, sectorsize)); + return (unsigned long)(div_u64(bytes, unit)); } -static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, +static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset) { u64 bitmap_start; u64 bytes_per_bitmap; - bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; - bitmap_start = offset - block_group->key.objectid; + bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; + bitmap_start = offset - ctl->start; bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); bitmap_start *= bytes_per_bitmap; - bitmap_start += block_group->key.objectid; + bitmap_start += ctl->start; return bitmap_start; } @@ -932,10 +932,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, * offset. */ static struct btrfs_free_space * -tree_search_offset(struct btrfs_block_group_cache *block_group, +tree_search_offset(struct btrfs_free_space_ctl *ctl, u64 offset, int bitmap_only, int fuzzy) { - struct rb_node *n = block_group->free_space_offset.rb_node; + struct rb_node *n = ctl->free_space_offset.rb_node; struct btrfs_free_space *entry, *prev = NULL; /* find entry that is closest to the 'offset' */ @@ -1031,8 +1031,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, break; } } - if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) + if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) return entry; } else if (entry->offset + entry->bytes > offset) return entry; @@ -1043,7 +1042,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, while (1) { if (entry->bitmap) { if (entry->offset + BITS_PER_BITMAP * - block_group->sectorsize > offset) + ctl->unit > offset) break; } else { if (entry->offset + entry->bytes > offset) @@ -1059,42 +1058,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, } static inline void -__unlink_free_space(struct btrfs_block_group_cache *block_group, +__unlink_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - rb_erase(&info->offset_index, &block_group->free_space_offset); - block_group->free_extents--; + rb_erase(&info->offset_index, &ctl->free_space_offset); + ctl->free_extents--; } -static void unlink_free_space(struct btrfs_block_group_cache *block_group, +static void unlink_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { - __unlink_free_space(block_group, info); - block_group->free_space -= info->bytes; + __unlink_free_space(ctl, info); + ctl->free_space -= info->bytes; } -static int link_free_space(struct btrfs_block_group_cache *block_group, +static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { int ret = 0; BUG_ON(!info->bitmap && !info->bytes); - ret = tree_insert_offset(&block_group->free_space_offset, info->offset, + ret = tree_insert_offset(&ctl->free_space_offset, info->offset, &info->offset_index, (info->bitmap != NULL)); if (ret) return ret; - block_group->free_space += info->bytes; - block_group->free_extents++; + ctl->free_space += info->bytes; + ctl->free_extents++; return ret; } -static void 
recalculate_thresholds(struct btrfs_block_group_cache *block_group) +static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) { + struct btrfs_block_group_cache *block_group = ctl->private; u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; u64 size = block_group->key.offset; + u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; + int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); + + BUG_ON(ctl->total_bitmaps > max_bitmaps); /* * The goal is to keep the total amount of memory used per 1gb of space @@ -1112,10 +1116,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as * we add more bitmaps. */ - bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE; + bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; if (bitmap_bytes >= max_bytes) { - block_group->extents_thresh = 0; + ctl->extents_thresh = 0; return; } @@ -1126,43 +1130,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) extent_bytes = max_bytes - bitmap_bytes; extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); - block_group->extents_thresh = + ctl->extents_thresh = div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); } -static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, +static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { unsigned long start, count; - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - count = bytes_to_bits(bytes, block_group->sectorsize); + start = offset_to_bit(info->offset, ctl->unit, offset); + count = bytes_to_bits(bytes, ctl->unit); BUG_ON(start + count > BITS_PER_BITMAP); bitmap_clear(info->bitmap, start, count); info->bytes -= bytes; - block_group->free_space -= bytes; + ctl->free_space -= bytes; } -static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, +static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset, u64 bytes) { unsigned long start, count; - start = offset_to_bit(info->offset, block_group->sectorsize, offset); - count = bytes_to_bits(bytes, block_group->sectorsize); + start = offset_to_bit(info->offset, ctl->unit, offset); + count = bytes_to_bits(bytes, ctl->unit); BUG_ON(start + count > BITS_PER_BITMAP); bitmap_set(info->bitmap, start, count); info->bytes += bytes; - block_group->free_space += bytes; + ctl->free_space += bytes; } -static int search_bitmap(struct btrfs_block_group_cache *block_group, +static int search_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) { @@ -1170,9 +1174,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, unsigned long bits, i; unsigned long next_zero; - i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, + i = offset_to_bit(bitmap_info->offset, ctl->unit, max_t(u64, *offset, bitmap_info->offset)); - bits = bytes_to_bits(*bytes, block_group->sectorsize); + bits = bytes_to_bits(*bytes, ctl->unit); for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); i < BITS_PER_BITMAP; @@ -1187,29 +1191,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, } if (found_bits) { - *offset = (u64)(i * block_group->sectorsize) + - bitmap_info->offset; - *bytes = (u64)(found_bits) * block_group->sectorsize; + *offset = (u64)(i * ctl->unit) + bitmap_info->offset; + *bytes = 
(u64)(found_bits) * ctl->unit; return 0; } return -1; } -static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache - *block_group, u64 *offset, - u64 *bytes, int debug) +static struct btrfs_free_space * +find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes) { struct btrfs_free_space *entry; struct rb_node *node; int ret; - if (!block_group->free_space_offset.rb_node) + if (!ctl->free_space_offset.rb_node) return NULL; - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, *offset), - 0, 1); + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); if (!entry) return NULL; @@ -1219,7 +1219,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache continue; if (entry->bitmap) { - ret = search_bitmap(block_group, entry, offset, bytes); + ret = search_bitmap(ctl, entry, offset, bytes); if (!ret) return entry; continue; @@ -1233,33 +1233,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache return NULL; } -static void add_new_bitmap(struct btrfs_block_group_cache *block_group, +static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, u64 offset) { - u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; - int max_bitmaps = (int)div64_u64(block_group->key.offset + - bytes_per_bg - 1, bytes_per_bg); - BUG_ON(block_group->total_bitmaps >= max_bitmaps); - - info->offset = offset_to_bitmap(block_group, offset); + info->offset = offset_to_bitmap(ctl, offset); info->bytes = 0; - link_free_space(block_group, info); - block_group->total_bitmaps++; + link_free_space(ctl, info); + ctl->total_bitmaps++; - recalculate_thresholds(block_group); + ctl->op->recalc_thresholds(ctl); } -static void free_bitmap(struct btrfs_block_group_cache *block_group, +static void free_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info) { - unlink_free_space(block_group, bitmap_info); + unlink_free_space(ctl, bitmap_info); kfree(bitmap_info->bitmap); kmem_cache_free(btrfs_free_space_cachep, bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); + ctl->total_bitmaps--; + ctl->op->recalc_thresholds(ctl); } -static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, +static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) { @@ -1268,8 +1263,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro int ret; again: - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; + end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; /* * XXX - this can go away after a few releases. 
@@ -1284,24 +1278,22 @@ again: search_start = *offset; search_bytes = *bytes; search_bytes = min(search_bytes, end - search_start + 1); - ret = search_bitmap(block_group, bitmap_info, &search_start, - &search_bytes); + ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); if (*offset > bitmap_info->offset && *offset + *bytes > end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, - end - *offset + 1); + bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1); *bytes -= end - *offset + 1; *offset = end + 1; } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { - bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); + bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes); *bytes = 0; } if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + free_bitmap(ctl, bitmap_info); /* * no entry after this bitmap, but we still have bytes to @@ -1328,31 +1320,28 @@ again: */ search_start = *offset; search_bytes = *bytes; - ret = search_bitmap(block_group, bitmap_info, &search_start, + ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); if (ret < 0 || search_start != *offset) return -EAGAIN; goto again; } else if (!bitmap_info->bytes) - free_bitmap(block_group, bitmap_info); + free_bitmap(ctl, bitmap_info); return 0; } -static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) +static bool use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) { - struct btrfs_free_space *bitmap_info; - int added = 0; - u64 bytes, offset, end; - int ret; + struct btrfs_block_group_cache *block_group = ctl->private; /* * If we are below the extents threshold then we can add this as an * extent, and don't have to deal with the bitmap */ - if (block_group->free_extents < block_group->extents_thresh) { + if (ctl->free_extents < ctl->extents_thresh) { /* * If this block group has some small extents we don't want to * use up all of our free slots in the cache with them, we want @@ -1361,11 +1350,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, * the overhead of a bitmap if we don't have to. 
*/ if (info->bytes <= block_group->sectorsize * 4) { - if (block_group->free_extents * 2 <= - block_group->extents_thresh) - return 0; + if (ctl->free_extents * 2 <= ctl->extents_thresh) + return false; } else { - return 0; + return false; } } @@ -1375,31 +1363,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, */ if (BITS_PER_BITMAP * block_group->sectorsize > block_group->key.offset) - return 0; + return false; + + return true; +} + +static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + struct btrfs_free_space *bitmap_info; + int added = 0; + u64 bytes, offset, end; + int ret; bytes = info->bytes; offset = info->offset; + if (!ctl->op->use_bitmap(ctl, info)) + return 0; + again: - bitmap_info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), + bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!bitmap_info) { BUG_ON(added); goto new_bitmap; } - end = bitmap_info->offset + - (u64)(BITS_PER_BITMAP * block_group->sectorsize); + end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); if (offset >= bitmap_info->offset && offset + bytes > end) { - bitmap_set_bits(block_group, bitmap_info, offset, - end - offset); + bitmap_set_bits(ctl, bitmap_info, offset, end - offset); bytes -= end - offset; offset = end; added = 0; } else if (offset >= bitmap_info->offset && offset + bytes <= end) { - bitmap_set_bits(block_group, bitmap_info, offset, bytes); + bitmap_set_bits(ctl, bitmap_info, offset, bytes); bytes = 0; } else { BUG(); @@ -1413,19 +1412,19 @@ again: new_bitmap: if (info && info->bitmap) { - add_new_bitmap(block_group, info, offset); + add_new_bitmap(ctl, info, offset); added = 1; info = NULL; goto again; } else { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* no pre-allocated info, allocate a new one */ if (!info) { info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); if (!info) { - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); ret = -ENOMEM; goto out; } @@ -1433,7 +1432,7 @@ new_bitmap: /* allocate the bitmap */ info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; goto out; @@ -1451,7 +1450,7 @@ out: return ret; } -bool try_merge_free_space(struct btrfs_block_group_cache *block_group, +bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, bool update_stat) { struct btrfs_free_space *left_info; @@ -1465,18 +1464,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, * are adding, if there is remove that struct and add a new one to * cover the entire range */ - right_info = tree_search_offset(block_group, offset + bytes, 0, 0); + right_info = tree_search_offset(ctl, offset + bytes, 0, 0); if (right_info && rb_prev(&right_info->offset_index)) left_info = rb_entry(rb_prev(&right_info->offset_index), struct btrfs_free_space, offset_index); else - left_info = tree_search_offset(block_group, offset - 1, 0, 0); + left_info = tree_search_offset(ctl, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { if (update_stat) - unlink_free_space(block_group, right_info); + unlink_free_space(ctl, right_info); else - __unlink_free_space(block_group, right_info); + __unlink_free_space(ctl, right_info); info->bytes += right_info->bytes; kmem_cache_free(btrfs_free_space_cachep, right_info); merged = true; @@ -1485,9 +1484,9 @@ bool 
try_merge_free_space(struct btrfs_block_group_cache *block_group, if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { if (update_stat) - unlink_free_space(block_group, left_info); + unlink_free_space(ctl, left_info); else - __unlink_free_space(block_group, left_info); + __unlink_free_space(ctl, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kmem_cache_free(btrfs_free_space_cachep, left_info); @@ -1500,6 +1499,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; int ret = 0; @@ -1510,9 +1510,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, info->offset = offset; info->bytes = bytes; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - if (try_merge_free_space(block_group, info, true)) + if (try_merge_free_space(ctl, info, true)) goto link; /* @@ -1520,7 +1520,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, * extent then we know we're going to have to allocate a new extent, so * before we do that see if we need to drop this into a bitmap */ - ret = insert_into_bitmap(block_group, info); + ret = insert_into_bitmap(ctl, info); if (ret < 0) { goto out; } else if (ret) { @@ -1528,11 +1528,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, goto out; } link: - ret = link_free_space(block_group, info); + ret = link_free_space(ctl, info); if (ret) kmem_cache_free(btrfs_free_space_cachep, info); out: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); if (ret) { printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); @@ -1545,21 +1545,21 @@ out: int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct btrfs_free_space *next_info = NULL; int ret = 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); again: - info = tree_search_offset(block_group, offset, 0, 0); + info = tree_search_offset(ctl, offset, 0, 0); if (!info) { /* * oops didn't find an extent that matched the space we wanted * to remove, look for a bitmap instead */ - info = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), + info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!info) { WARN_ON(1); @@ -1574,8 +1574,8 @@ again: offset_index); if (next_info->bitmap) - end = next_info->offset + BITS_PER_BITMAP * - block_group->sectorsize - 1; + end = next_info->offset + + BITS_PER_BITMAP * ctl->unit - 1; else end = next_info->offset + next_info->bytes; @@ -1595,20 +1595,20 @@ again: } if (info->bytes == bytes) { - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (info->bitmap) { kfree(info->bitmap); - block_group->total_bitmaps--; + ctl->total_bitmaps--; } kmem_cache_free(btrfs_free_space_cachep, info); goto out_lock; } if (!info->bitmap && info->offset == offset) { - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); info->offset += bytes; info->bytes -= bytes; - link_free_space(block_group, info); + link_free_space(ctl, info); goto out_lock; } @@ -1622,13 +1622,13 @@ again: * first unlink the old info and then * insert it again after the hole we're creating */ - unlink_free_space(block_group, info); 
+ unlink_free_space(ctl, info); if (offset + bytes < info->offset + info->bytes) { u64 old_end = info->offset + info->bytes; info->offset = offset + bytes; info->bytes = old_end - info->offset; - ret = link_free_space(block_group, info); + ret = link_free_space(ctl, info); WARN_ON(ret); if (ret) goto out_lock; @@ -1638,7 +1638,7 @@ again: */ kmem_cache_free(btrfs_free_space_cachep, info); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* step two, insert a new info struct to cover * anything before the hole @@ -1649,12 +1649,12 @@ again: goto out; } - ret = remove_from_bitmap(block_group, info, &offset, &bytes); + ret = remove_from_bitmap(ctl, info, &offset, &bytes); if (ret == -EAGAIN) goto again; BUG_ON(ret); out_lock: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); out: return ret; } @@ -1662,11 +1662,12 @@ out: void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *n; int count = 0; - for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { + for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { info = rb_entry(n, struct btrfs_free_space, offset_index); if (info->bytes >= bytes) count++; @@ -1681,6 +1682,30 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } +static struct btrfs_free_space_op free_space_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + +void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + + spin_lock_init(&ctl->tree_lock); + ctl->unit = block_group->sectorsize; + ctl->start = block_group->key.objectid; + ctl->private = block_group; + ctl->op = &free_space_op; + + /* + * we only want to have 32k of ram per block group for keeping + * track of free space, and if we pass 1/2 of that we want to + * start converting things over to using bitmaps + */ + ctl->extents_thresh = ((1024 * 32) / 2) / + sizeof(struct btrfs_free_space); +} + /* * for a given cluster, put all of its extents back into the free * space cache. 
If the block group passed doesn't match the block group @@ -1692,6 +1717,7 @@ __btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; @@ -1713,8 +1739,8 @@ __btrfs_return_cluster_to_free_space( bitmap = (entry->bitmap != NULL); if (!bitmap) - try_merge_free_space(block_group, entry, false); - tree_insert_offset(&block_group->free_space_offset, + try_merge_free_space(ctl, entry, false); + tree_insert_offset(&ctl->free_space_offset, entry->offset, &entry->offset_index, bitmap); } cluster->root = RB_ROOT; @@ -1727,12 +1753,13 @@ out: void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *node; struct btrfs_free_cluster *cluster; struct list_head *head; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); while ((head = block_group->cluster_list.next) != &block_group->cluster_list) { cluster = list_entry(head, struct btrfs_free_cluster, @@ -1741,57 +1768,58 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) WARN_ON(cluster->block_group != block_group); __btrfs_return_cluster_to_free_space(block_group, cluster); if (need_resched()) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); cond_resched(); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); } } - while ((node = rb_last(&block_group->free_space_offset)) != NULL) { + while ((node = rb_last(&ctl->free_space_offset)) != NULL) { info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(block_group, info); + unlink_free_space(ctl, info); if (info->bitmap) kfree(info->bitmap); kmem_cache_free(btrfs_free_space_cachep, info); if (need_resched()) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); cond_resched(); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); } } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); } u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; u64 bytes_search = bytes + empty_size; u64 ret = 0; - spin_lock(&block_group->tree_lock); - entry = find_free_space(block_group, &offset, &bytes_search, 0); + spin_lock(&ctl->tree_lock); + entry = find_free_space(ctl, &offset, &bytes_search); if (!entry) goto out; ret = offset; if (entry->bitmap) { - bitmap_clear_bits(block_group, entry, offset, bytes); + bitmap_clear_bits(ctl, entry, offset, bytes); if (!entry->bytes) - free_bitmap(block_group, entry); + free_bitmap(ctl, entry); } else { - unlink_free_space(block_group, entry); + unlink_free_space(ctl, entry); entry->offset += bytes; entry->bytes -= bytes; if (!entry->bytes) kmem_cache_free(btrfs_free_space_cachep, entry); else - link_free_space(block_group, entry); + link_free_space(ctl, entry); } out: - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1808,6 +1836,7 @@ int btrfs_return_cluster_to_free_space( struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster) { + struct btrfs_free_space_ctl *ctl; int ret; /* first, get a safe pointer to the block group */ @@ -1826,10 +1855,12 @@ int 
btrfs_return_cluster_to_free_space( atomic_inc(&block_group->count); spin_unlock(&cluster->lock); + ctl = block_group->free_space_ctl; + /* now return any extents the cluster had on it */ - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); ret = __btrfs_return_cluster_to_free_space(block_group, cluster); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); /* finally drop our ref */ btrfs_put_block_group(block_group); @@ -1841,6 +1872,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *entry, u64 bytes, u64 min_start) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; int err; u64 search_start = cluster->window_start; u64 search_bytes = bytes; @@ -1849,13 +1881,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, search_start = min_start; search_bytes = bytes; - err = search_bitmap(block_group, entry, &search_start, - &search_bytes); + err = search_bitmap(ctl, entry, &search_start, &search_bytes); if (err) return 0; ret = search_start; - bitmap_clear_bits(block_group, entry, ret, bytes); + bitmap_clear_bits(ctl, entry, ret, bytes); return ret; } @@ -1869,6 +1900,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 bytes, u64 min_start) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; struct rb_node *node; u64 ret = 0; @@ -1929,20 +1961,20 @@ out: if (!ret) return 0; - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - block_group->free_space -= bytes; + ctl->free_space -= bytes; if (entry->bytes == 0) { - block_group->free_extents--; + ctl->free_extents--; if (entry->bitmap) { kfree(entry->bitmap); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); + ctl->total_bitmaps--; + ctl->op->recalc_thresholds(ctl); } kmem_cache_free(btrfs_free_space_cachep, entry); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -1952,6 +1984,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; unsigned long next_zero; unsigned long i; unsigned long search_bits; @@ -2006,7 +2039,7 @@ again: cluster->window_start = start * block_group->sectorsize + entry->offset; - rb_erase(&entry->offset_index, &block_group->free_space_offset); + rb_erase(&entry->offset_index, &ctl->free_space_offset); ret = tree_insert_offset(&cluster->root, entry->offset, &entry->offset_index, 1); BUG_ON(ret); @@ -2021,6 +2054,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *first = NULL; struct btrfs_free_space *entry = NULL; struct btrfs_free_space *prev = NULL; @@ -2031,7 +2065,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, u64 max_extent; u64 max_gap = 128 * 1024; - entry = tree_search_offset(block_group, offset, 0, 1); + entry = tree_search_offset(ctl, offset, 0, 1); if (!entry) return -ENOSPC; @@ -2097,7 +2131,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, if (entry->bitmap) continue; - rb_erase(&entry->offset_index, &block_group->free_space_offset); + 
rb_erase(&entry->offset_index, &ctl->free_space_offset); ret = tree_insert_offset(&cluster->root, entry->offset, &entry->offset_index, 0); BUG_ON(ret); @@ -2116,16 +2150,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 min_bytes) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; struct rb_node *node; int ret = -ENOSPC; - if (block_group->total_bitmaps == 0) + if (ctl->total_bitmaps == 0) return -ENOSPC; - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, offset), - 0, 1); + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); if (!entry) return -ENOSPC; @@ -2158,6 +2191,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_free_cluster *cluster, u64 offset, u64 bytes, u64 empty_size) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; u64 min_bytes; int ret; @@ -2177,14 +2211,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, } else min_bytes = max(bytes, (bytes + empty_size) >> 2); - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); /* * If we know we don't have enough space to make a cluster don't even * bother doing all the work to try and find one. */ - if (block_group->free_space < min_bytes) { - spin_unlock(&block_group->tree_lock); + if (ctl->free_space < min_bytes) { + spin_unlock(&ctl->tree_lock); return -ENOSPC; } @@ -2210,7 +2244,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, } out: spin_unlock(&cluster->lock); - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); return ret; } @@ -2231,6 +2265,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, u64 *trimmed, u64 start, u64 end, u64 minlen) { + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry = NULL; struct btrfs_fs_info *fs_info = block_group->fs_info; u64 bytes = 0; @@ -2240,52 +2275,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, *trimmed = 0; while (start < end) { - spin_lock(&block_group->tree_lock); + spin_lock(&ctl->tree_lock); - if (block_group->free_space < minlen) { - spin_unlock(&block_group->tree_lock); + if (ctl->free_space < minlen) { + spin_unlock(&ctl->tree_lock); break; } - entry = tree_search_offset(block_group, start, 0, 1); + entry = tree_search_offset(ctl, start, 0, 1); if (!entry) - entry = tree_search_offset(block_group, - offset_to_bitmap(block_group, - start), + entry = tree_search_offset(ctl, + offset_to_bitmap(ctl, start), 1, 1); if (!entry || entry->offset >= end) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); break; } if (entry->bitmap) { - ret = search_bitmap(block_group, entry, &start, &bytes); + ret = search_bitmap(ctl, entry, &start, &bytes); if (!ret) { if (start >= end) { - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); break; } bytes = min(bytes, end - start); - bitmap_clear_bits(block_group, entry, - start, bytes); + bitmap_clear_bits(ctl, entry, start, bytes); if (entry->bytes == 0) - free_bitmap(block_group, entry); + free_bitmap(ctl, entry); } else { start = entry->offset + BITS_PER_BITMAP * block_group->sectorsize; - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); ret = 0; continue; } } else { start = entry->offset; bytes = min(entry->bytes, end - start); - 
unlink_free_space(block_group, entry); + unlink_free_space(ctl, entry); kfree(entry); } - spin_unlock(&block_group->tree_lock); + spin_unlock(&ctl->tree_lock); if (bytes >= minlen) { int update_ret; @@ -2297,8 +2330,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, bytes, &actually_trimmed); - btrfs_add_free_space(block_group, - start, bytes); + btrfs_add_free_space(block_group, start, bytes); if (!update_ret) btrfs_update_reserved_bytes(block_group, bytes, 0, 1); diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 12b2b5165f8a..a64a23fae1eb 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -27,6 +27,25 @@ struct btrfs_free_space { struct list_head list; }; +struct btrfs_free_space_ctl { + spinlock_t tree_lock; + struct rb_root free_space_offset; + u64 free_space; + int extents_thresh; + int free_extents; + int total_bitmaps; + int unit; + u64 start; + struct btrfs_free_space_op *op; + void *private; +}; + +struct btrfs_free_space_op { + void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl); + bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info); +}; + struct inode *lookup_free_space_inode(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); @@ -45,6 +64,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); +void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, -- cgit v1.2.3 From 581bb050941b4f220f84d3e5ed6dace3d42dd382 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:06:11 +0800 Subject: Btrfs: Cache free inode numbers in memory Currently btrfs stores the highest objectid of the fs tree, and it always returns (highest+1) as the inode number when we create a file, so inode numbers are never reclaimed when we delete files, and we'll eventually run out of inode numbers as we keep creating and deleting files on 32-bit machines. This fixes it, and it works similarly to how we cache free space in block groups. We start a kernel thread to read the file tree. By scanning inode items, we know which chunks of inode numbers are free, and we cache them in an rb-tree. Because we are searching the commit root, we have to carefully handle the cross-transaction case. The rb-tree is a hybrid extent+bitmap tree, so if we have too many small chunks of inode numbers, we'll use bitmaps. Initially we allow 16K of ram for extents, and a bitmap will be used if we exceed this threshold. The extents threshold is adjusted at runtime.
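The extent-versus-bitmap trade-off can be sketched in a few lines of standalone C. The 16K budget matches the figure above; the 4096-inode cutoff and all helper names are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* a contiguous run of free inode numbers */
struct ino_extent {
        unsigned long long start;
        unsigned long long count;
};

/* 16K of ram spent on extents, per the initial threshold above */
#define EXTENT_BUDGET ((16 * 1024) / sizeof(struct ino_extent))

/* invented cutoff: short, numerous runs are cheaper as bitmap bits */
static bool prefer_bitmap(unsigned long nr_extents,
                          unsigned long long run_len)
{
        return nr_extents >= EXTENT_BUDGET && run_len < 4096;
}

int main(void)
{
        printf("extent budget: %zu entries\n", (size_t)EXTENT_BUDGET);
        printf("many 8-ino runs -> bitmap? %d\n",
               prefer_bitmap(EXTENT_BUDGET, 8));
        printf("one huge run -> bitmap? %d\n",
               prefer_bitmap(EXTENT_BUDGET, 1ULL << 20));
        return 0;
}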
Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 15 +- fs/btrfs/disk-io.c | 18 +++ fs/btrfs/free-space-cache.c | 96 ++++++++++--- fs/btrfs/free-space-cache.h | 16 ++- fs/btrfs/inode-map.c | 341 +++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/inode-map.h | 11 ++ fs/btrfs/inode.c | 42 ++++-- fs/btrfs/ioctl.c | 4 +- fs/btrfs/relocation.c | 3 +- fs/btrfs/transaction.c | 7 +- 10 files changed, 500 insertions(+), 53 deletions(-) create mode 100644 fs/btrfs/inode-map.h diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d17e4a3b8bf7..c96a4e4c5566 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1102,6 +1102,15 @@ struct btrfs_root { spinlock_t accounting_lock; struct btrfs_block_rsv *block_rsv; + /* free ino cache stuff */ + struct mutex fs_commit_mutex; + struct btrfs_free_space_ctl *free_ino_ctl; + enum btrfs_caching_type cached; + spinlock_t cache_lock; + wait_queue_head_t cache_wait; + struct btrfs_free_space_ctl *free_ino_pinned; + u64 cache_progress; + struct mutex log_mutex; wait_queue_head_t log_writer_wait; wait_queue_head_t log_commit_wait[2]; @@ -2408,12 +2417,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 offset); int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); -/* inode-map.c */ -int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, - struct btrfs_root *fs_root, - u64 dirid, u64 *objectid); -int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid); - /* inode-item.c */ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ef6865c17cd6..d02683b1ee16 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -41,6 +41,7 @@ #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" +#include "inode-map.h" static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); @@ -1327,6 +1328,19 @@ again: if (IS_ERR(root)) return root; + root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); + if (!root->free_ino_ctl) + goto fail; + root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), + GFP_NOFS); + if (!root->free_ino_pinned) + goto fail; + + btrfs_init_free_ino_ctl(root); + mutex_init(&root->fs_commit_mutex); + spin_lock_init(&root->cache_lock); + init_waitqueue_head(&root->cache_wait); + set_anon_super(&root->anon_super, NULL); if (btrfs_root_refs(&root->root_item) == 0) { @@ -2483,6 +2497,8 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) if (btrfs_root_refs(&root->root_item) == 0) synchronize_srcu(&fs_info->subvol_srcu); + __btrfs_remove_free_space_cache(root->free_ino_pinned); + __btrfs_remove_free_space_cache(root->free_ino_ctl); free_fs_root(root); return 0; } @@ -2496,6 +2512,8 @@ static void free_fs_root(struct btrfs_root *root) } free_extent_buffer(root->node); free_extent_buffer(root->commit_root); + kfree(root->free_ino_ctl); + kfree(root->free_ino_pinned); kfree(root->name); kfree(root); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d4fb4f077a79..2ce89bfc8815 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -25,6 +25,7 @@ #include "transaction.h" #include "disk-io.h" #include "extent_io.h" +#include "inode-map.h" #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) @@ -105,7 +106,7 @@ int create_free_space_inode(struct btrfs_root *root, u64 objectid; int ret; - ret = 
btrfs_find_free_objectid(trans, root, 0, &objectid); + ret = btrfs_find_free_objectid(root, &objectid); if (ret < 0) return ret; @@ -1496,10 +1497,9 @@ bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, return merged; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, + u64 offset, u64 bytes) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; int ret = 0; @@ -1751,11 +1751,29 @@ out: return 0; } -void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *info; struct rb_node *node; + + spin_lock(&ctl->tree_lock); + while ((node = rb_last(&ctl->free_space_offset)) != NULL) { + info = rb_entry(node, struct btrfs_free_space, offset_index); + unlink_free_space(ctl, info); + kfree(info->bitmap); + kmem_cache_free(btrfs_free_space_cachep, info); + if (need_resched()) { + spin_unlock(&ctl->tree_lock); + cond_resched(); + spin_lock(&ctl->tree_lock); + } + } + spin_unlock(&ctl->tree_lock); +} + +void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_cluster *cluster; struct list_head *head; @@ -1773,21 +1791,9 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) spin_lock(&ctl->tree_lock); } } - - while ((node = rb_last(&ctl->free_space_offset)) != NULL) { - info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(ctl, info); - if (info->bitmap) - kfree(info->bitmap); - kmem_cache_free(btrfs_free_space_cachep, info); - if (need_resched()) { - spin_unlock(&ctl->tree_lock); - cond_resched(); - spin_lock(&ctl->tree_lock); - } - } - spin_unlock(&ctl->tree_lock); + + __btrfs_remove_free_space_cache(ctl); } u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, @@ -2352,3 +2358,53 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, return ret; } + +/* + * Find the left-most item in the cache tree, and then return the + * smallest inode number in the item. + * + * Note: the returned inode number may not be the smallest one in + * the tree, if the left-most item is a bitmap. 
+ */ +u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) +{ + struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; + struct btrfs_free_space *entry = NULL; + u64 ino = 0; + + spin_lock(&ctl->tree_lock); + + if (RB_EMPTY_ROOT(&ctl->free_space_offset)) + goto out; + + entry = rb_entry(rb_first(&ctl->free_space_offset), + struct btrfs_free_space, offset_index); + + if (!entry->bitmap) { + ino = entry->offset; + + unlink_free_space(ctl, entry); + entry->offset++; + entry->bytes--; + if (!entry->bytes) + kmem_cache_free(btrfs_free_space_cachep, entry); + else + link_free_space(ctl, entry); + } else { + u64 offset = 0; + u64 count = 1; + int ret; + + ret = search_bitmap(ctl, entry, &offset, &count); + BUG_ON(ret); + + ino = offset; + bitmap_clear_bits(ctl, entry, offset, 1); + if (entry->bytes == 0) + free_bitmap(ctl, entry); + } +out: + spin_unlock(&ctl->tree_lock); + + return ino; +} diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index a64a23fae1eb..af06e6b6ceaa 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -64,15 +64,25 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); + void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 bytenr, u64 size); +int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, + u64 bytenr, u64 size); +static inline int +btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 bytenr, u64 size) +{ + return __btrfs_add_free_space(block_group->free_space_ctl, + bytenr, size); +} int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); +void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); void btrfs_remove_free_space_cache(struct btrfs_block_group_cache - *block_group); + *block_group); u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); +u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c05a08f4c411..5be62df90c4f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -16,11 +16,343 @@ * Boston, MA 021110-1307, USA. */ +#include +#include +#include + #include "ctree.h" #include "disk-io.h" +#include "free-space-cache.h" +#include "inode-map.h" #include "transaction.h" -int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) +static int caching_kthread(void *data) +{ + struct btrfs_root *root = data; + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_key key; + struct btrfs_path *path; + struct extent_buffer *leaf; + u64 last = (u64)-1; + int slot; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* Since the commit root is read-only, we can safely skip locking. 
*/ + path->skip_locking = 1; + path->search_commit_root = 1; + path->reada = 2; + + key.objectid = BTRFS_FIRST_FREE_OBJECTID; + key.offset = 0; + key.type = BTRFS_INODE_ITEM_KEY; +again: + /* need to make sure the commit_root doesn't disappear */ + mutex_lock(&root->fs_commit_mutex); + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + while (1) { + smp_mb(); + if (fs_info->closing > 1) + goto out; + + leaf = path->nodes[0]; + slot = path->slots[0]; + if (path->slots[0] >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + else if (ret > 0) + break; + + if (need_resched() || + btrfs_transaction_in_commit(fs_info)) { + leaf = path->nodes[0]; + + if (btrfs_header_nritems(leaf) == 0) { + WARN_ON(1); + break; + } + + /* + * Save the key so we can advances forward + * in the next search. + */ + btrfs_item_key_to_cpu(leaf, &key, 0); + btrfs_release_path(root, path); + root->cache_progress = last; + mutex_unlock(&root->fs_commit_mutex); + schedule_timeout(1); + goto again; + } else + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + + if (key.type != BTRFS_INODE_ITEM_KEY) + goto next; + + if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) + break; + + if (last != (u64)-1 && last + 1 != key.objectid) { + __btrfs_add_free_space(ctl, last + 1, + key.objectid - last - 1); + wake_up(&root->cache_wait); + } + + last = key.objectid; +next: + path->slots[0]++; + } + + if (last < BTRFS_LAST_FREE_OBJECTID - 1) { + __btrfs_add_free_space(ctl, last + 1, + BTRFS_LAST_FREE_OBJECTID - last - 1); + } + + spin_lock(&root->cache_lock); + root->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&root->cache_lock); + + root->cache_progress = (u64)-1; + btrfs_unpin_free_ino(root); +out: + wake_up(&root->cache_wait); + mutex_unlock(&root->fs_commit_mutex); + + btrfs_free_path(path); + + return ret; +} + +static void start_caching(struct btrfs_root *root) +{ + struct task_struct *tsk; + + spin_lock(&root->cache_lock); + if (root->cached != BTRFS_CACHE_NO) { + spin_unlock(&root->cache_lock); + return; + } + + root->cached = BTRFS_CACHE_STARTED; + spin_unlock(&root->cache_lock); + + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", + root->root_key.objectid); + BUG_ON(IS_ERR(tsk)); +} + +int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) +{ +again: + *objectid = btrfs_find_ino_for_alloc(root); + + if (*objectid != 0) + return 0; + + start_caching(root); + + wait_event(root->cache_wait, + root->cached == BTRFS_CACHE_FINISHED || + root->free_ino_ctl->free_space > 0); + + if (root->cached == BTRFS_CACHE_FINISHED && + root->free_ino_ctl->free_space == 0) + return -ENOSPC; + else + goto again; +} + +void btrfs_return_ino(struct btrfs_root *root, u64 objectid) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; +again: + if (root->cached == BTRFS_CACHE_FINISHED) { + __btrfs_add_free_space(ctl, objectid, 1); + } else { + /* + * If we are in the process of caching free ino chunks, + * to avoid adding the same inode number to the free_ino + * tree twice due to cross transaction, we'll leave it + * in the pinned tree until a transaction is committed + * or the caching work is done. 
+ */ + + mutex_lock(&root->fs_commit_mutex); + spin_lock(&root->cache_lock); + if (root->cached == BTRFS_CACHE_FINISHED) { + spin_unlock(&root->cache_lock); + mutex_unlock(&root->fs_commit_mutex); + goto again; + } + spin_unlock(&root->cache_lock); + + start_caching(root); + + if (objectid <= root->cache_progress) + __btrfs_add_free_space(ctl, objectid, 1); + else + __btrfs_add_free_space(pinned, objectid, 1); + + mutex_unlock(&root->fs_commit_mutex); + } +} + +/* + * When a transaction is committed, we'll move those inode numbers which + * are smaller than root->cache_progress from pinned tree to free_ino tree, + * and others will just be dropped, because the commit root we were + * searching has changed. + * + * Must be called with root->fs_commit_mutex held + */ +void btrfs_unpin_free_ino(struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; + struct btrfs_free_space *info; + struct rb_node *n; + u64 count; + + while (1) { + n = rb_first(rbroot); + if (!n) + break; + + info = rb_entry(n, struct btrfs_free_space, offset_index); + BUG_ON(info->bitmap); + + if (info->offset > root->cache_progress) + goto free; + else if (info->offset + info->bytes > root->cache_progress) + count = root->cache_progress - info->offset + 1; + else + count = info->bytes; + + __btrfs_add_free_space(ctl, info->offset, count); +free: + rb_erase(&info->offset_index, rbroot); + kfree(info); + } +} + +#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space)) +#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) + +/* + * The goal is to keep the memory used by the free_ino tree won't + * exceed the memory if we use bitmaps only. + */ +static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) +{ + struct btrfs_free_space *info; + struct rb_node *n; + int max_ino; + int max_bitmaps; + + n = rb_last(&ctl->free_space_offset); + if (!n) { + ctl->extents_thresh = INIT_THRESHOLD; + return; + } + info = rb_entry(n, struct btrfs_free_space, offset_index); + + /* + * Find the maximum inode number in the filesystem. Note we + * ignore the fact that this can be a bitmap, because we are + * not doing precise calculation. + */ + max_ino = info->bytes - 1; + + max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP; + if (max_bitmaps <= ctl->total_bitmaps) { + ctl->extents_thresh = 0; + return; + } + + ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * + PAGE_CACHE_SIZE / sizeof(*info); +} + +/* + * We don't fall back to bitmap, if we are below the extents threshold + * or this chunk of inode numbers is a big one. + */ +static bool use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + if (ctl->free_extents < ctl->extents_thresh || + info->bytes > INODES_PER_BITMAP / 10) + return false; + + return true; +} + +static struct btrfs_free_space_op free_ino_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + +static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl) +{ +} + +static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info) +{ + /* + * We always use extents for two reasons: + * + * - The pinned tree is only used during the process of caching + * work. + * - Make code simpler. See btrfs_unpin_free_ino(). 
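Aside: the clamp in btrfs_unpin_free_ino() above is easy to get off by one, so here it is extracted into a standalone sketch covering the three cases: a range entirely ahead of the scan is dropped, a range straddling the frontier is released up to and including cache_progress, and a fully scanned range is released whole.

/* Sketch of the clamp in btrfs_unpin_free_ino(): of a pinned range
 * [offset, offset + bytes), only numbers the caching scan has already
 * passed may move to the free tree. Returns how many. */
#include <assert.h>

static unsigned long long unpin_count(unsigned long long offset,
				      unsigned long long bytes,
				      unsigned long long progress)
{
	if (offset > progress)		/* scan hasn't reached it: drop */
		return 0;
	if (offset + bytes > progress)	/* straddles the frontier: clamp */
		return progress - offset + 1;
	return bytes;			/* fully behind the scan: all of it */
}

int main(void)
{
	assert(unpin_count(300, 10, 200) == 0);	  /* entirely ahead */
	assert(unpin_count(100, 10, 105) == 6);	  /* 100..105 inclusive */
	assert(unpin_count(100, 10, 500) == 10);  /* entirely behind */
	return 0;
}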
+ */ + return false; +} + +static struct btrfs_free_space_op pinned_free_ino_op = { + .recalc_thresholds = pinned_recalc_thresholds, + .use_bitmap = pinned_use_bitmap, +}; + +void btrfs_init_free_ino_ctl(struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; + + spin_lock_init(&ctl->tree_lock); + ctl->unit = 1; + ctl->start = 0; + ctl->private = NULL; + ctl->op = &free_ino_op; + + /* + * Initially we allow to use 16K of ram to cache chunks of + * inode numbers before we resort to bitmaps. This is somewhat + * arbitrary, but it will be adjusted in runtime. + */ + ctl->extents_thresh = INIT_THRESHOLD; + + spin_lock_init(&pinned->tree_lock); + pinned->unit = 1; + pinned->start = 0; + pinned->private = NULL; + pinned->extents_thresh = 0; + pinned->op = &pinned_free_ino_op; +} + +static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) { struct btrfs_path *path; int ret; @@ -55,15 +387,14 @@ error: return ret; } -int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 dirid, u64 *objectid) +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) { int ret; mutex_lock(&root->objectid_mutex); if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { - ret = btrfs_find_highest_inode(root, &root->highest_objectid); + ret = btrfs_find_highest_objectid(root, + &root->highest_objectid); if (ret) goto out; } diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h new file mode 100644 index 000000000000..eb918451b492 --- /dev/null +++ b/fs/btrfs/inode-map.h @@ -0,0 +1,11 @@ +#ifndef __BTRFS_INODE_MAP +#define __BTRFS_INODE_MAP + +void btrfs_init_free_ino_ctl(struct btrfs_root *root); +void btrfs_unpin_free_ino(struct btrfs_root *root); +void btrfs_return_ino(struct btrfs_root *root, u64 objectid); +int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); + +int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); + +#endif diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a4157cfdd533..77dd0a776c83 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -51,6 +51,7 @@ #include "compression.h" #include "locking.h" #include "free-space-cache.h" +#include "inode-map.h" struct btrfs_iget_args { u64 ino; @@ -3809,6 +3810,10 @@ void btrfs_evict_inode(struct inode *inode) BUG_ON(ret); } + if (!(root == root->fs_info->tree_root || + root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) + btrfs_return_ino(root, inode->i_ino); + nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); @@ -4538,6 +4543,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, return ERR_PTR(-ENOMEM); } + /* + * we have to initialize this early, so we can reclaim the inode + * number if we fail afterwards in this function. 
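Aside: the btrfs_free_space_op indirection above is what lets one cache implementation serve both block groups and inode numbers; the generic code never hard-codes a bitmap policy. A toy model of that vtable pattern follows, with simplified signatures (the kernel op also carries recalc_thresholds and is handed a btrfs_free_space entry rather than a raw count):

#include <stdbool.h>
#include <stdio.h>

struct ctl;

struct free_space_op {
	bool (*use_bitmap)(struct ctl *ctl, unsigned long long bytes);
};

struct ctl {
	int free_extents;
	int extents_thresh;
	const struct free_space_op *op;
};

/* inode-number policy: bitmaps only once extents get numerous, and
 * never for a big chunk, which is cheaper kept as one extent */
static bool ino_use_bitmap(struct ctl *ctl, unsigned long long bytes)
{
	return ctl->free_extents >= ctl->extents_thresh && bytes <= 4096;
}

/* pinned-tree policy: extents only, so unpinning walks plain ranges */
static bool pinned_use_bitmap(struct ctl *ctl, unsigned long long bytes)
{
	(void)ctl;
	(void)bytes;
	return false;
}

static const struct free_space_op ino_op = { .use_bitmap = ino_use_bitmap };
static const struct free_space_op pin_op = { .use_bitmap = pinned_use_bitmap };

int main(void)
{
	struct ctl free_ino = { .free_extents = 999, .extents_thresh = 10,
				.op = &ino_op };
	struct ctl pinned = { .free_extents = 0, .extents_thresh = 0,
			      .op = &pin_op };

	printf("%d %d\n", free_ino.op->use_bitmap(&free_ino, 64),
	       pinned.op->use_bitmap(&pinned, 64));	/* prints: 1 0 */
	return 0;
}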
+ */ + inode->i_ino = objectid; + if (dir) { trace_btrfs_inode_request(dir); @@ -4583,7 +4594,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, goto fail; inode_init_owner(inode, dir, mode); - inode->i_ino = objectid; inode_set_bytes(inode, 0); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], @@ -4712,10 +4722,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (!new_valid_dev(rdev)) return -EINVAL; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; - /* * 2 for inode item and ref * 2 for dir items @@ -4727,6 +4733,10 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); @@ -4774,9 +4784,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, u64 objectid; u64 index = 0; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; /* * 2 for inode item and ref * 2 for dir items @@ -4788,6 +4795,10 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, mode, &index); @@ -4902,10 +4913,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) u64 index = 0; unsigned long nr = 1; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; - /* * 2 items for inode and ref * 2 items for dir items @@ -4916,6 +4923,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) return PTR_ERR(trans); btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_fail; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFDIR | mode, @@ -7257,9 +7268,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) return -ENAMETOOLONG; - err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); - if (err) - return err; /* * 2 items for inode item and ref * 2 items for dir items @@ -7271,6 +7279,10 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, btrfs_set_trans_block_group(trans, dir); + err = btrfs_find_free_ino(root, &objectid); + if (err) + goto out_unlock; + inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f580a3a5d2fc..e1835f8eec93 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -50,6 +50,7 @@ #include "print-tree.h" #include "volumes.h" #include "locking.h" +#include "inode-map.h" /* Mask out flags that are inappropriate for the given type of inode. 
*/ static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) @@ -323,8 +324,7 @@ static noinline int create_subvol(struct btrfs_root *root, u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; u64 index = 0; - ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, - 0, &objectid); + ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); if (ret) { dput(parent); return ret; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 58250e09eb05..e6cb89357256 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -30,6 +30,7 @@ #include "btrfs_inode.h" #include "async-thread.h" #include "free-space-cache.h" +#include "inode-map.h" /* * backref_node, mapping_node and tree_block start with this @@ -3897,7 +3898,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, if (IS_ERR(trans)) return ERR_CAST(trans); - err = btrfs_find_free_objectid(trans, root, objectid, &objectid); + err = btrfs_find_free_objectid(root, &objectid); if (err) goto out; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..aef6c81e7101 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -27,6 +27,7 @@ #include "transaction.h" #include "locking.h" #include "tree-log.h" +#include "inode-map.h" #define BTRFS_ROOT_TRANS_TAG 0 @@ -761,7 +762,11 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_orphan_commit_root(trans, root); if (root->commit_root != root->node) { + mutex_lock(&root->fs_commit_mutex); switch_commit_root(root); + btrfs_unpin_free_ino(root); + mutex_unlock(&root->fs_commit_mutex); + btrfs_set_root_node(&root->root_item, root->node); } @@ -930,7 +935,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); + ret = btrfs_find_free_objectid(tree_root, &objectid); if (ret) { pending->error = ret; goto fail; -- cgit v1.2.3 From 0414efae7989a2183fb2cc000ab285c4c2836a00 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:20:14 +0800 Subject: Btrfs: Make the code for reading/writing free space cache generic Extract out block group specific code from lookup_free_space_inode(), create_free_space_inode(), load_free_space_cache() and btrfs_write_out_cache(), so the code can be used to read/write free ino cache. 
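Aside: the shape of this refactor is the same for all four routines: a double-underscore core keyed by an inode and a key offset, plus a thin wrapper that keeps the old block-group signature and its error handling. A schematic sketch of that shape, using illustrative stubs rather than the kernel functions:

#include <stdio.h>

struct cache_ctl { long long free_space; };
struct block_group { unsigned long long objectid; struct cache_ctl ctl; };

/* generic core: usable for block groups and for the free-ino cache */
static int __load_cache(struct cache_ctl *ctl, unsigned long long offset)
{
	printf("loading cache keyed by %llu\n", offset);
	ctl->free_space = 0;	/* the real code parses entries here */
	return 1;		/* 1 = loaded, < 0 = corrupt, as in the patch */
}

/* block-group wrapper keeps the old signature and the old checks */
static int load_block_group_cache(struct block_group *bg)
{
	int ret = __load_cache(&bg->ctl, bg->objectid);

	if (ret < 0)
		fprintf(stderr, "cache for group %llu is bogus\n",
			bg->objectid);
	return ret;
}

int main(void)
{
	struct block_group bg = { .objectid = 12345 };

	return load_block_group_cache(&bg) == 1 ? 0 : 1;
}

The free-ino cache can then call the core directly with its own ctl and offset, which is exactly what the split below enables.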
Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 358 +++++++++++++++++++++++++------------------- 1 file changed, 204 insertions(+), 154 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 2ce89bfc8815..fcbdcef6ca28 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -33,9 +33,9 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info); -struct inode *lookup_free_space_inode(struct btrfs_root *root, - struct btrfs_block_group_cache - *block_group, struct btrfs_path *path) +static struct inode *__lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_path *path, + u64 offset) { struct btrfs_key key; struct btrfs_key location; @@ -45,15 +45,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, struct inode *inode = NULL; int ret; - spin_lock(&block_group->lock); - if (block_group->inode) - inode = igrab(block_group->inode); - spin_unlock(&block_group->lock); - if (inode) - return inode; - key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -83,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, inode->i_mapping->flags &= ~__GFP_FS; + return inode; +} + +struct inode *lookup_free_space_inode(struct btrfs_root *root, + struct btrfs_block_group_cache + *block_group, struct btrfs_path *path) +{ + struct inode *inode = NULL; + + spin_lock(&block_group->lock); + if (block_group->inode) + inode = igrab(block_group->inode); + spin_unlock(&block_group->lock); + if (inode) + return inode; + + inode = __lookup_free_space_inode(root, path, + block_group->key.objectid); + if (IS_ERR(inode)) + return inode; + spin_lock(&block_group->lock); if (!root->fs_info->closing) { block_group->inode = igrab(inode); @@ -93,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return inode; } -int create_free_space_inode(struct btrfs_root *root, - struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, - struct btrfs_path *path) +int __create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, u64 ino, u64 offset) { struct btrfs_key key; struct btrfs_disk_key disk_key; struct btrfs_free_space_header *header; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; - u64 objectid; int ret; - ret = btrfs_find_free_objectid(root, &objectid); - if (ret < 0) - return ret; - - ret = btrfs_insert_empty_inode(trans, root, path, objectid); + ret = btrfs_insert_empty_inode(trans, root, path, ino); if (ret) return ret; @@ -130,13 +138,12 @@ int create_free_space_inode(struct btrfs_root *root, BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); btrfs_set_inode_nlink(leaf, inode_item, 1); btrfs_set_inode_transid(leaf, inode_item, trans->transid); - btrfs_set_inode_block_group(leaf, inode_item, - block_group->key.objectid); + btrfs_set_inode_block_group(leaf, inode_item, offset); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(root, path); key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_insert_empty_item(trans, root, path, &key, @@ -156,6 +163,22 @@ int create_free_space_inode(struct btrfs_root *root, return 0; } +int create_free_space_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path 
*path) +{ + int ret; + u64 ino; + + ret = btrfs_find_free_objectid(root, &ino); + if (ret < 0) + return ret; + + return __create_free_space_inode(root, trans, path, ino, + block_group->key.objectid); +} + int btrfs_truncate_free_space_cache(struct btrfs_root *root, struct btrfs_trans_handle *trans, struct btrfs_path *path, @@ -208,16 +231,13 @@ static int readahead_cache(struct inode *inode) return 0; } -int load_free_space_cache(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *block_group) +int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, + struct btrfs_free_space_ctl *ctl, + struct btrfs_path *path, u64 offset) { - struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; - struct btrfs_root *root = fs_info->tree_root; - struct inode *inode; struct btrfs_free_space_header *header; struct extent_buffer *leaf; struct page *page; - struct btrfs_path *path; u32 *checksums = NULL, *crc; char *disk_crcs = NULL; struct btrfs_key key; @@ -225,76 +245,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, u64 num_entries; u64 num_bitmaps; u64 generation; - u64 used = btrfs_block_group_used(&block_group->item); u32 cur_crc = ~(u32)0; pgoff_t index = 0; unsigned long first_page_offset; int num_checksums; - int ret = 0; - - /* - * If we're unmounting then just return, since this does a search on the - * normal root and not the commit root and we could deadlock. - */ - smp_mb(); - if (fs_info->closing) - return 0; - - /* - * If this block group has been marked to be cleared for one reason or - * another then we can't trust the on disk cache, so just return. - */ - spin_lock(&block_group->lock); - if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { - spin_unlock(&block_group->lock); - return 0; - } - spin_unlock(&block_group->lock); + int ret = 0, ret2; INIT_LIST_HEAD(&bitmaps); - path = btrfs_alloc_path(); - if (!path) - return 0; - - inode = lookup_free_space_inode(root, block_group, path); - if (IS_ERR(inode)) { - btrfs_free_path(path); - return 0; - } - /* Nothing in the space cache, goodbye */ - if (!i_size_read(inode)) { - btrfs_free_path(path); + if (!i_size_read(inode)) goto out; - } key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); - if (ret) { - btrfs_free_path(path); + if (ret < 0) + goto out; + else if (ret > 0) { + btrfs_release_path(root, path); + ret = 0; goto out; } + ret = -1; + leaf = path->nodes[0]; header = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_free_space_header); num_entries = btrfs_free_space_entries(leaf, header); num_bitmaps = btrfs_free_space_bitmaps(leaf, header); generation = btrfs_free_space_generation(leaf, header); - btrfs_free_path(path); + btrfs_release_path(root, path); if (BTRFS_I(inode)->generation != generation) { printk(KERN_ERR "btrfs: free space inode generation (%llu) did" - " not match free space cache generation (%llu) for " - "block group %llu\n", + " not match free space cache generation (%llu)\n", (unsigned long long)BTRFS_I(inode)->generation, - (unsigned long long)generation, - (unsigned long long)block_group->key.objectid); - goto free_cache; + (unsigned long long)generation); + goto out; } if (!num_entries) @@ -311,10 +302,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, goto out; ret = readahead_cache(inode); - if (ret) { - ret = 0; + if (ret) goto out; - } while (1) { struct btrfs_free_space_entry *entry; @@ -333,10 +322,8 @@ int 
load_free_space_cache(struct btrfs_fs_info *fs_info, } page = grab_cache_page(inode->i_mapping, index); - if (!page) { - ret = 0; + if (!page) goto free_cache; - } if (!PageUptodate(page)) { btrfs_readpage(NULL, page); @@ -345,9 +332,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, unlock_page(page); page_cache_release(page); printk(KERN_ERR "btrfs: error reading free " - "space cache: %llu\n", - (unsigned long long) - block_group->key.objectid); + "space cache\n"); goto free_cache; } } @@ -360,13 +345,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, gen = addr + (sizeof(u32) * num_checksums); if (*gen != BTRFS_I(inode)->generation) { printk(KERN_ERR "btrfs: space cache generation" - " (%llu) does not match inode (%llu) " - "for block group %llu\n", + " (%llu) does not match inode (%llu)\n", (unsigned long long)*gen, (unsigned long long) - BTRFS_I(inode)->generation, - (unsigned long long) - block_group->key.objectid); + BTRFS_I(inode)->generation); kunmap(page); unlock_page(page); page_cache_release(page); @@ -382,9 +364,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, PAGE_CACHE_SIZE - start_offset); btrfs_csum_final(cur_crc, (char *)&cur_crc); if (cur_crc != *crc) { - printk(KERN_ERR "btrfs: crc mismatch for page %lu in " - "block group %llu\n", index, - (unsigned long long)block_group->key.objectid); + printk(KERN_ERR "btrfs: crc mismatch for page %lu\n", + index); kunmap(page); unlock_page(page); page_cache_release(page); @@ -432,7 +413,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, goto free_cache; } spin_lock(&ctl->tree_lock); - ret = link_free_space(ctl, e); + ret2 = link_free_space(ctl, e); ctl->total_bitmaps++; ctl->op->recalc_thresholds(ctl); spin_unlock(&ctl->tree_lock); @@ -471,42 +452,96 @@ next: index++; } - spin_lock(&ctl->tree_lock); - if (ctl->free_space != (block_group->key.offset - used - - block_group->bytes_super)) { - spin_unlock(&ctl->tree_lock); - printk(KERN_ERR "block group %llu has an wrong amount of free " - "space\n", block_group->key.objectid); - ret = 0; - goto free_cache; - } - spin_unlock(&ctl->tree_lock); - ret = 1; out: kfree(checksums); kfree(disk_crcs); - iput(inode); return ret; - free_cache: - /* This cache is bogus, make sure it gets cleared */ - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_CLEAR; - spin_unlock(&block_group->lock); - btrfs_remove_free_space_cache(block_group); + __btrfs_remove_free_space_cache(ctl); goto out; } -int btrfs_write_out_cache(struct btrfs_root *root, - struct btrfs_trans_handle *trans, - struct btrfs_block_group_cache *block_group, - struct btrfs_path *path) +int load_free_space_cache(struct btrfs_fs_info *fs_info, + struct btrfs_block_group_cache *block_group) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct btrfs_root *root = fs_info->tree_root; + struct inode *inode; + struct btrfs_path *path; + int ret; + bool matched; + u64 used = btrfs_block_group_used(&block_group->item); + + /* + * If we're unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock. + */ + smp_mb(); + if (fs_info->closing) + return 0; + + /* + * If this block group has been marked to be cleared for one reason or + * another then we can't trust the on disk cache, so just return. 
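Aside: the on-disk layout the loader depends on packs one u32 crc per page plus a u64 generation stamp at the head of page 0, so real entries only start at first_page_offset. That arithmetic, extracted into a standalone sketch assuming a 4K page:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static uint64_t first_page_offset(uint64_t isize)
{
	uint64_t num_pages = (isize + PAGE_SIZE - 1) / PAGE_SIZE;

	/* one checksum for every page, then the generation stamp */
	return sizeof(uint32_t) * num_pages + sizeof(uint64_t);
}

int main(void)
{
	/* a 3-page cache file: 3 crcs (12 bytes) + generation (8) = 20 */
	printf("%llu\n", (unsigned long long)first_page_offset(3 * PAGE_SIZE));
	return 0;
}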
+ */ + spin_lock(&block_group->lock); + if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) { + btrfs_free_path(path); + return 0; + } + + ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, + path, block_group->key.objectid); + btrfs_free_path(path); + if (ret <= 0) + goto out; + + spin_lock(&ctl->tree_lock); + matched = (ctl->free_space == (block_group->key.offset - used - + block_group->bytes_super)); + spin_unlock(&ctl->tree_lock); + + if (!matched) { + __btrfs_remove_free_space_cache(ctl); + printk(KERN_ERR "block group %llu has an wrong amount of free " + "space\n", block_group->key.objectid); + ret = -1; + } +out: + if (ret < 0) { + /* This cache is bogus, make sure it gets cleared */ + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_CLEAR; + spin_unlock(&block_group->lock); + + printk(KERN_ERR "btrfs: failed to load free space cache " + "for block group %llu\n", block_group->key.objectid); + } + + iput(inode); + return ret; +} + +int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, + struct btrfs_free_space_ctl *ctl, + struct btrfs_block_group_cache *block_group, + struct btrfs_trans_handle *trans, + struct btrfs_path *path, u64 offset) +{ struct btrfs_free_space_header *header; struct extent_buffer *leaf; - struct inode *inode; struct rb_node *node; struct list_head *pos, *n; struct page **pages; @@ -523,35 +558,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; - int ret = 0; + int ret = -1; bool next_page = false; bool out_of_space = false; - root = root->fs_info->tree_root; - INIT_LIST_HEAD(&bitmap_list); - spin_lock(&block_group->lock); - if (block_group->disk_cache_state < BTRFS_DC_SETUP) { - spin_unlock(&block_group->lock); - return 0; - } - spin_unlock(&block_group->lock); - - inode = lookup_free_space_inode(root, block_group, path); - if (IS_ERR(inode)) - return 0; - - if (!i_size_read(inode)) { - iput(inode); - return 0; - } - node = rb_first(&ctl->free_space_offset); - if (!node) { - iput(inode); + if (!node) return 0; - } + + if (!i_size_read(inode)) + return -1; num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; @@ -561,16 +579,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, /* We need a checksum per page. */ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); - if (!crc) { - iput(inode); - return 0; - } + if (!crc) + return -1; pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); if (!pages) { kfree(crc); - iput(inode); - return 0; + return -1; } /* Since the first page has all of our checksums and our generation we @@ -580,7 +595,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); /* Get the cluster for this block_group if it exists */ - if (!list_empty(&block_group->cluster_list)) + if (block_group && !list_empty(&block_group->cluster_list)) cluster = list_entry(block_group->cluster_list.next, struct btrfs_free_cluster, block_group_list); @@ -622,7 +637,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, * When searching for pinned extents, we need to start at our start * offset. 
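Aside: the `matched` test above is the cache's last line of defense: the free space recorded on disk must equal the group size minus allocated bytes minus the superblock stripes, or the cache is discarded and rebuilt. The same check as a self-contained snippet, with arbitrary example numbers:

#include <assert.h>
#include <stdbool.h>

static bool cache_matches(unsigned long long group_size,
			  unsigned long long used,
			  unsigned long long bytes_super,
			  unsigned long long cached_free)
{
	return cached_free == group_size - used - bytes_super;
}

int main(void)
{
	assert(cache_matches(1024 * 1024, 300 * 1024, 4096,
			     1024 * 1024 - 300 * 1024 - 4096));
	assert(!cache_matches(1024 * 1024, 300 * 1024, 4096, 0));
	return 0;
}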
*/ - start = block_group->key.objectid; + if (block_group) + start = block_group->key.objectid; /* Write out the extent entries */ do { @@ -680,8 +696,9 @@ int btrfs_write_out_cache(struct btrfs_root *root, * We want to add any pinned extents to our free space cache * so we don't leak the space */ - while (!next_page && (start < block_group->key.objectid + - block_group->key.offset)) { + while (block_group && !next_page && + (start < block_group->key.objectid + + block_group->key.offset)) { ret = find_first_extent_bit(unpin, start, &start, &end, EXTENT_DIRTY); if (ret) { @@ -799,12 +816,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, filemap_write_and_wait(inode->i_mapping); key.objectid = BTRFS_FREE_SPACE_OBJECTID; - key.offset = block_group->key.objectid; + key.offset = offset; key.type = 0; ret = btrfs_search_slot(trans, root, &key, path, 1, 1); if (ret < 0) { - ret = 0; + ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); @@ -817,8 +834,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || - found_key.offset != block_group->key.objectid) { - ret = 0; + found_key.offset != offset) { + ret = -1; clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 0, NULL, @@ -838,16 +855,49 @@ int btrfs_write_out_cache(struct btrfs_root *root, ret = 1; out_free: - if (ret == 0) { + if (ret != 1) { invalidate_inode_pages2_range(inode->i_mapping, 0, index); - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_ERROR; - spin_unlock(&block_group->lock); BTRFS_I(inode)->generation = 0; } kfree(checksums); kfree(pages); btrfs_update_inode(trans, root, inode); + return ret; +} + +int btrfs_write_out_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_block_group_cache *block_group, + struct btrfs_path *path) +{ + struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct inode *inode; + int ret = 0; + + root = root->fs_info->tree_root; + + spin_lock(&block_group->lock); + if (block_group->disk_cache_state < BTRFS_DC_SETUP) { + spin_unlock(&block_group->lock); + return 0; + } + spin_unlock(&block_group->lock); + + inode = lookup_free_space_inode(root, block_group, path); + if (IS_ERR(inode)) + return 0; + + ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, + path, block_group->key.objectid); + if (ret < 0) { + spin_lock(&block_group->lock); + block_group->disk_cache_state = BTRFS_DC_ERROR; + spin_unlock(&block_group->lock); + + printk(KERN_ERR "btrfs: failed to write free space cace " + "for block group %llu\n", block_group->key.objectid); + } + iput(inode); return ret; } -- cgit v1.2.3 From 33345d01522f8152f99dc84a3e7a1a45707f387f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:31:50 +0800 Subject: Btrfs: Always use 64bit inode number There's a potential problem in 32bit system when we exhaust 32bit inode numbers and start to allocate big inode numbers, because btrfs uses inode->i_ino in many places. So here we always use BTRFS_I(inode)->location.objectid, which is an u64 variable. There are 2 exceptions that BTRFS_I(inode)->location.objectid != inode->i_ino: the btree inode (0 vs 1) and empty subvol dirs (256 vs 2), and inode->i_ino will be used in those cases. 
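Aside: the rule this commit encodes fits in a few lines of plain C. The real helper is in the btrfs_inode.h hunk below; in this sketch the toy_* names are illustrative, and only the <= BTRFS_FIRST_FREE_OBJECTID comparison and the two special cases come from the patch:

#include <stdio.h>

#define FIRST_FREE_OBJECTID 256ULL	/* like BTRFS_FIRST_FREE_OBJECTID */

struct toy_inode {
	unsigned long i_ino;		/* 32 bits on a 32-bit kernel */
	unsigned long long objectid;	/* always 64 bits */
};

static unsigned long long toy_ino(const struct toy_inode *inode)
{
	if (inode->objectid <= FIRST_FREE_OBJECTID)
		return inode->i_ino;	/* btree inode, empty subvol dir */
	return inode->objectid;
}

int main(void)
{
	struct toy_inode btree = { .i_ino = 1, .objectid = 0 };
	struct toy_inode empty_subvol = { .i_ino = 2, .objectid = 256 };
	struct toy_inode big = { .i_ino = 12345, .objectid = 1ULL << 40 };

	/* prints: 1 2 1099511627776 -- the last value would be
	 * truncated if taken from the unsigned long i_ino */
	printf("%llu %llu %llu\n", toy_ino(&btree),
	       toy_ino(&empty_subvol), toy_ino(&big));
	return 0;
}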
Another reason to make this change is I'm going to use a special inode to save free ino cache, and the inode number must be > (u64)-256. Signed-off-by: Li Zefan --- fs/btrfs/btrfs_inode.h | 9 +++ fs/btrfs/compression.c | 5 +- fs/btrfs/export.c | 25 ++++--- fs/btrfs/extent-tree.c | 10 +-- fs/btrfs/extent_io.c | 4 +- fs/btrfs/file-item.c | 5 +- fs/btrfs/file.c | 27 +++---- fs/btrfs/inode.c | 197 ++++++++++++++++++++++++++----------------------- fs/btrfs/ioctl.c | 18 ++--- fs/btrfs/relocation.c | 24 +++--- fs/btrfs/transaction.c | 4 +- fs/btrfs/tree-log.c | 54 +++++++------- fs/btrfs/xattr.c | 8 +- 13 files changed, 208 insertions(+), 182 deletions(-) diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884ce..8842a4195f91 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -166,6 +166,15 @@ static inline struct btrfs_inode *BTRFS_I(struct inode *inode) return container_of(inode, struct btrfs_inode, vfs_inode); } +static inline u64 btrfs_ino(struct inode *inode) +{ + u64 ino = BTRFS_I(inode)->location.objectid; + + if (ino <= BTRFS_FIRST_FREE_OBJECTID) + ino = inode->i_ino; + return ino; +} + static inline void btrfs_i_size_write(struct inode *inode, u64 size) { i_size_write(inode, size); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 41d1d7c70e29..369d5068ac7a 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode, kunmap_atomic(kaddr, KM_USER0); if (csum != *cb_sum) { - printk(KERN_INFO "btrfs csum failed ino %lu " + printk(KERN_INFO "btrfs csum failed ino %llu " "extent %llu csum %u " - "wanted %u mirror %d\n", inode->i_ino, + "wanted %u mirror %d\n", + (unsigned long long)btrfs_ino(inode), (unsigned long long)disk_start, csum, *cb_sum, cb->mirror_num); ret = -EIO; diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index ff27d7a477b2..7fa283e7d306 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -28,7 +28,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, len = BTRFS_FID_SIZE_NON_CONNECTABLE; type = FILEID_BTRFS_WITHOUT_PARENT; - fid->objectid = inode->i_ino; + fid->objectid = btrfs_ino(inode); fid->root_objectid = BTRFS_I(inode)->root->objectid; fid->gen = inode->i_generation; @@ -174,13 +174,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child) if (!path) return ERR_PTR(-ENOMEM); - if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { - key.objectid = dir->i_ino; + key.objectid = btrfs_ino(dir); key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; } @@ -240,6 +240,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, struct btrfs_key key; int name_len; int ret; + u64 ino; if (!dir || !inode) return -EINVAL; @@ -247,19 +248,21 @@ static int btrfs_get_name(struct dentry *parent, char *name, if (!S_ISDIR(dir->i_mode)) return -EINVAL; + ino = btrfs_ino(inode); + path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->leave_spinning = 1; - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = BTRFS_I(inode)->root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; root = root->fs_info->tree_root; } else { - key.objectid = inode->i_ino; - key.offset = dir->i_ino; + key.objectid = ino; + key.offset = btrfs_ino(dir); key.type = 
BTRFS_INODE_REF_KEY; } @@ -268,7 +271,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, btrfs_free_path(path); return ret; } else if (ret > 0) { - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { + if (ino == BTRFS_FIRST_FREE_OBJECTID) { path->slots[0]--; } else { btrfs_free_path(path); @@ -277,11 +280,11 @@ static int btrfs_get_name(struct dentry *parent, char *name, } leaf = path->nodes[0]; - if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { - rref = btrfs_item_ptr(leaf, path->slots[0], + if (ino == BTRFS_FIRST_FREE_OBJECTID) { + rref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - name_ptr = (unsigned long)(rref + 1); - name_len = btrfs_root_ref_name_len(leaf, rref); + name_ptr = (unsigned long)(rref + 1); + name_len = btrfs_root_ref_name_len(leaf, rref); } else { iref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_ref); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 904eae10ec65..a0e818cb0401 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7009,8 +7009,8 @@ static noinline int get_new_locations(struct inode *reloc_inode, cur_pos = extent_key->objectid - offset; last_byte = extent_key->objectid + extent_key->offset; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, - cur_pos, 0); + ret = btrfs_lookup_file_extent(NULL, root, path, + btrfs_ino(reloc_inode), cur_pos, 0); if (ret < 0) goto out; if (ret > 0) { @@ -7033,7 +7033,7 @@ static noinline int get_new_locations(struct inode *reloc_inode, btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); if (found_key.offset != cur_pos || found_key.type != BTRFS_EXTENT_DATA_KEY || - found_key.objectid != reloc_inode->i_ino) + found_key.objectid != btrfs_ino(reloc_inode)) break; fi = btrfs_item_ptr(leaf, path->slots[0], @@ -7179,7 +7179,7 @@ next: break; } - if (inode && key.objectid != inode->i_ino) { + if (inode && key.objectid != btrfs_ino(inode)) { BUG_ON(extent_locked); btrfs_release_path(root, path); mutex_unlock(&inode->i_mutex); @@ -7488,7 +7488,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root, continue; if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) continue; - if (!inode || inode->i_ino != key.objectid) { + if (!inode || btrfs_ino(inode) != key.objectid) { iput(inode); inode = btrfs_ilookup(target_root->fs_info->sb, key.objectid, target_root, 1); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5ae0bffaa4d8..41d313a0d098 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3030,7 +3030,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, * because there might be preallocation past i_size */ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, - path, inode->i_ino, -1, 0); + path, btrfs_ino(inode), -1, 0); if (ret < 0) { btrfs_free_path(path); return ret; @@ -3043,7 +3043,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, found_type = btrfs_key_type(&found_key); /* No extents, but there might be delalloc bits */ - if (found_key.objectid != inode->i_ino || + if (found_key.objectid != btrfs_ino(inode) || found_type != BTRFS_EXTENT_DATA_KEY) { /* have to trust i_size as the end */ last = (u64)-1; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..1d9410e39212 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -208,8 +208,9 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, EXTENT_NODATASUM, GFP_NOFS); } else { printk(KERN_INFO "btrfs no csum found " - "for inode %lu start 
%llu\n", - inode->i_ino, + "for inode %llu start %llu\n", + (unsigned long long) + btrfs_ino(inode), (unsigned long long)offset); } item = NULL; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 75899a01dded..bef020451525 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -298,6 +298,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, struct btrfs_path *path; struct btrfs_key key; struct btrfs_key new_key; + u64 ino = btrfs_ino(inode); u64 search_start = start; u64 disk_bytenr = 0; u64 num_bytes = 0; @@ -318,14 +319,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, while (1) { recow = 0; - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, ino, search_start, -1); if (ret < 0) break; if (ret > 0 && path->slots[0] > 0 && search_start == start) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); - if (key.objectid == inode->i_ino && + if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } @@ -346,7 +347,7 @@ next_slot: } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - if (key.objectid > inode->i_ino || + if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) break; @@ -592,6 +593,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, int del_slot = 0; int recow; int ret; + u64 ino = btrfs_ino(inode); btrfs_drop_extent_cache(inode, start, end - 1, 0); @@ -600,7 +602,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, again: recow = 0; split = start; - key.objectid = inode->i_ino; + key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = split; @@ -612,8 +614,7 @@ again: leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - BUG_ON(key.objectid != inode->i_ino || - key.type != BTRFS_EXTENT_DATA_KEY); + BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY); fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); BUG_ON(btrfs_file_extent_type(leaf, fi) != @@ -630,7 +631,7 @@ again: other_start = 0; other_end = start; if (extent_mergeable(leaf, path->slots[0] - 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { new_key.offset = end; btrfs_set_item_key_safe(trans, root, path, &new_key); @@ -653,7 +654,7 @@ again: other_start = end; other_end = 0; if (extent_mergeable(leaf, path->slots[0] + 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); @@ -702,7 +703,7 @@ again: ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); if (split == start) { @@ -718,7 +719,7 @@ again: other_start = end; other_end = 0; if (extent_mergeable(leaf, path->slots[0] + 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { btrfs_release_path(root, path); @@ -729,13 +730,13 @@ again: del_nr++; ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); } other_start = 0; other_end = start; if (extent_mergeable(leaf, path->slots[0] - 1, - inode->i_ino, bytenr, orig_offset, + ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { btrfs_release_path(root, path); @@ -746,7 +747,7 @@ again: del_nr++; ret = 
btrfs_free_extent(trans, root, bytenr, num_bytes, 0, root->root_key.objectid, - inode->i_ino, orig_offset); + ino, orig_offset); BUG_ON(ret); } if (del_nr == 0) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 77dd0a776c83..adec22884a3e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -138,7 +138,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, path->leave_spinning = 1; btrfs_set_trans_block_group(trans, inode); - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); key.offset = start; btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); datasize = btrfs_file_extent_calc_inline_size(cur_size); @@ -1049,6 +1049,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, int nocow; int check_prev = 1; bool nolock = false; + u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); BUG_ON(!path); @@ -1063,14 +1064,14 @@ static noinline int run_delalloc_nocow(struct inode *inode, cow_start = (u64)-1; cur_offset = start; while (1) { - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, ino, cur_offset, 0); BUG_ON(ret < 0); if (ret > 0 && path->slots[0] > 0 && check_prev) { leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0] - 1); - if (found_key.objectid == inode->i_ino && + if (found_key.objectid == ino && found_key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } @@ -1091,7 +1092,7 @@ next_slot: num_bytes = 0; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid > inode->i_ino || + if (found_key.objectid > ino || found_key.type > BTRFS_EXTENT_DATA_KEY || found_key.offset > end) break; @@ -1126,7 +1127,7 @@ next_slot: goto out_check; if (btrfs_extent_readonly(root, disk_bytenr)) goto out_check; - if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + if (btrfs_cross_ref_exist(trans, root, ino, found_key.offset - extent_offset, disk_bytenr)) goto out_check; @@ -1643,7 +1644,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, &hint, 0); BUG_ON(ret); - ins.objectid = inode->i_ino; + ins.objectid = btrfs_ino(inode); ins.offset = file_pos; ins.type = BTRFS_EXTENT_DATA_KEY; ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); @@ -1674,7 +1675,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, ins.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_alloc_reserved_file_extent(trans, root, root->root_key.objectid, - inode->i_ino, file_pos, &ins); + btrfs_ino(inode), file_pos, &ins); BUG_ON(ret); btrfs_free_path(path); @@ -2004,8 +2005,9 @@ good: zeroit: if (printk_ratelimit()) { - printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " - "private %llu\n", page->mapping->host->i_ino, + printk(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " + "private %llu\n", + (unsigned long long)btrfs_ino(page->mapping->host), (unsigned long long)start, csum, (unsigned long long)private); } @@ -2243,7 +2245,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) /* insert an orphan item to track this unlinked/truncated file */ if (insert >= 1) { - ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); + ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); BUG_ON(ret); } @@ -2280,7 +2282,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) spin_unlock(&root->orphan_lock); if (trans && delete_item) { - ret = btrfs_del_orphan_item(trans, root, inode->i_ino); + ret = btrfs_del_orphan_item(trans, root, 
btrfs_ino(inode)); BUG_ON(ret); } @@ -2542,7 +2544,8 @@ static void btrfs_read_locked_inode(struct inode *inode) * try to precache a NULL acl entry for files that don't have * any xattrs or acls */ - maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); + maybe_acls = acls_after_inode_item(leaf, path->slots[0], + btrfs_ino(inode)); if (!maybe_acls) cache_no_acl(inode); @@ -2688,6 +2691,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_dir_item *di; struct btrfs_key key; u64 index; + u64 ino = btrfs_ino(inode); + u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); if (!path) { @@ -2696,7 +2701,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, } path->leave_spinning = 1; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -2713,17 +2718,16 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; btrfs_release_path(root, path); - ret = btrfs_del_inode_ref(trans, root, name, name_len, - inode->i_ino, - dir->i_ino, &index); + ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, + dir_ino, &index); if (ret) { printk(KERN_INFO "btrfs failed to delete reference to %.*s, " - "inode %lu parent %lu\n", name_len, name, - inode->i_ino, dir->i_ino); + "inode %llu parent %llu\n", name_len, name, + (unsigned long long)ino, (unsigned long long)dir_ino); goto err; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, name, name_len, -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -2737,7 +2741,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, - inode, dir->i_ino); + inode, dir_ino); BUG_ON(ret != 0 && ret != -ENOENT); ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, @@ -2815,12 +2819,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, int check_link = 1; int err = -ENOSPC; int ret; + u64 ino = btrfs_ino(inode); + u64 dir_ino = btrfs_ino(dir); trans = btrfs_start_transaction(root, 10); if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) return trans; - if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return ERR_PTR(-ENOSPC); /* check if there is someone else holds reference */ @@ -2879,7 +2885,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, if (ret == 0 && S_ISREG(inode->i_mode)) { ret = btrfs_lookup_file_extent(trans, root, path, - inode->i_ino, (u64)-1, 0); + ino, (u64)-1, 0); if (ret < 0) { err = ret; goto out; @@ -2895,7 +2901,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, goto out; } - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2912,7 +2918,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, ref = btrfs_lookup_inode_ref(trans, root, path, dentry->d_name.name, dentry->d_name.len, - inode->i_ino, dir->i_ino, 0); + ino, dir_ino, 0); if (IS_ERR(ref)) { err = PTR_ERR(ref); goto out; @@ -2923,7 +2929,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, index = btrfs_inode_ref_index(path->nodes[0], ref); btrfs_release_path(root, 
path); - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2998,12 +3004,13 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, struct btrfs_key key; u64 index; int ret; + u64 dir_ino = btrfs_ino(dir); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, name_len, -1); BUG_ON(!di || IS_ERR(di)); @@ -3016,10 +3023,10 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, objectid, root->root_key.objectid, - dir->i_ino, &index, name, name_len); + dir_ino, &index, name, name_len); if (ret < 0) { BUG_ON(ret != -ENOENT); - di = btrfs_search_dir_index_item(root, path, dir->i_ino, + di = btrfs_search_dir_index_item(root, path, dir_ino, name, name_len); BUG_ON(!di || IS_ERR(di)); @@ -3029,7 +3036,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, index = key.offset; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, name, name_len, -1); BUG_ON(!di || IS_ERR(di)); @@ -3058,7 +3065,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) unsigned long nr = 0; if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || - inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) return -ENOTEMPTY; trans = __unlink_start_trans(dir, dentry); @@ -3067,7 +3074,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) btrfs_set_trans_block_group(trans, dir); - if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { + if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { err = btrfs_unlink_subvol(trans, root, dir, BTRFS_I(inode)->location.objectid, dentry->d_name.name, @@ -3299,6 +3306,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int encoding; int ret; int err = 0; + u64 ino = btrfs_ino(inode); BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); @@ -3309,7 +3317,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, BUG_ON(!path); path->reada = -1; - key.objectid = inode->i_ino; + key.objectid = ino; key.offset = (u64)-1; key.type = (u8)-1; @@ -3337,7 +3345,7 @@ search_again: found_type = btrfs_key_type(&found_key); encoding = 0; - if (found_key.objectid != inode->i_ino) + if (found_key.objectid != ino) break; if (found_type < min_type) @@ -3456,7 +3464,7 @@ delete: ret = btrfs_free_extent(trans, root, extent_start, extent_num_bytes, 0, btrfs_header_owner(leaf), - inode->i_ino, extent_offset); + ino, extent_offset); BUG_ON(ret); } @@ -3655,7 +3663,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) break; err = btrfs_insert_file_extent(trans, root, - inode->i_ino, cur_offset, 0, + btrfs_ino(inode), cur_offset, 0, 0, hole_size, 0, hole_size, 0, 0, 0); if (err) @@ -3812,7 +3820,7 @@ void btrfs_evict_inode(struct inode *inode) if (!(root == root->fs_info->tree_root || root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) - btrfs_return_ino(root, inode->i_ino); + btrfs_return_ino(root, btrfs_ino(inode)); nr = trans->blocks_used; btrfs_end_transaction(trans, root); @@ -3839,7 +3847,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, path = btrfs_alloc_path(); BUG_ON(!path); - di = 
btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, + di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, namelen, 0); if (IS_ERR(di)) ret = PTR_ERR(di); @@ -3892,7 +3900,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, leaf = path->nodes[0]; ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); - if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino || + if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) goto out; @@ -3931,6 +3939,7 @@ static void inode_tree_add(struct inode *inode) struct btrfs_inode *entry; struct rb_node **p; struct rb_node *parent; + u64 ino = btrfs_ino(inode); again: p = &root->inode_tree.rb_node; parent = NULL; @@ -3943,9 +3952,9 @@ again: parent = *p; entry = rb_entry(parent, struct btrfs_inode, rb_node); - if (inode->i_ino < entry->vfs_inode.i_ino) + if (ino < btrfs_ino(&entry->vfs_inode)) p = &parent->rb_left; - else if (inode->i_ino > entry->vfs_inode.i_ino) + else if (ino > btrfs_ino(&entry->vfs_inode)) p = &parent->rb_right; else { WARN_ON(!(entry->vfs_inode.i_state & @@ -4009,9 +4018,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < entry->vfs_inode.i_ino) + if (objectid < btrfs_ino(&entry->vfs_inode)) node = node->rb_left; - else if (objectid > entry->vfs_inode.i_ino) + else if (objectid > btrfs_ino(&entry->vfs_inode)) node = node->rb_right; else break; @@ -4019,7 +4028,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= entry->vfs_inode.i_ino) { + if (objectid <= btrfs_ino(&entry->vfs_inode)) { node = prev; break; } @@ -4028,7 +4037,7 @@ again: } while (node) { entry = rb_entry(node, struct btrfs_inode, rb_node); - objectid = entry->vfs_inode.i_ino + 1; + objectid = btrfs_ino(&entry->vfs_inode) + 1; inode = igrab(&entry->vfs_inode); if (inode) { spin_unlock(&root->inode_lock); @@ -4066,7 +4075,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; - return args->ino == inode->i_ino && + return args->ino == btrfs_ino(inode) && args->root == BTRFS_I(inode)->root; } @@ -4244,9 +4253,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, /* special case for "." 
*/ if (filp->f_pos == 0) { - over = filldir(dirent, ".", 1, - 1, inode->i_ino, - DT_DIR); + over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); if (over) return 0; filp->f_pos = 1; @@ -4265,7 +4272,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) @@ -4420,8 +4427,9 @@ void btrfs_dirty_inode(struct inode *inode) if (IS_ERR(trans)) { if (printk_ratelimit()) { printk(KERN_ERR "btrfs: fail to " - "dirty inode %lu error %ld\n", - inode->i_ino, PTR_ERR(trans)); + "dirty inode %llu error %ld\n", + (unsigned long long)btrfs_ino(inode), + PTR_ERR(trans)); } return; } @@ -4431,8 +4439,9 @@ void btrfs_dirty_inode(struct inode *inode) if (ret) { if (printk_ratelimit()) { printk(KERN_ERR "btrfs: fail to " - "dirty inode %lu error %d\n", - inode->i_ino, ret); + "dirty inode %llu error %d\n", + (unsigned long long)btrfs_ino(inode), + ret); } } } @@ -4452,7 +4461,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) struct extent_buffer *leaf; int ret; - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); key.offset = (u64)-1; @@ -4484,7 +4493,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != inode->i_ino || + if (found_key.objectid != btrfs_ino(inode) || btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { BTRFS_I(inode)->index_cnt = 2; goto out; @@ -4657,29 +4666,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, int ret = 0; struct btrfs_key key; struct btrfs_root *root = BTRFS_I(parent_inode)->root; + u64 ino = btrfs_ino(inode); + u64 parent_ino = btrfs_ino(parent_inode); - if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); } else { - key.objectid = inode->i_ino; + key.objectid = ino; btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); key.offset = 0; } - if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, key.objectid, root->root_key.objectid, - parent_inode->i_ino, - index, name, name_len); + parent_ino, index, name, name_len); } else if (add_backref) { - ret = btrfs_insert_inode_ref(trans, root, - name, name_len, inode->i_ino, - parent_inode->i_ino, index); + ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, + parent_ino, index); } if (ret == 0) { ret = btrfs_insert_dir_item(trans, root, name, name_len, - parent_inode->i_ino, &key, + parent_ino, &key, btrfs_inode_type(inode), index); BUG_ON(ret); @@ -4738,7 +4747,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, mode, &index); err = PTR_ERR(inode); if (IS_ERR(inode)) @@ -4800,7 +4809,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, mode, &index); err = 
PTR_ERR(inode); if (IS_ERR(inode)) @@ -4928,7 +4937,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) goto out_fail; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, S_IFDIR | mode, &index); if (IS_ERR(inode)) { @@ -5049,7 +5058,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, u64 bytenr; u64 extent_start = 0; u64 extent_end = 0; - u64 objectid = inode->i_ino; + u64 objectid = btrfs_ino(inode); u32 found_type; struct btrfs_path *path = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -5557,7 +5566,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), offset, 0); if (ret < 0) goto out; @@ -5574,7 +5583,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, ret = 0; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.objectid != inode->i_ino || + if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) { /* not our file or wrong item type, must cow */ goto out; @@ -5608,7 +5617,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, * look for other files referencing this extent, if we * find any we must cow */ - if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), key.offset - backref_offset, disk_bytenr)) goto out; @@ -5798,9 +5807,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) flush_dcache_page(bvec->bv_page); if (csum != *private) { - printk(KERN_ERR "btrfs csum failed ino %lu off" + printk(KERN_ERR "btrfs csum failed ino %llu off" " %llu csum %u private %u\n", - inode->i_ino, (unsigned long long)start, + (unsigned long long)btrfs_ino(inode), + (unsigned long long)start, csum, *private); err = -EIO; } @@ -5947,9 +5957,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) struct btrfs_dio_private *dip = bio->bi_private; if (err) { - printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " + printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " "sector %#Lx len %u err no %d\n", - dip->inode->i_ino, bio->bi_rw, + (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw, (unsigned long long)bio->bi_sector, bio->bi_size, err); dip->errors = 1; @@ -6859,8 +6869,8 @@ void btrfs_destroy_inode(struct inode *inode) spin_lock(&root->orphan_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { - printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", - inode->i_ino); + printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", + (unsigned long long)btrfs_ino(inode)); list_del_init(&BTRFS_I(inode)->i_orphan); } spin_unlock(&root->orphan_lock); @@ -6999,16 +7009,17 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, u64 index = 0; u64 root_objectid; int ret; + u64 old_ino = btrfs_ino(old_inode); - if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return -EPERM; /* we only allow rename subvolume link between subvolumes */ - if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) + if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) return -EXDEV; - if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || - (new_inode && 
new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) + if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || + (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) return -ENOTEMPTY; if (S_ISDIR(old_inode->i_mode) && new_inode && @@ -7024,7 +7035,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, filemap_flush(old_inode->i_mapping); /* close the racy window with snapshot create/destroy ioctl */ - if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) down_read(&root->fs_info->subvol_sem); /* * We want to reserve the absolute worst case amount of items. So if @@ -7049,15 +7060,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (ret) goto out_fail; - if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { /* force full log commit if subvolume involved. */ root->fs_info->last_trans_log_full_commit = trans->transid; } else { ret = btrfs_insert_inode_ref(trans, dest, new_dentry->d_name.name, new_dentry->d_name.len, - old_inode->i_ino, - new_dir->i_ino, index); + old_ino, + btrfs_ino(new_dir), index); if (ret) goto out_fail; /* @@ -7073,10 +7084,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * make sure the inode gets flushed if it is replacing * something. */ - if (new_inode && new_inode->i_size && - old_inode && S_ISREG(old_inode->i_mode)) { + if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) btrfs_add_ordered_operation(trans, root, old_inode); - } old_dir->i_ctime = old_dir->i_mtime = ctime; new_dir->i_ctime = new_dir->i_mtime = ctime; @@ -7085,7 +7094,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (old_dentry->d_parent != new_dentry->d_parent) btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); - if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { + if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, old_dentry->d_name.name, @@ -7102,7 +7111,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_inode) { new_inode->i_ctime = CURRENT_TIME; - if (unlikely(new_inode->i_ino == + if (unlikely(btrfs_ino(new_inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { root_objectid = BTRFS_I(new_inode)->location.objectid; ret = btrfs_unlink_subvol(trans, dest, new_dir, @@ -7130,7 +7139,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len, 0, index); BUG_ON(ret); - if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { + if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { struct dentry *parent = dget_parent(new_dentry); btrfs_log_new_name(trans, old_inode, old_dir, parent); dput(parent); @@ -7139,7 +7148,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, out_fail: btrfs_end_transaction_throttle(trans, root); out_notrans: - if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&root->fs_info->subvol_sem); return ret; @@ -7284,7 +7293,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, goto out_unlock; inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, - dentry->d_name.len, dir->i_ino, objectid, + dentry->d_name.len, btrfs_ino(dir), objectid, BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, &index); err = PTR_ERR(inode); @@ -7315,7 +7324,7 @@ static int 
btrfs_symlink(struct inode *dir, struct dentry *dentry, path = btrfs_alloc_path(); BUG_ON(!path); - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); key.offset = 0; btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); datasize = btrfs_file_extent_calc_inline_size(name_len); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index e1835f8eec93..01dccb4a70bb 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -416,7 +416,7 @@ static noinline int create_subvol(struct btrfs_root *root, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir->i_ino, &key, + name, namelen, btrfs_ino(dir), &key, BTRFS_FT_DIR, index); if (ret) goto fail; @@ -427,7 +427,7 @@ static noinline int create_subvol(struct btrfs_root *root, ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, objectid, root->root_key.objectid, - dir->i_ino, index, name, namelen); + btrfs_ino(dir), index, name, namelen); BUG_ON(ret); @@ -1123,7 +1123,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file, int ret = 0; u64 flags = 0; - if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; down_read(&root->fs_info->subvol_sem); @@ -1150,7 +1150,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (root->fs_info->sb->s_flags & MS_RDONLY) return -EROFS; - if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; if (copy_from_user(&flags, arg, sizeof(flags))) @@ -1633,7 +1633,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, goto out_dput; } - if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { + if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { err = -EINVAL; goto out_dput; } @@ -1919,7 +1919,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, } /* clone data */ - key.objectid = src->i_ino; + key.objectid = btrfs_ino(src); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; @@ -1946,7 +1946,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_item_key_to_cpu(leaf, &key, slot); if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || - key.objectid != src->i_ino) + key.objectid != btrfs_ino(src)) break; if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { @@ -1989,7 +1989,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, goto next; memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = inode->i_ino; + new_key.objectid = btrfs_ino(inode); if (off <= key.offset) new_key.offset = key.offset + destoff - off; else @@ -2043,7 +2043,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ret = btrfs_inc_extent_ref(trans, root, disko, diskl, 0, root->root_key.objectid, - inode->i_ino, + btrfs_ino(inode), new_key.offset - datao); BUG_ON(ret); } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index e6cb89357256..7b75e0c8ef8d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1410,9 +1410,9 @@ again: prev = node; entry = rb_entry(node, struct btrfs_inode, rb_node); - if (objectid < entry->vfs_inode.i_ino) + if (objectid < btrfs_ino(&entry->vfs_inode)) node = node->rb_left; - else if (objectid > entry->vfs_inode.i_ino) + else if (objectid > btrfs_ino(&entry->vfs_inode)) node = node->rb_right; else break; @@ -1420,7 +1420,7 @@ again: if (!node) { while (prev) { entry = rb_entry(prev, struct btrfs_inode, rb_node); - if (objectid <= entry->vfs_inode.i_ino) { + if (objectid <= 
btrfs_ino(&entry->vfs_inode)) { node = prev; break; } @@ -1435,7 +1435,7 @@ again: return inode; } - objectid = entry->vfs_inode.i_ino + 1; + objectid = btrfs_ino(&entry->vfs_inode) + 1; if (cond_resched_lock(&root->inode_lock)) goto again; @@ -1471,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, return -ENOMEM; bytenr -= BTRFS_I(reloc_inode)->index_cnt; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, + ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode), bytenr, 0); if (ret < 0) goto out; @@ -1559,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans, if (first) { inode = find_next_inode(root, key.objectid); first = 0; - } else if (inode && inode->i_ino < key.objectid) { + } else if (inode && btrfs_ino(inode) < key.objectid) { btrfs_add_delayed_iput(inode); inode = find_next_inode(root, key.objectid); } - if (inode && inode->i_ino == key.objectid) { + if (inode && btrfs_ino(inode) == key.objectid) { end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); WARN_ON(!IS_ALIGNED(key.offset, @@ -1894,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, struct inode *inode = NULL; u64 objectid; u64 start, end; + u64 ino; objectid = min_key->objectid; while (1) { @@ -1906,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root, inode = find_next_inode(root, objectid); if (!inode) break; + ino = btrfs_ino(inode); - if (inode->i_ino > max_key->objectid) { + if (ino > max_key->objectid) { iput(inode); break; } - objectid = inode->i_ino + 1; + objectid = ino + 1; if (!S_ISREG(inode->i_mode)) continue; - if (unlikely(min_key->objectid == inode->i_ino)) { + if (unlikely(min_key->objectid == ino)) { if (min_key->type > BTRFS_EXTENT_DATA_KEY) continue; if (min_key->type < BTRFS_EXTENT_DATA_KEY) @@ -1929,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, start = 0; } - if (unlikely(max_key->objectid == inode->i_ino)) { + if (unlikely(max_key->objectid == ino)) { if (max_key->type < BTRFS_EXTENT_DATA_KEY) continue; if (max_key->type > BTRFS_EXTENT_DATA_KEY) { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index aef6c81e7101..f4c1184b7f1a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -972,7 +972,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode->i_ino, &key, + btrfs_ino(parent_inode), &key, BTRFS_FT_DIR, index); BUG_ON(ret); @@ -1014,7 +1014,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, */ ret = btrfs_add_root_ref(trans, tree_root, objectid, parent_root->root_key.objectid, - parent_inode->i_ino, index, + btrfs_ino(parent_inode), index, dentry->d_name.name, dentry->d_name.len); BUG_ON(ret); dput(parent); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c50271ad3157..4323dc68d6cd 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -519,7 +519,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, * file. This must be done before the btrfs_drop_extents run * so we don't try to drop this extent. 
*/ - ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), start, 0); if (ret == 0 && @@ -832,7 +832,7 @@ again: read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); /* if we already have a perfect match, we're done */ - if (inode_in_dir(root, path, dir->i_ino, inode->i_ino, + if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), btrfs_inode_ref_index(eb, ref), name, namelen)) { goto out; @@ -960,8 +960,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, unsigned long ptr; unsigned long ptr_end; int name_len; + u64 ino = btrfs_ino(inode); - key.objectid = inode->i_ino; + key.objectid = ino; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; @@ -980,7 +981,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - if (key.objectid != inode->i_ino || + if (key.objectid != ino || key.type != BTRFS_INODE_REF_KEY) break; ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); @@ -1011,10 +1012,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, if (inode->i_nlink == 0) { if (S_ISDIR(inode->i_mode)) { ret = replay_dir_deletes(trans, root, NULL, path, - inode->i_ino, 1); + ino, 1); BUG_ON(ret); } - ret = insert_orphan_item(trans, root, inode->i_ino); + ret = insert_orphan_item(trans, root, ino); BUG_ON(ret); } btrfs_free_path(path); @@ -2197,6 +2198,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, int ret; int err = 0; int bytes_del = 0; + u64 dir_ino = btrfs_ino(dir); if (BTRFS_I(dir)->logged_trans < trans->transid) return 0; @@ -2212,7 +2214,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, + di = btrfs_lookup_dir_item(trans, log, path, dir_ino, name, name_len, -1); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2224,7 +2226,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, BUG_ON(ret); } btrfs_release_path(log, path); - di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, + di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, index, name, name_len, -1); if (IS_ERR(di)) { err = PTR_ERR(di); @@ -2242,7 +2244,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, if (bytes_del) { struct btrfs_key key; - key.objectid = dir->i_ino; + key.objectid = dir_ino; key.offset = 0; key.type = BTRFS_INODE_ITEM_KEY; btrfs_release_path(log, path); @@ -2300,7 +2302,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, log = root->log_root; mutex_lock(&BTRFS_I(inode)->log_mutex); - ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, + ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), dirid, &index); mutex_unlock(&BTRFS_I(inode)->log_mutex); if (ret == -ENOSPC) { @@ -2366,13 +2368,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, int nritems; u64 first_offset = min_offset; u64 last_offset = (u64)-1; + u64 ino = btrfs_ino(inode); log = root->log_root; - max_key.objectid = inode->i_ino; + max_key.objectid = ino; max_key.offset = (u64)-1; max_key.type = key_type; - min_key.objectid = inode->i_ino; + min_key.objectid = ino; min_key.type = key_type; min_key.offset = min_offset; @@ -2385,9 +2388,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, * we didn't find anything from this 
transaction, see if there * is anything at all */ - if (ret != 0 || min_key.objectid != inode->i_ino || - min_key.type != key_type) { - min_key.objectid = inode->i_ino; + if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { + min_key.objectid = ino; min_key.type = key_type; min_key.offset = (u64)-1; btrfs_release_path(root, path); @@ -2396,7 +2398,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); return ret; } - ret = btrfs_previous_item(root, path, inode->i_ino, key_type); + ret = btrfs_previous_item(root, path, ino, key_type); /* if ret == 0 there are items for this type, * create a range to tell us the last key of this type. @@ -2414,7 +2416,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, } /* go backward to find any previous key */ - ret = btrfs_previous_item(root, path, inode->i_ino, key_type); + ret = btrfs_previous_item(root, path, ino, key_type); if (ret == 0) { struct btrfs_key tmp; btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); @@ -2449,8 +2451,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, for (i = path->slots[0]; i < nritems; i++) { btrfs_item_key_to_cpu(src, &min_key, i); - if (min_key.objectid != inode->i_ino || - min_key.type != key_type) + if (min_key.objectid != ino || min_key.type != key_type) goto done; ret = overwrite_item(trans, log, dst_path, src, i, &min_key); @@ -2471,7 +2472,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, goto done; } btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); - if (tmp.objectid != inode->i_ino || tmp.type != key_type) { + if (tmp.objectid != ino || tmp.type != key_type) { last_offset = (u64)-1; goto done; } @@ -2497,8 +2498,7 @@ done: * is valid */ ret = insert_dir_log_key(trans, log, path, key_type, - inode->i_ino, first_offset, - last_offset); + ino, first_offset, last_offset); if (ret) err = ret; } @@ -2742,6 +2742,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, int nritems; int ins_start_slot = 0; int ins_nr; + u64 ino = btrfs_ino(inode); log = root->log_root; @@ -2754,11 +2755,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, return -ENOMEM; } - min_key.objectid = inode->i_ino; + min_key.objectid = ino; min_key.type = BTRFS_INODE_ITEM_KEY; min_key.offset = 0; - max_key.objectid = inode->i_ino; + max_key.objectid = ino; /* today the code can only do partial logging of directories */ if (!S_ISDIR(inode->i_mode)) @@ -2781,8 +2782,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (inode_only == LOG_INODE_EXISTS) max_key_type = BTRFS_XATTR_ITEM_KEY; - ret = drop_objectid_items(trans, log, path, - inode->i_ino, max_key_type); + ret = drop_objectid_items(trans, log, path, ino, max_key_type); } else { ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); } @@ -2800,7 +2800,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, break; again: /* note, ins_nr might be > 0 here, cleanup outside the loop */ - if (min_key.objectid != inode->i_ino) + if (min_key.objectid != ino) break; if (min_key.type > max_key.type) break; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 07b9bc350d5d..a8af771fc60c 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name, return -ENOMEM; /* lookup the xattr by name */ - di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name, + di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), 
name, strlen(name), 0); if (!di) { ret = -ENODATA; @@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, return -ENOMEM; /* first lets see if we already have this xattr */ - di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name, + di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, strlen(name), -1); if (IS_ERR(di)) { ret = PTR_ERR(di); @@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, } /* ok we have to create a completely new xattr */ - ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino, + ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), name, name_len, value, size); BUG_ON(ret); out: @@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) * NOTE: we set key.offset = 0; because we want to start with the * first xattr that we find and walk forward */ - key.objectid = inode->i_ino; + key.objectid = btrfs_ino(inode); btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); key.offset = 0; -- cgit v1.2.3 From 82d5902d9c681be37ffa9d70482907f9f0b7ec1f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:33:24 +0800 Subject: Btrfs: Support reading/writing on disk free ino cache This is similar to block group caching. We dedicate a special inode in the fs tree to save the free ino cache. The first time we create/delete a file after mount, the free ino cache will be loaded from disk into memory. When the fs tree is committed, the cache will be written back to disk. To keep compatibility, we check the root generation against the generation of the special inode when loading the cache, so the load will fail if the btrfs filesystem was previously mounted by an older kernel. Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 7 ++++ fs/btrfs/disk-io.c | 1 + fs/btrfs/extent-tree.c | 3 +- fs/btrfs/free-space-cache.c | 97 ++++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/free-space-cache.h | 11 +++++ fs/btrfs/inode-map.c | 87 ++++++++++++++++++++++++++++++++++++++++ fs/btrfs/inode-map.h | 2 + fs/btrfs/inode.c | 45 +++++++++++++-------- fs/btrfs/transaction.c | 2 + 9 files changed, 236 insertions(+), 19 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index c96a4e4c5566..b20082e27a9f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -105,6 +105,12 @@ struct btrfs_ordered_sum; /* For storing free space cache */ #define BTRFS_FREE_SPACE_OBJECTID -11ULL +/* + * The inode number assigned to the special inode for storing + * free ino cache + */ +#define BTRFS_FREE_INO_OBJECTID -12ULL + /* dummy objectid represents multiple objectids */ #define BTRFS_MULTIPLE_OBJECTIDS -255ULL @@ -1110,6 +1116,7 @@ struct btrfs_root { wait_queue_head_t cache_wait; struct btrfs_free_space_ctl *free_ino_pinned; u64 cache_progress; + struct inode *cache_inode; struct mutex log_mutex; wait_queue_head_t log_writer_wait; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d02683b1ee16..4f12c30a5470 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2505,6 +2505,7 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) static void free_fs_root(struct btrfs_root *root) { + iput(root->cache_inode); WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); if (root->anon_super.s_dev) { down_write(&root->anon_super.s_umount); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a0e818cb0401..95ce8da63b28 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3145,7 +3145,8 @@ int btrfs_check_data_free_space(struct inode
*inode, u64 bytes) /* make sure bytes are sectorsize aligned */ bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); - if (root == root->fs_info->tree_root) { + if (root == root->fs_info->tree_root || + BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) { alloc_chunk = 0; committed = 1; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index fcbdcef6ca28..7d8b6b643403 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -209,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, return ret; } - return btrfs_update_inode(trans, root, inode); + ret = btrfs_update_inode(trans, root, inode); + return ret; } static int readahead_cache(struct inode *inode) @@ -525,6 +526,7 @@ out: spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_CLEAR; spin_unlock(&block_group->lock); + ret = 0; printk(KERN_ERR "btrfs: failed to load free space cache " "for block group %llu\n", block_group->key.objectid); @@ -893,6 +895,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_ERROR; spin_unlock(&block_group->lock); + ret = 0; printk(KERN_ERR "btrfs: failed to write free space cache " "for block group %llu\n", block_group->key.objectid); @@ -2458,3 +2461,95 @@ out: return ino; } + +struct inode *lookup_free_ino_inode(struct btrfs_root *root, + struct btrfs_path *path) +{ + struct inode *inode = NULL; + + spin_lock(&root->cache_lock); + if (root->cache_inode) + inode = igrab(root->cache_inode); + spin_unlock(&root->cache_lock); + if (inode) + return inode; + + inode = __lookup_free_space_inode(root, path, 0); + if (IS_ERR(inode)) + return inode; + + spin_lock(&root->cache_lock); + if (!root->fs_info->closing) + root->cache_inode = igrab(inode); + spin_unlock(&root->cache_lock); + + return inode; +} + +int create_free_ino_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + return __create_free_space_inode(root, trans, path, + BTRFS_FREE_INO_OBJECTID, 0); +} + +int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_path *path; + struct inode *inode; + int ret = 0; + u64 root_gen = btrfs_root_generation(&root->root_item); + + /* + * If we're unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock.
+ */ + smp_mb(); + if (fs_info->closing) + return 0; + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode)) + goto out; + + if (root_gen != BTRFS_I(inode)->generation) + goto out_put; + + ret = __load_free_space_cache(root, inode, ctl, path, 0); + + if (ret < 0) + printk(KERN_ERR "btrfs: failed to load free ino cache for " + "root %llu\n", root->root_key.objectid); +out_put: + iput(inode); +out: + btrfs_free_path(path); + return ret; +} + +int btrfs_write_out_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct inode *inode; + int ret; + + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode)) + return 0; + + ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); + if (ret < 0) + printk(KERN_ERR "btrfs: failed to write free ino cache " + "for root %llu\n", root->root_key.objectid); + + iput(inode); + return ret; +} diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index af06e6b6ceaa..8f2613f779ed 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -65,6 +65,17 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); +struct inode *lookup_free_ino_inode(struct btrfs_root *root, + struct btrfs_path *path); +int create_free_ino_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path); +int load_free_ino_cache(struct btrfs_fs_info *fs_info, + struct btrfs_root *root); +int btrfs_write_out_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path); + void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, u64 bytenr, u64 size); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 5be62df90c4f..7967e85c72f5 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -137,6 +137,7 @@ out: static void start_caching(struct btrfs_root *root) { struct task_struct *tsk; + int ret; spin_lock(&root->cache_lock); if (root->cached != BTRFS_CACHE_NO) { @@ -147,6 +148,14 @@ static void start_caching(struct btrfs_root *root) root->cached = BTRFS_CACHE_STARTED; spin_unlock(&root->cache_lock); + ret = load_free_ino_cache(root->fs_info, root); + if (ret == 1) { + spin_lock(&root->cache_lock); + root->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&root->cache_lock); + return; + } + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", root->root_key.objectid); BUG_ON(IS_ERR(tsk)); @@ -352,6 +361,84 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root) pinned->op = &pinned_free_ino_op; } +int btrfs_save_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_path *path; + struct inode *inode; + u64 alloc_hint = 0; + int ret; + int prealloc; + bool retry = false; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; +again: + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { + ret = PTR_ERR(inode); + goto out; + } + + if (IS_ERR(inode)) { + BUG_ON(retry); + retry = true; + + ret = create_free_ino_inode(root, trans, path); + if (ret) + goto out; + goto again; + } + + BTRFS_I(inode)->generation = 0; + ret = btrfs_update_inode(trans, root, inode); + WARN_ON(ret); + + if 
(i_size_read(inode) > 0) { + ret = btrfs_truncate_free_space_cache(root, trans, path, inode); + if (ret) + goto out_put; + } + + spin_lock(&root->cache_lock); + if (root->cached != BTRFS_CACHE_FINISHED) { + ret = -1; + spin_unlock(&root->cache_lock); + goto out_put; + } + spin_unlock(&root->cache_lock); + + spin_lock(&ctl->tree_lock); + prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; + prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); + prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; + spin_unlock(&ctl->tree_lock); + + /* Just to make sure we have enough space */ + prealloc += 8 * PAGE_CACHE_SIZE; + + ret = btrfs_check_data_free_space(inode, prealloc); + if (ret) + goto out_put; + + ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, + prealloc, prealloc, &alloc_hint); + if (ret) + goto out_put; + btrfs_free_reserved_data_space(inode, prealloc); + +out_put: + iput(inode); +out: + if (ret == 0) + ret = btrfs_write_out_ino_cache(root, trans, path); + + btrfs_free_path(path); + return ret; +} + static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) { struct btrfs_path *path; diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h index eb918451b492..ddb347bfee23 100644 --- a/fs/btrfs/inode-map.h +++ b/fs/btrfs/inode-map.h @@ -5,6 +5,8 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root); void btrfs_unpin_free_ino(struct btrfs_root *root); void btrfs_return_ino(struct btrfs_root *root, u64 objectid); int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); +int btrfs_save_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans); int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index adec22884a3e..b78d3ab789ca 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -745,6 +745,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start, return alloc_hint; } +static inline bool is_free_space_inode(struct btrfs_root *root, + struct inode *inode) +{ + if (root == root->fs_info->tree_root || + BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) + return true; + return false; +} + /* * when extent_io.c finds a delayed allocation range in the file, * the call backs end up in this code. 
The basic idea is to @@ -777,7 +786,7 @@ static noinline int cow_file_range(struct inode *inode, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; - BUG_ON(root == root->fs_info->tree_root); + BUG_ON(is_free_space_inode(root, inode)); trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); @@ -1048,17 +1057,18 @@ static noinline int run_delalloc_nocow(struct inode *inode, int type; int nocow; int check_prev = 1; - bool nolock = false; + bool nolock; u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); BUG_ON(!path); - if (root == root->fs_info->tree_root) { - nolock = true; + + nolock = is_free_space_inode(root, inode); + + if (nolock) trans = btrfs_join_transaction_nolock(root, 1); - } else { + else trans = btrfs_join_transaction(root, 1); - } BUG_ON(IS_ERR(trans)); cow_start = (u64)-1; @@ -1316,8 +1326,7 @@ static int btrfs_set_bit_hook(struct inode *inode, if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - int do_list = (root->root_key.objectid != - BTRFS_ROOT_TREE_OBJECTID); + bool do_list = !is_free_space_inode(root, inode); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1350,8 +1359,7 @@ static int btrfs_clear_bit_hook(struct inode *inode, if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - int do_list = (root->root_key.objectid != - BTRFS_ROOT_TREE_OBJECTID); + bool do_list = !is_free_space_inode(root, inode); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1458,7 +1466,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - if (root == root->fs_info->tree_root) + if (is_free_space_inode(root, inode)) ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); else ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); @@ -1701,7 +1709,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct extent_state *cached_state = NULL; int compress_type = 0; int ret; - bool nolock = false; + bool nolock; ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, end - start + 1); @@ -1709,7 +1717,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) return 0; BUG_ON(!ordered_extent); - nolock = (root == root->fs_info->tree_root); + nolock = is_free_space_inode(root, inode); if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); @@ -3473,7 +3481,9 @@ delete: if (path->slots[0] == 0 || path->slots[0] != pending_del_slot) { - if (root->ref_cows) { + if (root->ref_cows && + BTRFS_I(inode)->location.objectid != + BTRFS_FREE_INO_OBJECTID) { err = -EAGAIN; goto out; } @@ -3765,7 +3775,7 @@ void btrfs_evict_inode(struct inode *inode) truncate_inode_pages(&inode->i_data, 0); if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || - root == root->fs_info->tree_root)) + is_free_space_inode(root, inode))) goto no_delete; if (is_bad_inode(inode)) { @@ -4382,7 +4392,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) return 0; smp_mb(); - nolock = (root->fs_info->closing && root == root->fs_info->tree_root); + if (root->fs_info->closing && is_free_space_inode(root, inode)) + nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { if (nolock) @@ 
-6900,7 +6911,7 @@ int btrfs_drop_inode(struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; if (btrfs_root_refs(&root->root_item) == 0 && - root != root->fs_info->tree_root) + !is_free_space_inode(root, inode)) return 1; else return generic_drop_inode(inode); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f4c1184b7f1a..4d1dbcbbaf41 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -761,6 +761,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_update_reloc_root(trans, root); btrfs_orphan_commit_root(trans, root); + btrfs_save_ino_cache(root, trans); + if (root->commit_root != root->node) { mutex_lock(&root->fs_commit_mutex); switch_commit_root(root); -- cgit v1.2.3 From 306e16ce13c0f3d4fc071b45803b5b83c2606011 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 14:29:38 +0200 Subject: btrfs: rename variables clashing with global function names reported by gcc -Wshadow: page_index, page_offset, new_inode, dev_name Signed-off-by: David Sterba --- fs/btrfs/compression.c | 42 +++++++++++++++++++++--------------------- fs/btrfs/compression.h | 2 +- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent_io.c | 28 ++++++++++++++-------------- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 24 ++++++++++++------------ fs/btrfs/super.c | 4 ++-- 8 files changed, 53 insertions(+), 53 deletions(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 41d1d7c70e29..d4cd0f0cd695 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -332,7 +332,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, struct compressed_bio *cb; unsigned long bytes_left; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - int page_index = 0; + int pg_index = 0; struct page *page; u64 first_byte = disk_start; struct block_device *bdev; @@ -366,8 +366,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, /* create and submit bios for the compressed pages */ bytes_left = compressed_len; - for (page_index = 0; page_index < cb->nr_pages; page_index++) { - page = compressed_pages[page_index]; + for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { + page = compressed_pages[pg_index]; page->mapping = inode->i_mapping; if (bio->bi_size) ret = io_tree->ops->merge_bio_hook(page, 0, @@ -432,7 +432,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, struct compressed_bio *cb) { unsigned long end_index; - unsigned long page_index; + unsigned long pg_index; u64 last_offset; u64 isize = i_size_read(inode); int ret; @@ -456,13 +456,13 @@ static noinline int add_ra_bio_pages(struct inode *inode, end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; while (last_offset < compressed_end) { - page_index = last_offset >> PAGE_CACHE_SHIFT; + pg_index = last_offset >> PAGE_CACHE_SHIFT; - if (page_index > end_index) + if (pg_index > end_index) break; rcu_read_lock(); - page = radix_tree_lookup(&mapping->page_tree, page_index); + page = radix_tree_lookup(&mapping->page_tree, pg_index); rcu_read_unlock(); if (page) { misses++; @@ -476,7 +476,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, if (!page) break; - if (add_to_page_cache_lru(page, mapping, page_index, + if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { page_cache_release(page); goto next; @@ -560,7 +560,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; unsigned long compressed_len; 
unsigned long nr_pages; - unsigned long page_index; + unsigned long pg_index; struct page *page; struct block_device *bdev; struct bio *comp_bio; @@ -613,10 +613,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; - for (page_index = 0; page_index < nr_pages; page_index++) { - cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | + for (pg_index = 0; pg_index < nr_pages; pg_index++) { + cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (!cb->compressed_pages[page_index]) + if (!cb->compressed_pages[pg_index]) goto fail2; } cb->nr_pages = nr_pages; @@ -634,8 +634,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); - for (page_index = 0; page_index < nr_pages; page_index++) { - page = cb->compressed_pages[page_index]; + for (pg_index = 0; pg_index < nr_pages; pg_index++) { + page = cb->compressed_pages[pg_index]; page->mapping = inode->i_mapping; page->index = em_start >> PAGE_CACHE_SHIFT; @@ -702,8 +702,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, return 0; fail2: - for (page_index = 0; page_index < nr_pages; page_index++) - free_page((unsigned long)cb->compressed_pages[page_index]); + for (pg_index = 0; pg_index < nr_pages; pg_index++) + free_page((unsigned long)cb->compressed_pages[pg_index]); kfree(cb->compressed_pages); fail1: @@ -945,7 +945,7 @@ void btrfs_exit_compress(void) int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio_vec *bvec, int vcnt, - unsigned long *page_index, + unsigned long *pg_index, unsigned long *pg_offset) { unsigned long buf_offset; @@ -954,7 +954,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long working_bytes = total_out - buf_start; unsigned long bytes; char *kaddr; - struct page *page_out = bvec[*page_index].bv_page; + struct page *page_out = bvec[*pg_index].bv_page; /* * start byte is the first byte of the page we're currently @@ -995,11 +995,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, /* check if we need to pick another page */ if (*pg_offset == PAGE_CACHE_SIZE) { - (*page_index)++; - if (*page_index >= vcnt) + (*pg_index)++; + if (*pg_index >= vcnt) return 0; - page_out = bvec[*page_index].bv_page; + page_out = bvec[*pg_index].bv_page; *pg_offset = 0; start_byte = page_offset(page_out) - disk_start; diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 51000174b9d7..a12059f4f0fd 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio_vec *bvec, int vcnt, - unsigned long *page_index, + unsigned long *pg_index, unsigned long *pg_offset); int btrfs_submit_compressed_write(struct inode *inode, u64 start, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..b5433bbe7516 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2534,7 +2534,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, int btrfs_commit_write(struct file *file, struct page *page, unsigned from, unsigned to); struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t page_offset, u64 start, u64 end, + size_t pg_offset, 
u64 start, u64 end, int create); int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..990afa8656a2 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -137,7 +137,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = { * that covers the entire device */ static struct extent_map *btree_get_extent(struct inode *inode, - struct page *page, size_t page_offset, u64 start, u64 len, + struct page *page, size_t pg_offset, u64 start, u64 len, int create) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31b..b730c12fa958 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2007,7 +2007,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, struct btrfs_ordered_extent *ordered; int ret; int nr = 0; - size_t page_offset = 0; + size_t pg_offset = 0; size_t iosize; size_t disk_io_size; size_t blocksize = inode->i_sb->s_blocksize; @@ -2043,9 +2043,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree, char *userpage; struct extent_state *cached = NULL; - iosize = PAGE_CACHE_SIZE - page_offset; + iosize = PAGE_CACHE_SIZE - pg_offset; userpage = kmap_atomic(page, KM_USER0); - memset(userpage + page_offset, 0, iosize); + memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, @@ -2054,7 +2054,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, &cached, GFP_NOFS); break; } - em = get_extent(inode, page, page_offset, cur, + em = get_extent(inode, page, pg_offset, cur, end - cur + 1, 0); if (IS_ERR(em) || !em) { SetPageError(page); @@ -2094,7 +2094,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, struct extent_state *cached = NULL; userpage = kmap_atomic(page, KM_USER0); - memset(userpage + page_offset, 0, iosize); + memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); @@ -2103,7 +2103,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, unlock_extent_cached(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } /* the get_extent function already copied into the page */ @@ -2112,7 +2112,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, check_page_uptodate(tree, page); unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } /* we have an inline extent but it didn't get marked up @@ -2122,7 +2122,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, SetPageError(page); unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } @@ -2135,7 +2135,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; pnr -= page->index; ret = submit_extent_page(READ, tree, page, - sector, disk_io_size, page_offset, + sector, disk_io_size, pg_offset, bdev, bio, pnr, end_bio_extent_readpage, mirror_num, *bio_flags, @@ -2146,7 +2146,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, if (ret) SetPageError(page); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; } if (!nr) { if (!PageError(page)) @@ -2751,7 +2751,7 @@ int 
extent_prepare_write(struct extent_io_tree *tree, u64 cur_end; struct extent_map *em; unsigned blocksize = 1 << inode->i_blkbits; - size_t page_offset = 0; + size_t pg_offset = 0; size_t block_off_start; size_t block_off_end; int err = 0; @@ -2767,7 +2767,7 @@ int extent_prepare_write(struct extent_io_tree *tree, lock_extent(tree, page_start, page_end, GFP_NOFS); while (block_start <= block_end) { - em = get_extent(inode, page, page_offset, block_start, + em = get_extent(inode, page, pg_offset, block_start, block_end - block_start + 1, 1); if (IS_ERR(em) || !em) goto err; @@ -2811,7 +2811,7 @@ int extent_prepare_write(struct extent_io_tree *tree, block_start + iosize - 1, EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS); ret = submit_extent_page(READ, tree, page, - sector, iosize, page_offset, em->bdev, + sector, iosize, pg_offset, em->bdev, NULL, 1, end_bio_extent_preparewrite, 0, 0, 0); @@ -2828,7 +2828,7 @@ int extent_prepare_write(struct extent_io_tree *tree, &cached, GFP_NOFS); block_start = cur_end + 1; } - page_offset = block_start & (PAGE_CACHE_SIZE - 1); + pg_offset = block_start & (PAGE_CACHE_SIZE - 1); free_extent_map(em); } if (iocount) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index af2d7179c372..b9ce2f720742 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -164,7 +164,7 @@ static inline struct extent_state *extent_state_next(struct extent_state *state) typedef struct extent_map *(get_extent_t)(struct inode *inode, struct page *page, - size_t page_offset, + size_t pg_offset, u64 start, u64 len, int create); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..fc966472e3ad 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6985,7 +6985,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(old_dir)->root; struct btrfs_root *dest = BTRFS_I(new_dir)->root; - struct inode *new_inode = new_dentry->d_inode; + struct inode *newinode = new_dentry->d_inode; struct inode *old_inode = old_dentry->d_inode; struct timespec ctime = CURRENT_TIME; u64 index = 0; @@ -7000,18 +7000,18 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, return -EXDEV; if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || - (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) + (newinode && newinode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) return -ENOTEMPTY; - if (S_ISDIR(old_inode->i_mode) && new_inode && - new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) + if (S_ISDIR(old_inode->i_mode) && newinode && + newinode->i_size > BTRFS_EMPTY_DIR_SIZE) return -ENOTEMPTY; /* * we're using rename to replace one file with another. * and the replacement file is large. Start IO on it now so * we don't add too much work to the end of the transaction */ - if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && + if (newinode && S_ISREG(old_inode->i_mode) && newinode->i_size && old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) filemap_flush(old_inode->i_mapping); @@ -7065,7 +7065,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * make sure the inode gets flushed if it is replacing * something. 
*/ - if (new_inode && new_inode->i_size && + if (newinode && newinode->i_size && old_inode && S_ISREG(old_inode->i_mode)) { btrfs_add_ordered_operation(trans, root, old_inode); } @@ -7092,16 +7092,16 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, } BUG_ON(ret); - if (new_inode) { - new_inode->i_ctime = CURRENT_TIME; - if (unlikely(new_inode->i_ino == + if (newinode) { + newinode->i_ctime = CURRENT_TIME; + if (unlikely(newinode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - root_objectid = BTRFS_I(new_inode)->location.objectid; + root_objectid = BTRFS_I(newinode)->location.objectid; ret = btrfs_unlink_subvol(trans, dest, new_dir, root_objectid, new_dentry->d_name.name, new_dentry->d_name.len); - BUG_ON(new_inode->i_nlink == 0); + BUG_ON(newinode->i_nlink == 0); } else { ret = btrfs_unlink_inode(trans, dest, new_dir, new_dentry->d_inode, @@ -7109,7 +7109,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len); } BUG_ON(ret); - if (new_inode->i_nlink == 0) { + if (newinode->i_nlink == 0) { ret = btrfs_orphan_add(trans, new_dentry->d_inode); BUG_ON(ret); } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..3e28521643fb 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -739,7 +739,7 @@ static int btrfs_set_super(struct super_block *s, void *data) * for multiple device setup. Make sure to keep it in sync. */ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) + const char *device_name, void *data) { struct block_device *bdev = NULL; struct super_block *s; @@ -762,7 +762,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, if (error) return ERR_PTR(error); - error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices); + error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices); if (error) goto error_free_subvol_name; -- cgit v1.2.3 From edc95aec57661c8e568e18f6c3f002aefa07ebc8 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 14:31:08 +0200 Subject: btrfs: remove nested duplicate variable declarations Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 1 - fs/btrfs/free-space-cache.c | 3 --- 2 files changed, 4 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..c60197b36bc8 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3647,7 +3647,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans, ret = 0; if (slot == 0) { - struct btrfs_disk_key disk_key; btrfs_cpu_key_to_disk(&disk_key, cpu_key); ret = fixup_low_keys(trans, root, path, &disk_key, 1); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 63731a1fb0a1..9e69c6b8409c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, while(1) { if (entry->bytes < bytes || (!entry->bitmap && entry->offset < min_start)) { - struct rb_node *node; - node = rb_next(&entry->offset_index); if (!node) break; @@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, cluster, entry, bytes, min_start); if (ret == 0) { - struct rb_node *node; node = rb_next(&entry->offset_index); if (!node) break; -- cgit v1.2.3 From 4891aca2dac612a2f21a3278d9906ade13b55788 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 16:45:00 +0200 Subject: btrfs: fix dereference before check The superblock's ->s_fs_info is 
properly set in btrfs_fill_super, after a call to open_ctree, which dereferences it before the check. Although tree_root is set via btrfs_set_super, let's be defensive and leave the check in place. Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 990afa8656a2..25e4b8f1d0ef 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1611,7 +1611,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *tree_root = btrfs_sb(sb); - struct btrfs_fs_info *fs_info = tree_root->fs_info; + struct btrfs_fs_info *fs_info = NULL; struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), @@ -1623,11 +1623,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_super_block *disk_super; - if (!extent_root || !tree_root || !fs_info || + if (!extent_root || !tree_root || !tree_root->fs_info || !chunk_root || !dev_root || !csum_root) { err = -ENOMEM; goto fail; } + fs_info = tree_root->fs_info; ret = init_srcu_struct(&fs_info->subvol_srcu); if (ret) { -- cgit v1.2.3 From c704005d886cf0bc9bc3974eb009b22fe0da32c7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 18:00:01 +0200 Subject: btrfs: unify checking of IS_ERR and null use IS_ERR_OR_NULL when possible, done by this coccinelle script: @ match @ identifier id; @@ ( - BUG_ON(IS_ERR(id) || !id); + BUG_ON(IS_ERR_OR_NULL(id)); | - IS_ERR(id) || !id + IS_ERR_OR_NULL(id) | - !id || IS_ERR(id) + IS_ERR_OR_NULL(id) ) Signed-off-by: David Sterba --- fs/btrfs/acl.c | 2 +- fs/btrfs/extent_io.c | 12 ++++++------ fs/btrfs/file.c | 2 +- fs/btrfs/inode.c | 12 ++++++------ fs/btrfs/relocation.c | 2 +- fs/btrfs/tree-log.c | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 5d505aaa72fb..1a21c99a91b8 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -287,7 +287,7 @@ int btrfs_acl_chmod(struct inode *inode) return 0; acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); - if (IS_ERR(acl) || !acl) + if (IS_ERR_OR_NULL(acl)) return PTR_ERR(acl); clone = posix_acl_clone(acl, GFP_KERNEL); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b730c12fa958..3c92712e9763 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2056,7 +2056,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, } em = get_extent(inode, page, pg_offset, cur, end - cur + 1, 0); - if (IS_ERR(em) || !em) { + if (IS_ERR_OR_NULL(em)) { SetPageError(page); unlock_extent(tree, cur, end, GFP_NOFS); break; @@ -2341,7 +2341,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, } em = epd->get_extent(inode, page, pg_offset, cur, end - cur + 1, 1); - if (IS_ERR(em) || !em) { + if (IS_ERR_OR_NULL(em)) { SetPageError(page); break; } @@ -2769,7 +2769,7 @@ int extent_prepare_write(struct extent_io_tree *tree, while (block_start <= block_end) { em = get_extent(inode, page, pg_offset, block_start, block_end - block_start + 1, 1); - if (IS_ERR(em) || !em) + if (IS_ERR_OR_NULL(em)) goto err; cur_end = min(block_end, extent_map_end(em) - 1); @@ -2899,7 +2899,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, len = end - start + 1; write_lock(&map->lock); em = lookup_extent_mapping(map, start, len); - if (!em || IS_ERR(em)) { + if (IS_ERR_OR_NULL(em)) {
write_unlock(&map->lock); break; } @@ -2942,7 +2942,7 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock, em = get_extent(inode, NULL, 0, start, blksize, 0); unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, &cached_state, GFP_NOFS); - if (!em || IS_ERR(em)) + if (IS_ERR_OR_NULL(em)) return 0; if (em->block_start > EXTENT_MAP_LAST_BYTE) @@ -2976,7 +2976,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode, break; len = (len + sectorsize - 1) & ~(sectorsize - 1); em = get_extent(inode, NULL, 0, offset, len, 0); - if (!em || IS_ERR(em)) + if (IS_ERR_OR_NULL(em)) return em; /* if this isn't a hole return it */ diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 75899a01dded..83abd274370b 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1375,7 +1375,7 @@ static long btrfs_fallocate(struct file *file, int mode, while (1) { em = btrfs_get_extent(inode, NULL, 0, cur_offset, alloc_end - cur_offset, 0); - BUG_ON(IS_ERR(em) || !em); + BUG_ON(IS_ERR_OR_NULL(em)); last_byte = min(extent_map_end(em), alloc_end); last_byte = (last_byte + mask) & ~mask; if (em->block_start == EXTENT_MAP_HOLE || diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fc966472e3ad..ba760c3ced28 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1855,7 +1855,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, } read_unlock(&em_tree->lock); - if (!em || IS_ERR(em)) { + if (IS_ERR_OR_NULL(em)) { kfree(failrec); return -EIO; } @@ -3006,7 +3006,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); + BUG_ON(IS_ERR_OR_NULL(di)); leaf = path->nodes[0]; btrfs_dir_item_key_to_cpu(leaf, di, &key); @@ -3022,7 +3022,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, BUG_ON(ret != -ENOENT); di = btrfs_search_dir_index_item(root, path, dir->i_ino, name, name_len); - BUG_ON(!di || IS_ERR(di)); + BUG_ON(IS_ERR_OR_NULL(di)); leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); @@ -3032,7 +3032,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); + BUG_ON(IS_ERR_OR_NULL(di)); leaf = path->nodes[0]; btrfs_dir_item_key_to_cpu(leaf, di, &key); @@ -3635,7 +3635,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) while (1) { em = btrfs_get_extent(inode, NULL, 0, cur_offset, block_end - cur_offset, 0); - BUG_ON(IS_ERR(em) || !em); + BUG_ON(IS_ERR_OR_NULL(em)); last_byte = min(extent_map_end(em), block_end); last_byte = (last_byte + mask) & ~mask; if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { @@ -3841,7 +3841,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, if (IS_ERR(di)) ret = PTR_ERR(di); - if (!di || IS_ERR(di)) + if (IS_ERR_OR_NULL(di)) goto out_err; btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 199a80134312..fed0aaec0753 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3220,7 +3220,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, key.offset = 0; inode = btrfs_iget(fs_info->sb, &key, root, NULL); - if (!inode || IS_ERR(inode) || is_bad_inode(inode)) { + if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) { if (inode && !IS_ERR(inode)) iput(inode); return -ENOENT; diff --git a/fs/btrfs/tree-log.c 
b/fs/btrfs/tree-log.c index f997ec0c1ba4..d5313d63f967 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1205,7 +1205,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, } else { BUG(); } - if (!dst_di || IS_ERR(dst_di)) { + if (IS_ERR_OR_NULL(dst_di)) { /* we need a sequence number to insert, so we only * do inserts for the BTRFS_DIR_INDEX_KEY types */ @@ -1426,7 +1426,7 @@ again: dir_key->offset, name, name_len, 0); } - if (!log_di || IS_ERR(log_di)) { + if (IS_ERR_OR_NULL(log_di)) { btrfs_dir_item_key_to_cpu(eb, di, &location); btrfs_release_path(root, path); btrfs_release_path(log, log_path); -- cgit v1.2.3 From 62a45b60923a576170a1a0c309c240d9f40d193d Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 20 Apr 2011 15:52:26 +0200 Subject: btrfs: make functions static when possible Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 4 ++-- fs/btrfs/extent-tree.c | 6 +++--- fs/btrfs/free-space-cache.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c60197b36bc8..a36c87db4dc4 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -74,8 +74,8 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p) * retake all the spinlocks in the path. You can safely use NULL * for held */ -noinline void btrfs_clear_path_blocking(struct btrfs_path *p, - struct extent_buffer *held) +static noinline void btrfs_clear_path_blocking(struct btrfs_path *p, + struct extent_buffer *held) { int i; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index cd52f7f556ef..7cdce82e03e7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -94,7 +94,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) return (cache->flags & bits) == bits; } -void btrfs_get_block_group(struct btrfs_block_group_cache *cache) +static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) { atomic_inc(&cache->count); } @@ -3651,8 +3651,8 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, spin_unlock(&block_rsv->lock); } -void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, - struct btrfs_block_rsv *dest, u64 num_bytes) +static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, + struct btrfs_block_rsv *dest, u64 num_bytes) { struct btrfs_space_info *space_info = block_rsv->space_info; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 9e69c6b8409c..d06abe20a729 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1455,8 +1455,8 @@ out: return ret; } -bool try_merge_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info, bool update_stat) +static bool try_merge_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info, bool update_stat) { struct btrfs_free_space *left_info; struct btrfs_free_space *right_info; -- cgit v1.2.3 From f993c883ad8e111fb9e9ae603540acbe94f7246c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 20 Apr 2011 23:35:57 +0200 Subject: btrfs: drop unused argument from extent_io_tree_init all callers pass GFP_NOFS, but the GFP mask argument is not used in the function; GFP_ATOMIC is passed to radix tree initialization and it's the only correct one, since we're using the preload/insert mechanism of the radix tree. Let's drop the gfp mask from the btrfs function; this will not change behaviour.
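As background for the GFP_ATOMIC remark above, the preload/insert idiom works roughly as in the sketch below. This is illustrative only; the names demo_tree and demo_insert are hypothetical, not taken from the btrfs sources. Nodes are preallocated under a mask that is allowed to sleep, and because preemption stays disabled between radix_tree_preload() and radix_tree_preload_end(), any fallback allocation the tree makes during the insert can only use the atomic mask stored in the root. This is why a per-call gfp argument to extent_io_tree_init() had no effect.

#include <linux/radix-tree.h>
#include <linux/gfp.h>

/* GFP_ATOMIC in the root covers only allocations made in atomic
 * context, e.g. when the per-CPU preload pool runs dry */
static RADIX_TREE(demo_tree, GFP_ATOMIC);	/* hypothetical example tree */

static int demo_insert(unsigned long index, void *item)
{
	int ret;

	/* preallocate tree nodes up front; GFP_NOFS may sleep here */
	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	/* preemption is disabled until preload_end(), so the insert
	 * itself must not sleep */
	ret = radix_tree_insert(&demo_tree, index, item);
	radix_tree_preload_end();

	return ret;
}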
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 9 ++++----- fs/btrfs/extent_io.c | 2 +- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 4 ++-- fs/btrfs/relocation.c | 2 +- fs/btrfs/transaction.c | 3 +-- 6 files changed, 10 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 25e4b8f1d0ef..3ce80f71e98a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1080,7 +1080,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->log_transid = 0; root->last_log_commit = 0; extent_io_tree_init(&root->dirty_log_pages, - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); @@ -1712,8 +1712,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, - fs_info->btree_inode->i_mapping, - GFP_NOFS); + fs_info->btree_inode->i_mapping); extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree, GFP_NOFS); @@ -1729,9 +1728,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->block_group_cache_tree = RB_ROOT; extent_io_tree_init(&fs_info->freed_extents[0], - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); extent_io_tree_init(&fs_info->freed_extents[1], - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); fs_info->pinned_extents = &fs_info->freed_extents[0]; fs_info->do_barriers = 1; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3c92712e9763..e67ed76668e0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -101,7 +101,7 @@ void extent_io_exit(void) } void extent_io_tree_init(struct extent_io_tree *tree, - struct address_space *mapping, gfp_t mask) + struct address_space *mapping) { tree->state = RB_ROOT; INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index b9ce2f720742..e9cfe8d1661c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -169,7 +169,7 @@ typedef struct extent_map *(get_extent_t)(struct inode *inode, int create); void extent_io_tree_init(struct extent_io_tree *tree, - struct address_space *mapping, gfp_t mask); + struct address_space *mapping); int try_release_extent_mapping(struct extent_map_tree *map, struct extent_io_tree *tree, struct page *page, gfp_t mask); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ba760c3ced28..3c98164f8b24 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6786,8 +6786,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree, GFP_NOFS); - extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); - extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS); + extent_io_tree_init(&ei->io_tree, &inode->i_data); + extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); mutex_init(&ei->log_mutex); btrfs_ordered_inode_tree_init(&ei->ordered_tree); INIT_LIST_HEAD(&ei->i_orphan); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index fed0aaec0753..f3edf45317bc 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3935,7 +3935,7 @@ static struct reloc_control *alloc_reloc_control(void) INIT_LIST_HEAD(&rc->reloc_roots); backref_cache_init(&rc->backref_cache); mapping_tree_init(&rc->reloc_root_tree); - extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS); + 
extent_io_tree_init(&rc->processed_blocks, NULL); return rc; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..955f76eb0fa8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -80,8 +80,7 @@ static noinline int join_transaction(struct btrfs_root *root) INIT_LIST_HEAD(&cur_trans->pending_snapshots); list_add_tail(&cur_trans->list, &root->fs_info->trans_list); extent_io_tree_init(&cur_trans->dirty_pages, - root->fs_info->btree_inode->i_mapping, - GFP_NOFS); + root->fs_info->btree_inode->i_mapping); spin_lock(&root->fs_info->new_trans_lock); root->fs_info->running_transaction = cur_trans; spin_unlock(&root->fs_info->new_trans_lock); -- cgit v1.2.3 From a8067e022ab54fde8953880a64572c3acca644dc Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 00:34:43 +0200 Subject: btrfs: drop unused parameter from extent_map_tree_init the GFP flags are not stored anywhere and all allocations are done via alloc_extent_map(GFP_NOFS). Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 3 +-- fs/btrfs/extent_map.c | 3 +-- fs/btrfs/extent_map.h | 2 +- fs/btrfs/inode.c | 2 +- fs/btrfs/volumes.c | 2 +- 5 files changed, 5 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3ce80f71e98a..f2ee584b8efd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1713,8 +1713,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, fs_info->btree_inode->i_mapping); - extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree, - GFP_NOFS); + extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index a24a3f2fa13e..3c8f374a8e2d 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -28,12 +28,11 @@ void extent_map_exit(void) /** * extent_map_tree_init - initialize extent map tree * @tree: tree to initialize - * @mask: flags for memory allocations during tree operations * * Initialize the extent tree @tree. Should be called for each new inode * or other user of the extent_map interface. 
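 *
 * An illustrative call site after this change (a sketch with a
 * hypothetical variable name, not part of the patch):
 *
 *	struct extent_map_tree em_tree;
 *
 *	extent_map_tree_init(&em_tree);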
*/ -void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) +void extent_map_tree_init(struct extent_map_tree *tree) { tree->map = RB_ROOT; rwlock_init(&tree->lock); diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 28b44dbd1e35..255813c51b9d 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -49,7 +49,7 @@ static inline u64 extent_map_block_end(struct extent_map *em) return em->block_start + em->block_len; } -void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask); +void extent_map_tree_init(struct extent_map_tree *tree); struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len); int add_extent_mapping(struct extent_map_tree *tree, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3c98164f8b24..f54c015cc294 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6785,7 +6785,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->force_compress = BTRFS_COMPRESS_NONE; inode = &ei->vfs_inode; - extent_map_tree_init(&ei->extent_tree, GFP_NOFS); + extent_map_tree_init(&ei->extent_tree); extent_io_tree_init(&ei->io_tree, &inode->i_data); extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); mutex_init(&ei->log_mutex); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c7367ae5a3e6..15d7dc943c9b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2849,7 +2849,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) void btrfs_mapping_init(struct btrfs_mapping_tree *tree) { - extent_map_tree_init(&tree->map_tree, GFP_NOFS); + extent_map_tree_init(&tree->map_tree); } void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) -- cgit v1.2.3 From 172ddd60a662c4d8bf2809462866ddddd6431ea5 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 00:48:27 +0200 Subject: btrfs: drop gfp parameter from alloc_extent_map pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/extent_map.c | 5 ++--- fs/btrfs/extent_map.h | 2 +- fs/btrfs/file.c | 4 ++-- fs/btrfs/inode.c | 12 ++++++------ fs/btrfs/relocation.c | 2 +- fs/btrfs/volumes.c | 4 ++-- 8 files changed, 16 insertions(+), 17 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f2ee584b8efd..e1e55679d061 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -154,7 +154,7 @@ static struct extent_map *btree_get_extent(struct inode *inode, } read_unlock(&em_tree->lock); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7cdce82e03e7..6a3d53783d55 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6694,7 +6694,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode, u64 start = extent_key->objectid - offset; u64 end = start + extent_key->offset - 1; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = start; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 3c8f374a8e2d..2d0410344ea3 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -40,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree) /** * alloc_extent_map - allocate new extent map structure - * @mask: memory allocation flags * * Allocate a new extent_map structure. 
The new structure is * returned with a reference count of one and needs to be * freed using free_extent_map() */ -struct extent_map *alloc_extent_map(gfp_t mask) +struct extent_map *alloc_extent_map(void) { struct extent_map *em; - em = kmem_cache_alloc(extent_map_cache, mask); + em = kmem_cache_alloc(extent_map_cache, GFP_NOFS); if (!em) return NULL; em->in_tree = 0; diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 255813c51b9d..33a7890b1f40 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -56,7 +56,7 @@ int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); -struct extent_map *alloc_extent_map(gfp_t mask); +struct extent_map *alloc_extent_map(void); void free_extent_map(struct extent_map *em); int __init extent_map_init(void); void extent_map_exit(void); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 83abd274370b..80eabe85409a 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -191,9 +191,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, } while (1) { if (!split) - split = alloc_extent_map(GFP_NOFS); + split = alloc_extent_map(); if (!split2) - split2 = alloc_extent_map(GFP_NOFS); + split2 = alloc_extent_map(); BUG_ON(!split || !split2); write_lock(&em_tree->lock); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f54c015cc294..26f4d56cf049 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -649,7 +649,7 @@ retry: async_extent->start + async_extent->ram_size - 1, 0); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = async_extent->start; em->len = async_extent->ram_size; @@ -826,7 +826,7 @@ static noinline int cow_file_range(struct inode *inode, (u64)-1, &ins, 1); BUG_ON(ret); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = start; em->orig_start = em->start; @@ -1177,7 +1177,7 @@ out_check: struct extent_map *em; struct extent_map_tree *em_tree; em_tree = &BTRFS_I(inode)->extent_tree; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = cur_offset; em->orig_start = em->start; @@ -5069,7 +5069,7 @@ again: else goto out; } - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { err = -ENOMEM; goto out; @@ -5382,7 +5382,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag u64 hole_start = start; u64 hole_len = len; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { err = -ENOMEM; goto out; @@ -5483,7 +5483,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, } if (!em) { - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f3edf45317bc..2097a88f60aa 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2870,7 +2870,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, struct extent_map *em; int ret = 0; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) return -ENOMEM; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 15d7dc943c9b..76acd1d235e4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2609,7 +2609,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { ret = -ENOMEM; goto 
error; @@ -3499,7 +3499,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, free_extent_map(em); } - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) return -ENOMEM; num_stripes = btrfs_chunk_num_stripes(leaf, chunk); -- cgit v1.2.3 From f09d1f60e6aa82fb4cfaa525e21f6287fc1516f4 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 01:08:01 +0200 Subject: btrfs: drop gfp parameter from find_extent_buffer pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/extent_io.c | 3 +-- fs/btrfs/extent_io.h | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e1e55679d061..1c0752e99066 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -963,7 +963,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, struct inode *btree_inode = root->fs_info->btree_inode; struct extent_buffer *eb; eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, - bytenr, blocksize, GFP_NOFS); + bytenr, blocksize); return eb; } @@ -2696,7 +2696,7 @@ int btree_lock_page_hook(struct page *page) goto out; len = page->private >> 2; - eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS); + eb = find_extent_buffer(io_tree, bytenr, len); if (!eb) goto out; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e67ed76668e0..ad0f0a95ad3a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3377,8 +3377,7 @@ free_eb: } struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, - u64 start, unsigned long len, - gfp_t mask) + u64 start, unsigned long len) { struct extent_buffer *eb; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e9cfe8d1661c..ff220c3c01b0 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -263,8 +263,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, struct page *page0, gfp_t mask); struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, - u64 start, unsigned long len, - gfp_t mask); + u64 start, unsigned long len); void free_extent_buffer(struct extent_buffer *eb); int read_extent_buffer_pages(struct extent_io_tree *tree, struct extent_buffer *eb, u64 start, int wait, -- cgit v1.2.3 From ba14419264684b290f0d0b7f48d26eafb11fc0c6 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 01:12:06 +0200 Subject: btrfs: drop gfp parameter from alloc_extent_buffer pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/extent_io.c | 7 +++---- fs/btrfs/extent_io.h | 3 +-- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1c0752e99066..4084959b36fd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -380,7 +380,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) len = page->private >> 2; WARN_ON(len == 0); - eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + eb = alloc_extent_buffer(tree, start, len, page); if (eb == NULL) { WARN_ON(1); goto out; @@ -525,7 +525,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, len = page->private >> 2; WARN_ON(len == 0); - eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + eb = alloc_extent_buffer(tree, start, len, page); if (eb == NULL) { ret = -EIO; goto out; @@ -974,7 +974,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, struct extent_buffer 
*eb; eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, - bytenr, blocksize, NULL, GFP_NOFS); + bytenr, blocksize, NULL); return eb; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ad0f0a95ad3a..9369289ce771 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3266,8 +3266,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, - struct page *page0, - gfp_t mask) + struct page *page0) { unsigned long num_pages = num_extent_pages(start, len); unsigned long i; @@ -3288,7 +3287,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, } rcu_read_unlock(); - eb = __alloc_extent_buffer(tree, start, len, mask); + eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS); if (!eb) return NULL; @@ -3305,7 +3304,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, i = 0; } for (; i < num_pages; i++, index++) { - p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM); + p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM); if (!p) { WARN_ON(1); goto free_eb; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index ff220c3c01b0..3c3be74c934e 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -260,8 +260,7 @@ void set_page_extent_mapped(struct page *page); struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, - struct page *page0, - gfp_t mask) + struct page *page0); struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len); void free_extent_buffer(struct extent_buffer *eb); -- cgit v1.2.3 From b3b4aa74b58bded927f579fff787fb6fa1c0393c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 01:20:15 +0200 Subject: btrfs: drop unused parameter from btrfs_release_path parameter tree root: it's not used since commit 5f39d397dfbe140a14edecd4e73c34ce23c4f9ee ("Btrfs: Create extent_buffer interface for large blocksizes") Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 28 ++++++------ fs/btrfs/ctree.h | 2 +- fs/btrfs/dir-item.c | 2 +- fs/btrfs/extent-tree.c | 44 ++++++++++---------- fs/btrfs/file-item.c | 12 +++--- fs/btrfs/file.c | 12 +++--- fs/btrfs/free-space-cache.c | 14 +++---- fs/btrfs/inode.c | 34 ++++++++-------- fs/btrfs/ioctl.c | 12 +++--- fs/btrfs/relocation.c | 30 +++++++------- fs/btrfs/root-tree.c | 10 ++--- fs/btrfs/tree-defrag.c | 2 +- fs/btrfs/tree-log.c | 98 ++++++++++++++++++++++----------------------- fs/btrfs/volumes.c | 16 ++++---- fs/btrfs/xattr.c | 4 +- 15 files changed, 160 insertions(+), 160 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a36c87db4dc4..fad8f23d70f0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -107,7 +107,7 @@ void btrfs_free_path(struct btrfs_path *p) { if (!p) return; - btrfs_release_path(NULL, p); + btrfs_release_path(p); kmem_cache_free(btrfs_path_cachep, p); } @@ -117,7 +117,7 @@ void btrfs_free_path(struct btrfs_path *p) * * It is safe to call this on paths that no locks or extent buffers held.
*/ -noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) +noinline void btrfs_release_path(struct btrfs_path *p) { int i; @@ -1328,7 +1328,7 @@ static noinline int reada_for_balance(struct btrfs_root *root, ret = -EAGAIN; /* release the whole path */ - btrfs_release_path(root, path); + btrfs_release_path(path); /* read the blocks */ if (block1) @@ -1475,7 +1475,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, return 0; } free_extent_buffer(tmp); - btrfs_release_path(NULL, p); + btrfs_release_path(p); return -EIO; } } @@ -1494,7 +1494,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, if (p->reada) reada_for_search(root, p, level, slot, key->objectid); - btrfs_release_path(NULL, p); + btrfs_release_path(p); ret = -EAGAIN; tmp = read_tree_block(root, blocknr, blocksize, 0); @@ -1563,7 +1563,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans, } b = p->nodes[level]; if (!b) { - btrfs_release_path(NULL, p); + btrfs_release_path(p); goto again; } BUG_ON(btrfs_header_nritems(b) == 1); @@ -1753,7 +1753,7 @@ done: if (!p->leave_spinning) btrfs_set_path_blocking(p); if (ret < 0) - btrfs_release_path(root, p); + btrfs_release_path(p); return ret; } @@ -3026,7 +3026,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, struct btrfs_file_extent_item); extent_len = btrfs_file_extent_num_bytes(leaf, fi); } - btrfs_release_path(root, path); + btrfs_release_path(path); path->keep_locks = 1; path->search_for_split = 1; @@ -3948,7 +3948,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) else return 1; - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ret; @@ -4072,7 +4072,7 @@ find_next_key: sret = btrfs_find_next_key(root, path, min_key, level, cache_only, min_trans); if (sret == 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } else { goto out; @@ -4151,7 +4151,7 @@ next: btrfs_node_key_to_cpu(c, &cur_key, slot); orig_lowest = path->lowest_level; - btrfs_release_path(root, path); + btrfs_release_path(path); path->lowest_level = level; ret = btrfs_search_slot(NULL, root, &cur_key, path, 0, 0); @@ -4228,7 +4228,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) again: level = 1; next = NULL; - btrfs_release_path(root, path); + btrfs_release_path(path); path->keep_locks = 1; @@ -4284,7 +4284,7 @@ again: goto again; if (ret < 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto done; } @@ -4323,7 +4323,7 @@ again: goto again; if (ret < 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto done; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b5433bbe7516..3f301f05099d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2290,7 +2290,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *parent, int start_slot, int cache_only, u64 *last_ret, struct btrfs_key *progress); -void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p); +void btrfs_release_path(struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); void btrfs_set_path_blocking(struct btrfs_path *p); diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index c62f02f6ae69..ab8afed671a0 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -172,7 +172,7 @@ second_insert: ret = 0; goto out_free; } - btrfs_release_path(root, path); 
+ btrfs_release_path(path); btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); key.offset = index; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6a3d53783d55..a160f11465f8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -379,7 +379,7 @@ again: break; caching_ctl->progress = last; - btrfs_release_path(extent_root, path); + btrfs_release_path(path); up_read(&fs_info->extent_commit_sem); mutex_unlock(&caching_ctl->mutex); if (btrfs_transaction_in_commit(fs_info)) @@ -754,7 +754,7 @@ again: atomic_inc(&head->node.refs); spin_unlock(&delayed_refs->lock); - btrfs_release_path(root->fs_info->extent_root, path); + btrfs_release_path(path); mutex_lock(&head->mutex); mutex_unlock(&head->mutex); @@ -934,7 +934,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans, break; } } - btrfs_release_path(root, path); + btrfs_release_path(path); if (owner < BTRFS_FIRST_FREE_OBJECTID) new_size += sizeof(*bi); @@ -1042,7 +1042,7 @@ again: return 0; #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 key.type = BTRFS_EXTENT_REF_V0_KEY; - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) { err = ret; @@ -1080,7 +1080,7 @@ again: if (match_extent_data_ref(leaf, ref, root_objectid, owner, offset)) { if (recow) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } err = 0; @@ -1141,7 +1141,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, if (match_extent_data_ref(leaf, ref, root_objectid, owner, offset)) break; - btrfs_release_path(root, path); + btrfs_release_path(path); key.offset++; ret = btrfs_insert_empty_item(trans, root, path, &key, size); @@ -1167,7 +1167,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); ret = 0; fail: - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } @@ -1293,7 +1293,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, ret = -ENOENT; #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 if (ret == -ENOENT && parent) { - btrfs_release_path(root, path); + btrfs_release_path(path); key.type = BTRFS_EXTENT_REF_V0_KEY; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) @@ -1322,7 +1322,7 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans, } ret = btrfs_insert_empty_item(trans, root, path, &key, 0); - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } @@ -1608,7 +1608,7 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans, if (ret != -ENOENT) return ret; - btrfs_release_path(root, path); + btrfs_release_path(path); *ref_ret = NULL; if (owner < BTRFS_FIRST_FREE_OBJECTID) { @@ -1862,7 +1862,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, __run_delayed_extent_op(extent_op, leaf, item); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root->fs_info->extent_root, path); + btrfs_release_path(path); path->reada = 1; path->leave_spinning = 1; @@ -2361,7 +2361,7 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, atomic_inc(&head->node.refs); spin_unlock(&delayed_refs->lock); - btrfs_release_path(root->fs_info->extent_root, path); + btrfs_release_path(path); mutex_lock(&head->mutex); mutex_unlock(&head->mutex); @@ -2732,7 +2732,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, bi = btrfs_item_ptr_offset(leaf, path->slots[0]); write_extent_buffer(leaf, &cache->item, bi, 
sizeof(cache->item)); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(extent_root, path); + btrfs_release_path(path); fail: if (ret) return ret; @@ -2785,7 +2785,7 @@ again: inode = lookup_free_space_inode(root, block_group, path); if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { ret = PTR_ERR(inode); - btrfs_release_path(root, path); + btrfs_release_path(path); goto out; } @@ -2854,7 +2854,7 @@ again: out_put: iput(inode); out_free: - btrfs_release_path(root, path); + btrfs_release_path(path); out: spin_lock(&block_group->lock); block_group->disk_cache_state = dcs; @@ -4541,7 +4541,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, NULL, refs_to_drop, is_data); BUG_ON(ret); - btrfs_release_path(extent_root, path); + btrfs_release_path(path); path->leave_spinning = 1; key.objectid = bytenr; @@ -4580,7 +4580,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, owner_objectid, 0); BUG_ON(ret < 0); - btrfs_release_path(extent_root, path); + btrfs_release_path(path); path->leave_spinning = 1; key.objectid = bytenr; @@ -4650,7 +4650,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, ret = btrfs_del_items(trans, extent_root, path, path->slots[0], num_to_del); BUG_ON(ret); - btrfs_release_path(extent_root, path); + btrfs_release_path(path); if (is_data) { ret = btrfs_del_csums(trans, root, bytenr, num_bytes); @@ -6480,7 +6480,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, trans->block_rsv = block_rsv; } } - btrfs_release_path(root, path); + btrfs_release_path(path); BUG_ON(err); ret = btrfs_del_root(trans, tree_root, &root->root_key); @@ -8580,7 +8580,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) memcpy(&cache->key, &found_key, sizeof(found_key)); key.objectid = found_key.objectid + found_key.offset; - btrfs_release_path(root, path); + btrfs_release_path(path); cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; @@ -8802,12 +8802,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, if (ret < 0) goto out; if (ret > 0) - btrfs_release_path(tree_root, path); + btrfs_release_path(path); if (ret == 0) { ret = btrfs_del_item(trans, tree_root, path); if (ret) goto out; - btrfs_release_path(tree_root, path); + btrfs_release_path(path); } spin_lock(&root->fs_info->block_group_cache_lock); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..f47e43d855aa 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, u32 item_size; if (item) - btrfs_release_path(root, path); + btrfs_release_path(path); item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, disk_bytenr, 0); if (IS_ERR(item)) { @@ -213,7 +213,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, (unsigned long long)offset); } item = NULL; - btrfs_release_path(root, path); + btrfs_release_path(path); goto found; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, @@ -631,7 +631,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, if (key.offset < bytenr) break; } - btrfs_release_path(root, path); + btrfs_release_path(path); } out: btrfs_free_path(path); @@ -722,7 +722,7 @@ again: * at this point, we know the tree has an item, but it isn't big * enough yet to put our csum in. 
Grow it */ - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_search_slot(trans, root, &file_key, path, csum_size, 1); if (ret < 0) @@ -766,7 +766,7 @@ again: } insert: - btrfs_release_path(root, path); + btrfs_release_path(path); csum_offset = 0; if (found_next) { u64 tmp = total_bytes + root->sectorsize; @@ -850,7 +850,7 @@ next_sector: } btrfs_mark_buffer_dirty(path->nodes[0]); if (total_bytes < sums->len) { - btrfs_release_path(root, path); + btrfs_release_path(path); cond_resched(); goto again; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 80eabe85409a..566bdf298ea8 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -376,7 +376,7 @@ next_slot: search_start = max(key.offset, start); if (recow) { - btrfs_release_path(root, path); + btrfs_release_path(path); continue; } @@ -393,7 +393,7 @@ next_slot: ret = btrfs_duplicate_item(trans, root, path, &new_key); if (ret == -EAGAIN) { - btrfs_release_path(root, path); + btrfs_release_path(path); continue; } if (ret < 0) @@ -516,7 +516,7 @@ next_slot: del_nr = 0; del_slot = 0; - btrfs_release_path(root, path); + btrfs_release_path(path); continue; } @@ -681,7 +681,7 @@ again: new_key.offset = split; ret = btrfs_duplicate_item(trans, root, path, &new_key); if (ret == -EAGAIN) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } BUG_ON(ret < 0); @@ -721,7 +721,7 @@ again: inode->i_ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } extent_end = other_end; @@ -738,7 +738,7 @@ again: inode->i_ino, bytenr, orig_offset, &other_start, &other_end)) { if (recow) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } key.offset = other_start; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d06abe20a729..48fafcb85b0e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -61,7 +61,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, if (ret < 0) return ERR_PTR(ret); if (ret > 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return ERR_PTR(-ENOENT); } @@ -70,7 +70,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, struct btrfs_free_space_header); btrfs_free_space_key(leaf, header, &disk_key); btrfs_disk_key_to_cpu(&location, &disk_key); - btrfs_release_path(root, path); + btrfs_release_path(path); inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); if (!inode) @@ -134,7 +134,7 @@ int create_free_space_inode(struct btrfs_root *root, btrfs_set_inode_block_group(leaf, inode_item, block_group->key.objectid); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); key.objectid = BTRFS_FREE_SPACE_OBJECTID; key.offset = block_group->key.objectid; @@ -143,7 +143,7 @@ int create_free_space_inode(struct btrfs_root *root, ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(struct btrfs_free_space_header)); if (ret < 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } leaf = path->nodes[0]; @@ -152,7 +152,7 @@ int create_free_space_inode(struct btrfs_root *root, memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); btrfs_set_free_space_key(leaf, header, &disk_key); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } @@ -822,7 +822,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 0, 
0, NULL, GFP_NOFS); - btrfs_release_path(root, path); + btrfs_release_path(path); goto out_free; } } @@ -832,7 +832,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, btrfs_set_free_space_bitmaps(leaf, header, bitmaps); btrfs_set_free_space_generation(leaf, header, trans->transid); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = 1; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 26f4d56cf049..2840989737b7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1164,7 +1164,7 @@ out_check: goto next_slot; } - btrfs_release_path(root, path); + btrfs_release_path(path); if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, found_key.offset - 1, page_started, @@ -1222,7 +1222,7 @@ out_check: if (cur_offset > end) break; } - btrfs_release_path(root, path); + btrfs_release_path(path); if (cur_offset <= end && cow_start == (u64)-1) cow_start = cur_offset; @@ -2346,7 +2346,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) break; /* release the path since we're done with it */ - btrfs_release_path(root, path); + btrfs_release_path(path); /* * this is where we are basically btrfs_lookup, without the @@ -2712,7 +2712,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, ret = btrfs_delete_one_dir_name(trans, root, path, di); if (ret) goto err; - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_del_inode_ref(trans, root, name, name_len, inode->i_ino, @@ -2735,7 +2735,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; } ret = btrfs_delete_one_dir_name(trans, root, path, di); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, dir->i_ino); @@ -2862,7 +2862,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, } else { check_link = 0; } - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, 0); @@ -2876,7 +2876,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, } else { check_link = 0; } - btrfs_release_path(root, path); + btrfs_release_path(path); if (ret == 0 && S_ISREG(inode->i_mode)) { ret = btrfs_lookup_file_extent(trans, root, path, @@ -2888,7 +2888,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, BUG_ON(ret == 0); if (check_path_shared(root, path)) goto out; - btrfs_release_path(root, path); + btrfs_release_path(path); } if (!check_link) { @@ -2909,7 +2909,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, err = 0; goto out; } - btrfs_release_path(root, path); + btrfs_release_path(path); ref = btrfs_lookup_inode_ref(trans, root, path, dentry->d_name.name, dentry->d_name.len, @@ -2922,7 +2922,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, if (check_path_shared(root, path)) goto out; index = btrfs_inode_ref_index(path->nodes[0], ref); - btrfs_release_path(root, path); + btrfs_release_path(path); di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, dentry->d_name.name, dentry->d_name.len, 0); @@ -3013,7 +3013,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); ret = btrfs_delete_one_dir_name(trans, root, path, di); BUG_ON(ret); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 
objectid, root->root_key.objectid, @@ -3026,7 +3026,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(root, path); + btrfs_release_path(path); index = key.offset; } @@ -3039,7 +3039,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); ret = btrfs_delete_one_dir_name(trans, root, path, di); BUG_ON(ret); - btrfs_release_path(root, path); + btrfs_release_path(path); btrfs_i_size_write(dir, dir->i_size - name_len * 2); dir->i_mtime = dir->i_ctime = CURRENT_TIME; @@ -3477,7 +3477,7 @@ delete: BUG_ON(ret); pending_del_nr = 0; } - btrfs_release_path(root, path); + btrfs_release_path(path); goto search_again; } else { path->slots[0]--; @@ -3899,7 +3899,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, if (ret) goto out; - btrfs_release_path(root->fs_info->tree_root, path); + btrfs_release_path(path); new_root = btrfs_read_fs_root_no_name(root->fs_info, location); if (IS_ERR(new_root)) { @@ -5223,7 +5223,7 @@ again: kunmap(page); free_extent_map(em); em = NULL; - btrfs_release_path(root, path); + btrfs_release_path(path); trans = btrfs_join_transaction(root, 1); if (IS_ERR(trans)) return ERR_CAST(trans); @@ -5249,7 +5249,7 @@ not_found_em: em->block_start = EXTENT_MAP_HOLE; set_bit(EXTENT_FLAG_VACANCY, &em->flags); insert: - btrfs_release_path(root, path); + btrfs_release_path(path); if (em->start > start || extent_map_end(em) <= start) { printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " "[%llu %llu]\n", (unsigned long long)em->start, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ffb48d6c5433..d11fc6548e15 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1396,7 +1396,7 @@ static noinline int search_ioctl(struct inode *inode, } ret = copy_to_sk(root, path, &key, sk, args->buf, &sk_offset, &num_found); - btrfs_release_path(root, path); + btrfs_release_path(path); if (ret || num_found >= sk->nr_items) break; @@ -1503,7 +1503,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, if (key.offset == BTRFS_FIRST_FREE_OBJECTID) break; - btrfs_release_path(root, path); + btrfs_release_path(path); key.objectid = key.offset; key.offset = (u64)-1; dirid = key.objectid; @@ -1982,7 +1982,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, datal = btrfs_file_extent_ram_bytes(leaf, extent); } - btrfs_release_path(root, path); + btrfs_release_path(path); if (key.offset + datal <= off || key.offset >= off+len) @@ -2092,7 +2092,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, } btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); inode->i_mtime = inode->i_ctime = CURRENT_TIME; @@ -2113,12 +2113,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_end_transaction(trans, root); } next: - btrfs_release_path(root, path); + btrfs_release_path(path); key.offset++; } ret = 0; out: - btrfs_release_path(root, path); + btrfs_release_path(path); unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); out_unlock: mutex_unlock(&src->i_mutex); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 2097a88f60aa..f7b799b151aa 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -961,7 +961,7 @@ again: lower = upper; upper = NULL; } - btrfs_release_path(root, path2); + btrfs_release_path(path2); next: if (ptr < 
end) { ptr += btrfs_extent_inline_ref_size(key.type); @@ -974,7 +974,7 @@ next: if (ptr >= end) path1->slots[0]++; } - btrfs_release_path(rc->extent_root, path1); + btrfs_release_path(path1); cur->checked = 1; WARN_ON(exist); @@ -1749,7 +1749,7 @@ again: btrfs_node_key_to_cpu(path->nodes[level], &key, path->slots[level]); - btrfs_release_path(src, path); + btrfs_release_path(path); path->lowest_level = level; ret = btrfs_search_slot(trans, src, &key, path, 0, 1); @@ -2496,7 +2496,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, path->locks[upper->level] = 0; slot = path->slots[upper->level]; - btrfs_release_path(NULL, path); + btrfs_release_path(path); } else { ret = btrfs_bin_search(upper->eb, key, upper->level, &slot); @@ -2737,7 +2737,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, } else { path->lowest_level = node->level; ret = btrfs_search_slot(trans, root, key, path, 0, 1); - btrfs_release_path(root, path); + btrfs_release_path(path); if (ret > 0) ret = 0; } @@ -3119,7 +3119,7 @@ static int add_tree_block(struct reloc_control *rc, #endif } - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); BUG_ON(level == -1); @@ -3505,7 +3505,7 @@ int add_data_references(struct reloc_control *rc, } path->slots[0]++; } - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); if (err) free_block_list(blocks); return err; @@ -3568,7 +3568,7 @@ next: EXTENT_DIRTY); if (ret == 0 && start <= key.objectid) { - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); rc->search_start = end + 1; } else { rc->search_start = key.objectid + key.offset; @@ -3576,7 +3576,7 @@ next: return 0; } } - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); return ret; } @@ -3713,7 +3713,7 @@ restart: flags = BTRFS_EXTENT_FLAG_DATA; if (path_change) { - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); path->search_commit_root = 1; path->skip_locking = 1; @@ -3736,7 +3736,7 @@ restart: (flags & BTRFS_EXTENT_FLAG_DATA)) { ret = add_data_references(rc, &key, path, &blocks); } else { - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); ret = 0; } if (ret < 0) { @@ -3799,7 +3799,7 @@ restart: } } - btrfs_release_path(rc->extent_root, path); + btrfs_release_path(path); clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, GFP_NOFS); @@ -3867,7 +3867,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans, btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC); btrfs_mark_buffer_dirty(leaf); - btrfs_release_path(root, path); + btrfs_release_path(path); out: btrfs_free_path(path); return ret; @@ -4109,7 +4109,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) } leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(root->fs_info->tree_root, path); + btrfs_release_path(path); if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || key.type != BTRFS_ROOT_ITEM_KEY) @@ -4141,7 +4141,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) key.offset--; } - btrfs_release_path(root->fs_info->tree_root, path); + btrfs_release_path(path); if (list_empty(&reloc_roots)) goto out; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6928bff62daa..59a94c1d9815 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -57,7 +57,7 @@ again: btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]); if (search_key.type != BTRFS_ROOT_ITEM_KEY) { search_key.offset++; - 
btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } ret = 0; @@ -230,7 +230,7 @@ again: memcpy(&found_key, &key, sizeof(key)); key.offset++; - btrfs_release_path(root, path); + btrfs_release_path(path); dead_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, &found_key); @@ -292,7 +292,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(tree_root, path); + btrfs_release_path(path); if (key.objectid != BTRFS_ORPHAN_OBJECTID || key.type != BTRFS_ORPHAN_ITEM_KEY) @@ -390,7 +390,7 @@ again: err = -ENOENT; if (key.type == BTRFS_ROOT_BACKREF_KEY) { - btrfs_release_path(tree_root, path); + btrfs_release_path(path); key.objectid = ref_id; key.type = BTRFS_ROOT_REF_KEY; key.offset = root_id; @@ -463,7 +463,7 @@ again: btrfs_mark_buffer_dirty(leaf); if (key.type == BTRFS_ROOT_BACKREF_KEY) { - btrfs_release_path(tree_root, path); + btrfs_release_path(path); key.objectid = ref_id; key.type = BTRFS_ROOT_REF_KEY; key.offset = root_id; diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 992ab425599d..3b580ee8ab1d 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, ret = 0; goto out; } - btrfs_release_path(root, path); + btrfs_release_path(path); wret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (wret < 0) { diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d5313d63f967..c599e8c2a53c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -333,13 +333,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, goto insert; if (item_size == 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } dst_copy = kmalloc(item_size, GFP_NOFS); src_copy = kmalloc(item_size, GFP_NOFS); if (!dst_copy || !src_copy) { - btrfs_release_path(root, path); + btrfs_release_path(path); kfree(dst_copy); kfree(src_copy); return -ENOMEM; @@ -361,13 +361,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, * sync */ if (ret == 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } } insert: - btrfs_release_path(root, path); + btrfs_release_path(path); /* try to insert the key into the destination tree */ ret = btrfs_insert_empty_item(trans, root, path, key, item_size); @@ -438,7 +438,7 @@ insert: } no_copy: btrfs_mark_buffer_dirty(path->nodes[0]); - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } @@ -544,11 +544,11 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, * we don't have to do anything */ if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); goto out; } } - btrfs_release_path(root, path); + btrfs_release_path(path); saved_nbytes = inode_get_bytes(inode); /* drop any overlapping extents */ @@ -600,7 +600,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, key->objectid, offset, &ins); BUG_ON(ret); } - btrfs_release_path(root, path); + btrfs_release_path(path); if (btrfs_file_extent_compression(eb, item)) { csum_start = ins.objectid; @@ -629,7 +629,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, kfree(sums); } } else { - btrfs_release_path(root, path); + btrfs_release_path(path); } } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { /* inline extents are easy, we just overwrite them */ @@ -675,7 +675,7 @@ static noinline int 
drop_one_dir_item(struct btrfs_trans_handle *trans, return -ENOMEM; read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); - btrfs_release_path(root, path); + btrfs_release_path(path); inode = read_one_inode(root, location.objectid); BUG_ON(!inode); @@ -713,7 +713,7 @@ static noinline int inode_in_dir(struct btrfs_root *root, goto out; } else goto out; - btrfs_release_path(root, path); + btrfs_release_path(path); di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); if (di && !IS_ERR(di)) { @@ -724,7 +724,7 @@ static noinline int inode_in_dir(struct btrfs_root *root, goto out; match = 1; out: - btrfs_release_path(root, path); + btrfs_release_path(path); return match; } @@ -884,7 +884,7 @@ again: if (!backref_in_log(log, key, victim_name, victim_name_len)) { btrfs_inc_nlink(inode); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_unlink_inode(trans, root, dir, inode, victim_name, @@ -901,7 +901,7 @@ again: */ search_done = 1; } - btrfs_release_path(root, path); + btrfs_release_path(path); insert: /* insert our name */ @@ -922,7 +922,7 @@ out: BUG_ON(ret); out_nowrite: - btrfs_release_path(root, path); + btrfs_release_path(path); iput(dir); iput(inode); return 0; @@ -999,9 +999,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, if (key.offset == 0) break; key.offset--; - btrfs_release_path(root, path); + btrfs_release_path(path); } - btrfs_release_path(root, path); + btrfs_release_path(path); if (nlink != inode->i_nlink) { inode->i_nlink = nlink; btrfs_update_inode(trans, root, inode); @@ -1052,7 +1052,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, root, path); BUG_ON(ret); - btrfs_release_path(root, path); + btrfs_release_path(path); inode = read_one_inode(root, key.offset); BUG_ON(!inode); @@ -1068,7 +1068,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, */ key.offset = (u64)-1; } - btrfs_release_path(root, path); + btrfs_release_path(path); return 0; } @@ -1096,7 +1096,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, ret = btrfs_insert_empty_item(trans, root, path, &key, 0); - btrfs_release_path(root, path); + btrfs_release_path(path); if (ret == 0) { btrfs_inc_nlink(inode); btrfs_update_inode(trans, root, inode); @@ -1192,7 +1192,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, exists = 1; else exists = 0; - btrfs_release_path(root, path); + btrfs_release_path(path); if (key->type == BTRFS_DIR_ITEM_KEY) { dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, @@ -1236,13 +1236,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, if (key->type == BTRFS_DIR_INDEX_KEY) goto insert; out: - btrfs_release_path(root, path); + btrfs_release_path(path); kfree(name); iput(dir); return 0; insert: - btrfs_release_path(root, path); + btrfs_release_path(path); ret = insert_one_name(trans, root, path, key->objectid, key->offset, name, name_len, log_type, &log_key); @@ -1363,7 +1363,7 @@ next: *end_ret = found_end; ret = 0; out: - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } @@ -1428,8 +1428,8 @@ again: } if (IS_ERR_OR_NULL(log_di)) { btrfs_dir_item_key_to_cpu(eb, di, &location); - btrfs_release_path(root, path); - btrfs_release_path(log, log_path); + btrfs_release_path(path); + btrfs_release_path(log_path); inode = read_one_inode(root, location.objectid); BUG_ON(!inode); @@ -1453,7 +1453,7 @@ again: ret = 0; 
goto out; } - btrfs_release_path(log, log_path); + btrfs_release_path(log_path); kfree(name); ptr = (unsigned long)(di + 1); @@ -1461,8 +1461,8 @@ again: } ret = 0; out: - btrfs_release_path(root, path); - btrfs_release_path(log, log_path); + btrfs_release_path(path); + btrfs_release_path(log_path); return ret; } @@ -1550,7 +1550,7 @@ again: break; dir_key.offset = found_key.offset + 1; } - btrfs_release_path(root, path); + btrfs_release_path(path); if (range_end == (u64)-1) break; range_start = range_end + 1; @@ -1561,11 +1561,11 @@ next_type: if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { key_type = BTRFS_DIR_LOG_INDEX_KEY; dir_key.type = BTRFS_DIR_INDEX_KEY; - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } out: - btrfs_release_path(root, path); + btrfs_release_path(path); btrfs_free_path(log_path); iput(dir); return ret; @@ -2225,7 +2225,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, bytes_del += name_len; BUG_ON(ret); } - btrfs_release_path(log, path); + btrfs_release_path(path); di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, index, name, name_len, -1); if (IS_ERR(di)) { @@ -2247,7 +2247,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, key.objectid = dir->i_ino; key.offset = 0; key.type = BTRFS_INODE_ITEM_KEY; - btrfs_release_path(log, path); + btrfs_release_path(path); ret = btrfs_search_slot(trans, log, &key, path, 0, 1); if (ret < 0) { @@ -2269,7 +2269,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(path->nodes[0]); } else ret = 0; - btrfs_release_path(log, path); + btrfs_release_path(path); } fail: btrfs_free_path(path); @@ -2344,7 +2344,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, struct btrfs_dir_log_item); btrfs_set_dir_log_end(path->nodes[0], item, last_offset); btrfs_mark_buffer_dirty(path->nodes[0]); - btrfs_release_path(log, path); + btrfs_release_path(path); return 0; } @@ -2393,10 +2393,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, min_key.objectid = inode->i_ino; min_key.type = key_type; min_key.offset = (u64)-1; - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); if (ret < 0) { - btrfs_release_path(root, path); + btrfs_release_path(path); return ret; } ret = btrfs_previous_item(root, path, inode->i_ino, key_type); @@ -2432,7 +2432,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, } } } - btrfs_release_path(root, path); + btrfs_release_path(path); /* find the first key from this transaction again */ ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); @@ -2490,8 +2490,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, } } done: - btrfs_release_path(root, path); - btrfs_release_path(log, dst_path); + btrfs_release_path(path); + btrfs_release_path(dst_path); if (err == 0) { *last_offset_ret = last_offset; @@ -2588,9 +2588,9 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, log, path); BUG_ON(ret); - btrfs_release_path(log, path); + btrfs_release_path(path); } - btrfs_release_path(log, path); + btrfs_release_path(path); return ret; } @@ -2696,7 +2696,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, } btrfs_mark_buffer_dirty(dst_path->nodes[0]); - btrfs_release_path(log, dst_path); + btrfs_release_path(dst_path); kfree(ins_data); /* @@ -2845,7 +2845,7 @@ next_slot: } ins_nr = 0; } - 
btrfs_release_path(root, path); + btrfs_release_path(path); if (min_key.offset < (u64)-1) min_key.offset++; @@ -2868,8 +2868,8 @@ next_slot: } WARN_ON(ins_nr); if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { - btrfs_release_path(root, path); - btrfs_release_path(log, dst_path); + btrfs_release_path(path); + btrfs_release_path(dst_path); ret = log_directory_changes(trans, root, inode, path, dst_path); if (ret) { err = ret; @@ -3136,7 +3136,7 @@ again: } btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); - btrfs_release_path(log_root_tree, path); + btrfs_release_path(path); if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) break; @@ -3171,7 +3171,7 @@ again: if (found_key.offset == 0) break; } - btrfs_release_path(log_root_tree, path); + btrfs_release_path(path); /* step one is to pin it all, step two is to replay just inodes */ if (wc.pin) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 76acd1d235e4..e21130d3f98a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1475,7 +1475,7 @@ next_slot: goto error; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(root, path); + btrfs_release_path(path); continue; } @@ -1947,7 +1947,7 @@ again: chunk = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_chunk); chunk_type = btrfs_chunk_type(leaf, chunk); - btrfs_release_path(chunk_root, path); + btrfs_release_path(path); if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_relocate_chunk(chunk_root, chunk_tree, @@ -2065,7 +2065,7 @@ int btrfs_balance(struct btrfs_root *dev_root) if (found_key.offset == 0) break; - btrfs_release_path(chunk_root, path); + btrfs_release_path(path); ret = btrfs_relocate_chunk(chunk_root, chunk_root->root_key.objectid, found_key.objectid, @@ -2137,7 +2137,7 @@ again: goto done; if (ret) { ret = 0; - btrfs_release_path(root, path); + btrfs_release_path(path); break; } @@ -2146,7 +2146,7 @@ again: btrfs_item_key_to_cpu(l, &key, path->slots[0]); if (key.objectid != device->devid) { - btrfs_release_path(root, path); + btrfs_release_path(path); break; } @@ -2154,14 +2154,14 @@ again: length = btrfs_dev_extent_length(l, dev_extent); if (key.offset + length <= new_size) { - btrfs_release_path(root, path); + btrfs_release_path(path); break; } chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); - btrfs_release_path(root, path); + btrfs_release_path(path); ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, chunk_offset); @@ -3813,7 +3813,7 @@ again: } if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { key.objectid = 0; - btrfs_release_path(root, path); + btrfs_release_path(path); goto again; } ret = 0; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index cfd660550ded..4ca88d1e18e2 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -120,13 +120,13 @@ static int do_setxattr(struct btrfs_trans_handle *trans, ret = btrfs_delete_one_dir_name(trans, root, path, di); BUG_ON(ret); - btrfs_release_path(root, path); + btrfs_release_path(path); /* if we don't have a value then we are removing the xattr */ if (!value) goto out; } else { - btrfs_release_path(root, path); + btrfs_release_path(path); if (flags & XATTR_REPLACE) { /* we couldn't find the attr to replace */ -- cgit v1.2.3 From 8cc33e5c19bf01c7617608669be8c1b4f663eb2f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 2 May 2011 15:29:25 +0200 Subject: btrfs: Document a mutex lock/unlock 
sequence --- fs/btrfs/extent-tree.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a160f11465f8..fba1348cb2a0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -756,6 +756,10 @@ again: btrfs_release_path(path); + /* + * Mutex was contended, block until it's released and try + * again + */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref(&head->node); @@ -2297,6 +2301,10 @@ again: atomic_inc(&ref->refs); spin_unlock(&delayed_refs->lock); + /* + * Mutex was contended, block until it's + * released and try again + */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); @@ -2363,6 +2371,10 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, btrfs_release_path(path); + /* + * Mutex was contended, block until it's released and let + * caller try again + */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref(&head->node); -- cgit v1.2.3 From 621496f4fd56195b7b273521f467c2945165481f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 4 May 2011 12:56:49 +0200 Subject: btrfs: remove unused function prototypes function prototypes without a body Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 12 ------------ fs/btrfs/delayed-ref.h | 5 ----- fs/btrfs/disk-io.h | 11 ----------- fs/btrfs/extent_io.h | 9 --------- fs/btrfs/transaction.h | 3 --- fs/btrfs/tree-log.h | 1 - fs/btrfs/volumes.h | 2 -- 7 files changed, 43 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3f301f05099d..b66216e636c2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2108,12 +2108,9 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, u64 num_bytes, u64 *refs, u64 *flags); int btrfs_pin_extent(struct btrfs_root *root, u64 bytenr, u64 num, int reserved); -int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct extent_buffer *leaf); int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 offset, u64 bytenr); -int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy); struct btrfs_block_group_cache *btrfs_lookup_block_group( struct btrfs_fs_info *info, u64 bytenr); @@ -2463,15 +2460,10 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio, u64 file_start, int contig); -int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, - u64 start, unsigned long len); struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 bytenr, int cow); -int btrfs_csum_truncate(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - u64 isize); int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list); /* inode.c */ @@ -2520,7 +2512,6 @@ unsigned long btrfs_force_ra(struct address_space *mapping, int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); int btrfs_readpage(struct file *file, struct page *page); void btrfs_evict_inode(struct inode *inode); -void btrfs_put_inode(struct inode *inode); int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); void btrfs_dirty_inode(struct inode *inode); struct inode *btrfs_alloc_inode(struct super_block *sb); @@ -2531,8 +2522,6 @@ void btrfs_destroy_cachep(void); long 
btrfs_ioctl_trans_end(struct file *file); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct btrfs_root *root, int *was_new); -int btrfs_commit_write(struct file *file, struct page *page, - unsigned from, unsigned to); struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 end, int create); @@ -2571,7 +2560,6 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); int btrfs_sync_file(struct file *file, int datasync); int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); -int btrfs_check_file(struct btrfs_root *root, struct inode *inode); extern const struct file_operations btrfs_file_operations; int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, u64 start, u64 end, u64 *hint_byte, int drop_cache); diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 50e3cf92fbda..946ed71ab84f 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -167,11 +167,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); -int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, u64 orig_parent, - u64 parent, u64 orig_ref_root, u64 ref_root, - u64 orig_ref_generation, u64 ref_generation, - u64 owner_objectid, int pin); int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head); int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 07b20dc2fd95..758f3ca614ee 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -65,25 +65,14 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, struct btrfs_key *location); int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); -int btrfs_insert_dev_radix(struct btrfs_root *root, - struct block_device *bdev, - u64 device_id, - u64 block_start, - u64 num_blocks); void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); void btrfs_mark_buffer_dirty(struct extent_buffer *buf); -void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf); int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); int btrfs_set_buffer_uptodate(struct extent_buffer *buf); -int wait_on_tree_block_writeback(struct btrfs_root *root, - struct extent_buffer *buf); int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len); void btrfs_csum_final(u32 crc, char *result); -int btrfs_open_device(struct btrfs_device *dev); -int btrfs_verify_block_csum(struct btrfs_root *root, - struct extent_buffer *buf); int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, int metadata); int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 3c3be74c934e..d1c5a57c9984 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -215,14 +215,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 
gfp_t mask); -int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask); -int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start, - u64 end, gfp_t mask); int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask); -int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask); int find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, int bits); struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, @@ -298,8 +292,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); int set_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); -int test_extent_buffer_dirty(struct extent_io_tree *tree, - struct extent_buffer *eb); int set_extent_buffer_uptodate(struct extent_io_tree *tree, struct extent_buffer *eb); int clear_extent_buffer_uptodate(struct extent_io_tree *tree, @@ -317,7 +309,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, unsigned long *map_start, unsigned long *map_len, int km); void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); -int release_extent_buffer_tail_pages(struct extent_buffer *eb); int extent_range_uptodate(struct extent_io_tree *tree, u64 start, u64 end); int extent_clear_unlock_delalloc(struct inode *inode, diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e441acc6c584..000a41008c3b 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); -int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, - struct btrfs_root *root); int btrfs_add_dead_root(struct btrfs_root *root); -int btrfs_drop_dead_root(struct btrfs_root *root); int btrfs_defrag_root(struct btrfs_root *root, int cacheonly); int btrfs_clean_old_snapshots(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index 3dfae84c8cc8..2270ac58d746 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h @@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, struct inode *inode, u64 dirid); -int btrfs_join_running_log_trans(struct btrfs_root *root); int btrfs_end_log_trans(struct btrfs_root *root); int btrfs_pin_log_trans(struct btrfs_root *root); int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..036b276b4860 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -209,8 +209,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans, int btrfs_rm_device(struct btrfs_root *root, char *device_path); int btrfs_cleanup_fs_uuids(void); int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len); -int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree, - u64 logical, struct page *page); int btrfs_grow_device(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 new_size); struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, -- cgit v1.2.3 From f2a97a9dbd86eb1ef956bdf20e05c507b32beb96 Mon Sep 17 00:00:00 2001 From: David 
Sterba Date: Thu, 5 May 2011 12:44:41 +0200 Subject: btrfs: remove all unused functions Remove static and global declarations and/or definitions. Reduces size of btrfs.ko by ~3.4kB. text data bss dec hex filename 402081 7464 200 409745 64091 btrfs.ko.base 398620 7144 200 405964 631cc btrfs.ko.remove-all Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 78 --------------- fs/btrfs/delayed-ref.c | 38 -------- fs/btrfs/delayed-ref.h | 1 - fs/btrfs/disk-io.c | 27 ------ fs/btrfs/disk-io.h | 7 -- fs/btrfs/extent_io.c | 227 -------------------------------------------- fs/btrfs/extent_io.h | 21 ---- fs/btrfs/free-space-cache.c | 15 --- fs/btrfs/free-space-cache.h | 1 - fs/btrfs/inode.c | 52 ---------- fs/btrfs/locking.c | 25 ----- fs/btrfs/locking.h | 2 - fs/btrfs/ref-cache.c | 164 -------------------------------- fs/btrfs/ref-cache.h | 24 ----- fs/btrfs/relocation.c | 2 +- fs/btrfs/root-tree.c | 47 --------- fs/btrfs/sysfs.c | 65 ------------- fs/btrfs/volumes.c | 19 ---- fs/btrfs/volumes.h | 3 - 19 files changed, 1 insertion(+), 817 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b66216e636c2..e37d441617d2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1440,26 +1440,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); } -static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb, - struct btrfs_chunk *c, int nr, - u64 val) -{ - btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val); -} - static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, struct btrfs_chunk *c, int nr) { return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); } -static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb, - struct btrfs_chunk *c, int nr, - u64 val) -{ - btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val); -} - /* struct btrfs_block_group_item */ BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, used, 64); @@ -1517,14 +1503,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item) return (struct btrfs_timespec *)ptr; } -static inline struct btrfs_timespec * -btrfs_inode_otime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, otime); - return (struct btrfs_timespec *)ptr; -} - BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); @@ -1875,33 +1853,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) return (u8 *)ptr; } -static inline u8 *btrfs_super_fsid(struct extent_buffer *eb) -{ - unsigned long ptr = offsetof(struct btrfs_super_block, fsid); - return (u8 *)ptr; -} - -static inline u8 *btrfs_header_csum(struct extent_buffer *eb) -{ - unsigned long ptr = offsetof(struct btrfs_header, csum); - return (u8 *)ptr; -} - -static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb) -{ - return NULL; -} - -static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb) -{ - return NULL; -} - -static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb) -{ - return NULL; -} - static inline int btrfs_is_leaf(struct extent_buffer *eb) { return btrfs_header_level(eb) == 0; @@ -2055,22 +2006,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb) return sb->s_fs_info; } -static inline int btrfs_set_root_name(struct btrfs_root *root, - const char *name, int len) -{ - /* if we already have a name just free 
it */ - kfree(root->name); - - root->name = kmalloc(len+1, GFP_KERNEL); - if (!root->name) - return -ENOMEM; - - memcpy(root->name, name, len); - root->name[len] = '\0'; - - return 0; -} - static inline u32 btrfs_level_size(struct btrfs_root *root, int level) { if (level == 0) @@ -2304,11 +2239,6 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, void *data, u32 data_size); -int btrfs_insert_some_items(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - int nr); int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -2354,8 +2284,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *item); int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct btrfs_root_item *item, struct btrfs_key *key); -int btrfs_search_root(struct btrfs_root *root, u64 search_start, - u64 *found_objectid); int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); int btrfs_find_orphan_roots(struct btrfs_root *tree_root); int btrfs_set_root_node(struct btrfs_root_item *item, @@ -2494,8 +2422,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u32 min_type); int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); -int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, - int sync); int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, struct extent_state **cached_state); int btrfs_writepages(struct address_space *mapping, @@ -2579,10 +2505,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, /* sysfs.c */ int btrfs_init_sysfs(void); void btrfs_exit_sysfs(void); -int btrfs_sysfs_add_super(struct btrfs_fs_info *fs); -int btrfs_sysfs_add_root(struct btrfs_root *root); -void btrfs_sysfs_del_root(struct btrfs_root *root); -void btrfs_sysfs_del_super(struct btrfs_fs_info *root); /* xattr.c */ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index bce28f653899..cb9b9a431fc9 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -280,44 +280,6 @@ again: return 1; } -/* - * This checks to see if there are any delayed refs in the - * btree for a given bytenr. It returns one if it finds any - * and zero otherwise. - * - * If it only finds a head node, it returns 0. - * - * The idea is to use this when deciding if you can safely delete an - * extent from the extent allocation tree. There may be a pending - * ref in the rbtree that adds or removes references, so as long as this - * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent - * allocation tree. - */ -int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr) -{ - struct btrfs_delayed_ref_node *ref; - struct btrfs_delayed_ref_root *delayed_refs; - struct rb_node *prev_node; - int ret = 0; - - delayed_refs = &trans->transaction->delayed_refs; - spin_lock(&delayed_refs->lock); - - ref = find_ref_head(&delayed_refs->root, bytenr, NULL); - if (ref) { - prev_node = rb_prev(&ref->rb_node); - if (!prev_node) - goto out; - ref = rb_entry(prev_node, struct btrfs_delayed_ref_node, - rb_node); - if (ref->bytenr == bytenr) - ret = 1; - } -out: - spin_unlock(&delayed_refs->lock); - return ret; -} - /* * helper function to update an extent delayed ref in the * rbtree. 
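
For context: the lookup API that survives this series is btrfs_find_delayed_ref_head(). Combined with the contended-mutex idiom documented a few commits above, the caller-side pattern is roughly the following (a condensed sketch of check_delayed_ref() in extent-tree.c, error paths trimmed):

        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head && !mutex_trylock(&head->mutex)) {
                atomic_inc(&head->node.refs);
                spin_unlock(&delayed_refs->lock);
                btrfs_release_path(path);
                /*
                 * Mutex was contended, block until it's released and let
                 * the caller try again
                 */
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
                return -EAGAIN;
        }
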
existing and update must both have the same diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 946ed71ab84f..e287e3b0eab0 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -166,7 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); -int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head); int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4084959b36fd..fa287c551ffc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -650,12 +650,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) return 256 * limit; } -int btrfs_congested_async(struct btrfs_fs_info *info, int iodone) -{ - return atomic_read(&info->nr_async_bios) > - btrfs_async_submit_limit(info); -} - static void run_one_async_start(struct btrfs_work *work) { struct async_submit_bio *async; @@ -1283,21 +1277,6 @@ out: return root; } -struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, - u64 root_objectid) -{ - struct btrfs_root *root; - - if (root_objectid == BTRFS_ROOT_TREE_OBJECTID) - return fs_info->tree_root; - if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID) - return fs_info->extent_root; - - root = radix_tree_lookup(&fs_info->fs_roots_radix, - (unsigned long)root_objectid); - return root; -} - struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, struct btrfs_key *location) { @@ -1369,11 +1348,6 @@ fail: return ERR_PTR(ret); } -struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, - struct btrfs_key *location, - const char *name, int namelen) -{ - return btrfs_read_fs_root_no_name(fs_info, location); #if 0 struct btrfs_root *root; int ret; @@ -1402,7 +1376,6 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, root->in_sysfs = 1; return root; #endif -} static int btrfs_congested_fn(void *congested_data, int bdi_bits) { diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 758f3ca614ee..2d75f9e896f6 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -55,11 +55,6 @@ int btrfs_commit_super(struct btrfs_root *root); int btrfs_error_commit_super(struct btrfs_root *root); struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); -struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, - u64 root_objectid); -struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, - struct btrfs_key *location, - const char *name, int namelen); struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, struct btrfs_key *location); struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, @@ -80,8 +75,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, unsigned long bio_flags, u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done); - -int btrfs_congested_async(struct btrfs_fs_info *info, int iodone); unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); int btrfs_write_tree_block(struct extent_buffer *buf); int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9369289ce771..91208296ff2b 100644 --- a/fs/btrfs/extent_io.c +++ 
b/fs/btrfs/extent_io.c @@ -941,13 +941,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, NULL, mask); } -static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) -{ - return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, - NULL, mask); -} - int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask) { @@ -963,11 +956,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, cached_state, mask); } -int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) -{ - return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK); -} - /* * either insert or lock state struct between start and end use mask to tell * us if waiting is desired. @@ -1027,25 +1015,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) mask); } -/* - * helper function to set pages and extents in the tree dirty - */ -int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end) -{ - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; - struct page *page; - - while (index <= end_index) { - page = find_get_page(tree->mapping, index); - BUG_ON(!page); - __set_page_dirty_nobuffers(page); - page_cache_release(page); - index++; - } - return 0; -} - /* * helper function to set both pages and extents in the tree writeback */ @@ -1819,46 +1788,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err) bio_put(bio); } -/* - * IO done from prepare_write is pretty simple, we just unlock - * the structs in the extent tree when done, and set the uptodate bits - * as appropriate. - */ -static void end_bio_extent_preparewrite(struct bio *bio, int err) -{ - const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); - struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_io_tree *tree; - u64 start; - u64 end; - - do { - struct page *page = bvec->bv_page; - struct extent_state *cached = NULL; - tree = &BTRFS_I(page->mapping->host)->io_tree; - - start = ((u64)page->index << PAGE_CACHE_SHIFT) + - bvec->bv_offset; - end = start + bvec->bv_len - 1; - - if (--bvec >= bio->bi_io_vec) - prefetchw(&bvec->bv_page->flags); - - if (uptodate) { - set_extent_uptodate(tree, start, end, &cached, - GFP_ATOMIC); - } else { - ClearPageUptodate(page); - SetPageError(page); - } - - unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); - - } while (bvec >= bio->bi_io_vec); - - bio_put(bio); -} - struct bio * btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, gfp_t gfp_flags) @@ -2719,128 +2648,6 @@ int extent_invalidatepage(struct extent_io_tree *tree, return 0; } -/* - * simple commit_write call, set_range_dirty is used to mark both - * the pages and the extent records as dirty - */ -int extent_commit_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to) -{ - loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; - - set_page_extent_mapped(page); - set_page_dirty(page); - - if (pos > inode->i_size) { - i_size_write(inode, pos); - mark_inode_dirty(inode); - } - return 0; -} - -int extent_prepare_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to, get_extent_t *get_extent) -{ - u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; - u64 page_end = page_start + PAGE_CACHE_SIZE - 1; - u64 block_start; - u64 orig_block_start; - u64 block_end; - u64 
cur_end; - struct extent_map *em; - unsigned blocksize = 1 << inode->i_blkbits; - size_t pg_offset = 0; - size_t block_off_start; - size_t block_off_end; - int err = 0; - int iocount = 0; - int ret = 0; - int isnew; - - set_page_extent_mapped(page); - - block_start = (page_start + from) & ~((u64)blocksize - 1); - block_end = (page_start + to - 1) | (blocksize - 1); - orig_block_start = block_start; - - lock_extent(tree, page_start, page_end, GFP_NOFS); - while (block_start <= block_end) { - em = get_extent(inode, page, pg_offset, block_start, - block_end - block_start + 1, 1); - if (IS_ERR_OR_NULL(em)) - goto err; - - cur_end = min(block_end, extent_map_end(em) - 1); - block_off_start = block_start & (PAGE_CACHE_SIZE - 1); - block_off_end = block_off_start + blocksize; - isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS); - - if (!PageUptodate(page) && isnew && - (block_off_end > to || block_off_start < from)) { - void *kaddr; - - kaddr = kmap_atomic(page, KM_USER0); - if (block_off_end > to) - memset(kaddr + to, 0, block_off_end - to); - if (block_off_start < from) - memset(kaddr + block_off_start, 0, - from - block_off_start); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); - } - if ((em->block_start != EXTENT_MAP_HOLE && - em->block_start != EXTENT_MAP_INLINE) && - !isnew && !PageUptodate(page) && - (block_off_end > to || block_off_start < from) && - !test_range_bit(tree, block_start, cur_end, - EXTENT_UPTODATE, 1, NULL)) { - u64 sector; - u64 extent_offset = block_start - em->start; - size_t iosize; - sector = (em->block_start + extent_offset) >> 9; - iosize = (cur_end - block_start + blocksize) & - ~((u64)blocksize - 1); - /* - * we've already got the extent locked, but we - * need to split the state such that our end_bio - * handler can clear the lock. 
- */ - set_extent_bit(tree, block_start, - block_start + iosize - 1, - EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS); - ret = submit_extent_page(READ, tree, page, - sector, iosize, pg_offset, em->bdev, - NULL, 1, - end_bio_extent_preparewrite, 0, - 0, 0); - if (ret && !err) - err = ret; - iocount++; - block_start = block_start + iosize; - } else { - struct extent_state *cached = NULL; - - set_extent_uptodate(tree, block_start, cur_end, &cached, - GFP_NOFS); - unlock_extent_cached(tree, block_start, cur_end, - &cached, GFP_NOFS); - block_start = cur_end + 1; - } - pg_offset = block_start & (PAGE_CACHE_SIZE - 1); - free_extent_map(em); - } - if (iocount) { - wait_extent_bit(tree, orig_block_start, - block_end, EXTENT_LOCKED); - } - check_page_uptodate(tree, page); -err: - /* FIXME, zero out newly allocated blocks on error */ - return err; -} - /* * a helper for releasepage, this tests for areas of the page that * are locked or under IO and drops the related state bits if it is safe @@ -2927,33 +2734,6 @@ int try_release_extent_mapping(struct extent_map_tree *map, return try_release_extent_state(map, tree, page, mask); } -sector_t extent_bmap(struct address_space *mapping, sector_t iblock, - get_extent_t *get_extent) -{ - struct inode *inode = mapping->host; - struct extent_state *cached_state = NULL; - u64 start = iblock << inode->i_blkbits; - sector_t sector = 0; - size_t blksize = (1 << inode->i_blkbits); - struct extent_map *em; - - lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, - 0, &cached_state, GFP_NOFS); - em = get_extent(inode, NULL, 0, start, blksize, 0); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, - start + blksize - 1, &cached_state, GFP_NOFS); - if (IS_ERR_OR_NULL(em)) - return 0; - - if (em->block_start > EXTENT_MAP_LAST_BYTE) - goto out; - - sector = (em->block_start + start - em->start) >> inode->i_blkbits; -out: - free_extent_map(em); - return sector; -} - /* * helper function for fiemap, which doesn't want to see any holes. 
* This maps until we find something past 'last' @@ -3437,13 +3217,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, return 0; } -int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, - struct extent_buffer *eb) -{ - return wait_on_extent_writeback(tree, eb->start, - eb->start + eb->len - 1); -} - int set_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index d1c5a57c9984..4e8445a4757c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -153,15 +153,6 @@ static inline int extent_compress_type(unsigned long bio_flags) struct extent_map_tree; -static inline struct extent_state *extent_state_next(struct extent_state *state) -{ - struct rb_node *node; - node = rb_next(&state->rb_node); - if (!node) - return NULL; - return rb_entry(node, struct extent_state, rb_node); -} - typedef struct extent_map *(get_extent_t)(struct inode *inode, struct page *page, size_t pg_offset, @@ -237,17 +228,8 @@ int extent_readpages(struct extent_io_tree *tree, struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_extent_t get_extent); -int extent_prepare_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to, get_extent_t *get_extent); -int extent_commit_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to); -sector_t extent_bmap(struct address_space *mapping, sector_t iblock, - get_extent_t *get_extent); int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent); -int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end); int set_state_private(struct extent_io_tree *tree, u64 start, u64 private); int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); void set_page_extent_mapped(struct page *page); @@ -284,9 +266,6 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, unsigned long src_offset, unsigned long len); void memset_extent_buffer(struct extent_buffer *eb, char c, unsigned long start, unsigned long len); -int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, - struct extent_buffer *eb); -int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end); int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); int clear_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 48fafcb85b0e..0290b0c7b003 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1685,21 +1685,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) -{ - struct btrfs_free_space *info; - struct rb_node *n; - u64 ret = 0; - - for (n = rb_first(&block_group->free_space_offset); n; - n = rb_next(n)) { - info = rb_entry(n, struct btrfs_free_space, offset_index); - ret += info->bytes; - } - - return ret; -} - /* * for a given cluster, put all of its extents back into the free * space cache. 
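
The rbtree walk deleted just above duplicated bookkeeping the cache already performs: every insertion and removal path adjusts a running total next to the tree (see e.g. bitmap_set_bits()/bitmap_clear_bits()), so the same answer is an O(1) field read. Illustrative sketch only, using the field names introduced by the free-space-cache rework earlier in this series:

        /* maintained under ctl->tree_lock on every add/remove */
        u64 free_bytes = block_group->free_space_ctl->free_space;
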
If the block group passed doesn't match the block group diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 65c3b935289f..12b2b5165f8a 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -55,7 +55,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_group_cache *block_group, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2840989737b7..57122a5e8473 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7185,58 +7185,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) return 0; } -int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, - int sync) -{ - struct btrfs_inode *binode; - struct inode *inode = NULL; - - spin_lock(&root->fs_info->delalloc_lock); - while (!list_empty(&root->fs_info->delalloc_inodes)) { - binode = list_entry(root->fs_info->delalloc_inodes.next, - struct btrfs_inode, delalloc_inodes); - inode = igrab(&binode->vfs_inode); - if (inode) { - list_move_tail(&binode->delalloc_inodes, - &root->fs_info->delalloc_inodes); - break; - } - - list_del_init(&binode->delalloc_inodes); - cond_resched_lock(&root->fs_info->delalloc_lock); - } - spin_unlock(&root->fs_info->delalloc_lock); - - if (inode) { - if (sync) { - filemap_write_and_wait(inode->i_mapping); - /* - * We have to do this because compression doesn't - * actually set PG_writeback until it submits the pages - * for IO, which happens in an async thread, so we could - * race and not actually wait for any writeback pages - * because they've not been submitted yet. Technically - * this could still be the case for the ordered stuff - * since the async thread may not have started to do its - * work yet. If this becomes the case then we need to - * figure out a way to make sure that in writepage we - * wait for any async pages to be submitted before - * returning so that fdatawait does what its supposed to - * do. - */ - btrfs_wait_ordered_range(inode, 0, (u64)-1); - } else { - filemap_flush(inode->i_mapping); - } - if (delay_iput) - btrfs_add_delayed_iput(inode); - else - iput(inode); - return 1; - } - return 0; -} - static int btrfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 6151f2ea38bb..66fa43dc3f0f 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -185,31 +185,6 @@ sleep: return 0; } -/* - * Very quick trylock, this does not spin or schedule. It returns - * 1 with the spinlock held if it was able to take the lock, or it - * returns zero if it was unable to take the lock. - * - * After this call, scheduling is not safe without first calling - * btrfs_set_lock_blocking() - */ -int btrfs_try_tree_lock(struct extent_buffer *eb) -{ - if (spin_trylock(&eb->lock)) { - if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) { - /* - * we've got the spinlock, but the real owner is - * blocking. 
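
btrfs_try_tree_lock() can go because no caller is left. The spinning variant that stays, btrfs_try_spin_lock(), is used in the tree search path roughly like this (condensed from the btrfs_search_slot() idiom of this era; b is the extent buffer being descended into, p the path):

        if (!btrfs_try_spin_lock(b)) {
                /* cheap attempt failed: fall back to the blocking lock */
                btrfs_set_path_blocking(p);
                btrfs_tree_lock(b);
                btrfs_clear_path_blocking(p, b);
        }
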
Drop the spinlock and return failure - */ - spin_unlock(&eb->lock); - return 0; - } - return 1; - } - /* someone else has the spinlock giveup */ - return 0; -} - int btrfs_tree_unlock(struct extent_buffer *eb) { /* diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index 6c4ce457168c..5c33a560a2f1 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -21,8 +21,6 @@ int btrfs_tree_lock(struct extent_buffer *eb); int btrfs_tree_unlock(struct extent_buffer *eb); - -int btrfs_try_tree_lock(struct extent_buffer *eb); int btrfs_try_spin_lock(struct extent_buffer *eb); void btrfs_set_lock_blocking(struct extent_buffer *eb); diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index a97314cf6bd6..82d569cb6267 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c @@ -23,56 +23,6 @@ #include "ref-cache.h" #include "transaction.h" -/* - * leaf refs are used to cache the information about which extents - * a given leaf has references on. This allows us to process that leaf - * in btrfs_drop_snapshot without needing to read it back from disk. - */ - -/* - * kmalloc a leaf reference struct and update the counters for the - * total ref cache size - */ -struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, - int nr_extents) -{ - struct btrfs_leaf_ref *ref; - size_t size = btrfs_leaf_ref_size(nr_extents); - - ref = kmalloc(size, GFP_NOFS); - if (ref) { - spin_lock(&root->fs_info->ref_cache_lock); - root->fs_info->total_ref_cache_size += size; - spin_unlock(&root->fs_info->ref_cache_lock); - - memset(ref, 0, sizeof(*ref)); - atomic_set(&ref->usage, 1); - INIT_LIST_HEAD(&ref->list); - } - return ref; -} - -/* - * free a leaf reference struct and update the counters for the - * total ref cache size - */ -void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) -{ - if (!ref) - return; - WARN_ON(atomic_read(&ref->usage) == 0); - if (atomic_dec_and_test(&ref->usage)) { - size_t size = btrfs_leaf_ref_size(ref->nritems); - - BUG_ON(ref->in_tree); - kfree(ref); - - spin_lock(&root->fs_info->ref_cache_lock); - root->fs_info->total_ref_cache_size -= size; - spin_unlock(&root->fs_info->ref_cache_lock); - } -} - static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, struct rb_node *node) { @@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) } return NULL; } - -int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, - int shared) -{ - struct btrfs_leaf_ref *ref = NULL; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; - - if (shared) - tree = &root->fs_info->shared_ref_tree; - if (!tree) - return 0; - - spin_lock(&tree->lock); - while (!list_empty(&tree->list)) { - ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list); - BUG_ON(ref->tree != tree); - if (ref->root_gen > max_root_gen) - break; - if (!xchg(&ref->in_tree, 0)) { - cond_resched_lock(&tree->lock); - continue; - } - - rb_erase(&ref->rb_node, &tree->root); - list_del_init(&ref->list); - - spin_unlock(&tree->lock); - btrfs_free_leaf_ref(root, ref); - cond_resched(); - spin_lock(&tree->lock); - } - spin_unlock(&tree->lock); - return 0; -} - -/* - * find the leaf ref for a given extent. 
This returns the ref struct with - * a usage reference incremented - */ -struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, - u64 bytenr) -{ - struct rb_node *rb; - struct btrfs_leaf_ref *ref = NULL; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; -again: - if (tree) { - spin_lock(&tree->lock); - rb = tree_search(&tree->root, bytenr); - if (rb) - ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node); - if (ref) - atomic_inc(&ref->usage); - spin_unlock(&tree->lock); - if (ref) - return ref; - } - if (tree != &root->fs_info->shared_ref_tree) { - tree = &root->fs_info->shared_ref_tree; - goto again; - } - return NULL; -} - -/* - * add a fully filled in leaf ref struct - * remove all the refs older than a given root generation - */ -int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, - int shared) -{ - int ret = 0; - struct rb_node *rb; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; - - if (shared) - tree = &root->fs_info->shared_ref_tree; - - spin_lock(&tree->lock); - rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node); - if (rb) { - ret = -EEXIST; - } else { - atomic_inc(&ref->usage); - ref->tree = tree; - ref->in_tree = 1; - list_add_tail(&ref->list, &tree->list); - } - spin_unlock(&tree->lock); - return ret; -} - -/* - * remove a single leaf ref from the tree. This drops the ref held by the tree - * only - */ -int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) -{ - struct btrfs_leaf_ref_tree *tree; - - if (!xchg(&ref->in_tree, 0)) - return 0; - - tree = ref->tree; - spin_lock(&tree->lock); - - rb_erase(&ref->rb_node, &tree->root); - list_del_init(&ref->list); - - spin_unlock(&tree->lock); - - btrfs_free_leaf_ref(root, ref); - return 0; -} diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h index e2a55cb2072b..24f7001f6387 100644 --- a/fs/btrfs/ref-cache.h +++ b/fs/btrfs/ref-cache.h @@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents) return sizeof(struct btrfs_leaf_ref) + sizeof(struct btrfs_extent_info) * nr_extents; } - -static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree) -{ - tree->root = RB_ROOT; - INIT_LIST_HEAD(&tree->list); - spin_lock_init(&tree->lock); -} - -static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree) -{ - return RB_EMPTY_ROOT(&tree->root); -} - -void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree); -struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, - int nr_extents); -void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); -struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, - u64 bytenr); -int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, - int shared); -int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, - int shared); -int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); #endif diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f7b799b151aa..f726e72dd362 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -507,6 +507,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans, return 1; } + static int should_ignore_root(struct btrfs_root *root) { struct btrfs_root *reloc_root; @@ -529,7 +530,6 @@ static int should_ignore_root(struct btrfs_root *root) */ return 1; } - /* * find reloc tree by address of tree root */ diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 59a94c1d9815..3bcfe5a7c330 100644 --- 
a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -21,53 +21,6 @@ #include "disk-io.h" #include "print-tree.h" -/* - * search forward for a root, starting with objectid 'search_start' - * if a root key is found, the objectid we find is filled into 'found_objectid' - * and 0 is returned. < 0 is returned on error, 1 if there is nothing - * left in the tree. - */ -int btrfs_search_root(struct btrfs_root *root, u64 search_start, - u64 *found_objectid) -{ - struct btrfs_path *path; - struct btrfs_key search_key; - int ret; - - root = root->fs_info->tree_root; - search_key.objectid = search_start; - search_key.type = (u8)-1; - search_key.offset = (u64)-1; - - path = btrfs_alloc_path(); - BUG_ON(!path); -again: - ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); - if (ret < 0) - goto out; - if (ret == 0) { - ret = 1; - goto out; - } - if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { - ret = btrfs_next_leaf(root, path); - if (ret) - goto out; - } - btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]); - if (search_key.type != BTRFS_ROOT_ITEM_KEY) { - search_key.offset++; - btrfs_release_path(path); - goto again; - } - ret = 0; - *found_objectid = search_key.objectid; - -out: - btrfs_free_path(path); - return ret; -} - /* * lookup the root with the highest offset for a given objectid. The key we do * find is copied into 'key'. If we find something return 0, otherwise 1, < 0 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 4ce16ef702a3..ab9633fd72a4 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -189,71 +189,6 @@ static struct kobj_type btrfs_super_ktype = { /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; -int btrfs_sysfs_add_super(struct btrfs_fs_info *fs) -{ - int error; - char *name; - char c; - int len = strlen(fs->sb->s_id) + 1; - int i; - - name = kmalloc(len, GFP_NOFS); - if (!name) { - error = -ENOMEM; - goto fail; - } - - for (i = 0; i < len; i++) { - c = fs->sb->s_id[i]; - if (c == '/' || c == '\\') - c = '!'; - name[i] = c; - } - name[len] = '\0'; - - fs->super_kobj.kset = btrfs_kset; - error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype, - NULL, "%s", name); - kfree(name); - if (error) - goto fail; - - return 0; - -fail: - printk(KERN_ERR "btrfs: sysfs creation for super failed\n"); - return error; -} - -int btrfs_sysfs_add_root(struct btrfs_root *root) -{ - int error; - - error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype, - &root->fs_info->super_kobj, - "%s", root->name); - if (error) - goto fail; - - return 0; - -fail: - printk(KERN_ERR "btrfs: sysfs creation for root failed\n"); - return error; -} - -void btrfs_sysfs_del_root(struct btrfs_root *root) -{ - kobject_put(&root->root_kobj); - wait_for_completion(&root->kobj_unregister); -} - -void btrfs_sysfs_del_super(struct btrfs_fs_info *fs) -{ - kobject_put(&fs->super_kobj); - wait_for_completion(&fs->kobj_unregister); -} - int btrfs_init_sysfs(void) { btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e21130d3f98a..cd0b31a9ba3d 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -44,16 +44,6 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root); static DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); -void btrfs_lock_volumes(void) -{ - mutex_lock(&uuid_mutex); -} - -void btrfs_unlock_volumes(void) -{ - mutex_unlock(&uuid_mutex); -} - static void lock_chunks(struct btrfs_root *root) { mutex_lock(&root->fs_info->chunk_mutex); @@ -3688,15 +3678,6 @@ 
static int read_one_dev(struct btrfs_root *root, return ret; } -int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf) -{ - struct btrfs_dev_item *dev_item; - - dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block, - dev_item); - return read_one_dev(root, buf, dev_item); -} - int btrfs_read_sys_array(struct btrfs_root *root) { struct btrfs_super_block *super_copy = &root->fs_info->super_copy; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 036b276b4860..5669ae8ea1c9 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -196,7 +196,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree); void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, int mirror_num, int async_submit); -int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder); int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, @@ -216,8 +215,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); int btrfs_init_new_device(struct btrfs_root *root, char *path); int btrfs_balance(struct btrfs_root *dev_root); -void btrfs_unlock_volumes(void); -void btrfs_lock_volumes(void); int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 num_bytes, -- cgit v1.2.3 From 182608c8294b5fe90d7bbd4b026c82bf0a24b736 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 5 May 2011 13:13:16 +0200 Subject: btrfs: remove old unused commented out code Remove code which has been #if0-ed out for a very long time and does not seem to be related to current codebase anymore. Signed-off-by: David Sterba --- fs/btrfs/delayed-ref.c | 76 --- fs/btrfs/disk-io.c | 29 - fs/btrfs/extent-tree.c | 1661 +----------------------------------------------- fs/btrfs/inode.c | 172 ----- fs/btrfs/transaction.c | 134 ---- 5 files changed, 1 insertion(+), 2071 deletions(-) diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index cb9b9a431fc9..125cf76fcd08 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -709,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr) return btrfs_delayed_node_to_head(ref); return NULL; } - -/* - * add a delayed ref to the tree. This does all of the accounting required - * to make sure the delayed ref is eventually processed before this - * transaction commits. - * - * The main point of this call is to add and remove a backreference in a single - * shot, taking the lock only once, and only searching for the head node once. - * - * It is the same as doing a ref add and delete in two separate calls. 
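
For context, the single-shot behavior described here has long been expressed as two ordinary delayed-ref insertions instead (e.g. btrfs_add_delayed_tree_ref() for the add followed by the corresponding drop), which is why the stale #if 0 copy below can finally go.
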
- */ -#if 0 -int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, u64 orig_parent, - u64 parent, u64 orig_ref_root, u64 ref_root, - u64 orig_ref_generation, u64 ref_generation, - u64 owner_objectid, int pin) -{ - struct btrfs_delayed_ref *ref; - struct btrfs_delayed_ref *old_ref; - struct btrfs_delayed_ref_head *head_ref; - struct btrfs_delayed_ref_root *delayed_refs; - int ret; - - ref = kmalloc(sizeof(*ref), GFP_NOFS); - if (!ref) - return -ENOMEM; - - old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS); - if (!old_ref) { - kfree(ref); - return -ENOMEM; - } - - /* - * the parent = 0 case comes from cases where we don't actually - * know the parent yet. It will get updated later via a add/drop - * pair. - */ - if (parent == 0) - parent = bytenr; - if (orig_parent == 0) - orig_parent = bytenr; - - head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS); - if (!head_ref) { - kfree(ref); - kfree(old_ref); - return -ENOMEM; - } - delayed_refs = &trans->transaction->delayed_refs; - spin_lock(&delayed_refs->lock); - - /* - * insert both the head node and the new ref without dropping - * the spin lock - */ - ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes, - (u64)-1, 0, 0, 0, - BTRFS_UPDATE_DELAYED_HEAD, 0); - BUG_ON(ret); - - ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes, - parent, ref_root, ref_generation, - owner_objectid, BTRFS_ADD_DELAYED_REF, 0); - BUG_ON(ret); - - ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes, - orig_parent, orig_ref_root, - orig_ref_generation, owner_objectid, - BTRFS_DROP_DELAYED_REF, pin); - BUG_ON(ret); - spin_unlock(&delayed_refs->lock); - return 0; -} -#endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fa287c551ffc..de7b4770ab17 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1348,35 +1348,6 @@ fail: return ERR_PTR(ret); } -#if 0 - struct btrfs_root *root; - int ret; - - root = btrfs_read_fs_root_no_name(fs_info, location); - if (!root) - return NULL; - - if (root->in_sysfs) - return root; - - ret = btrfs_set_root_name(root, name, namelen); - if (ret) { - free_extent_buffer(root->node); - kfree(root); - return ERR_PTR(ret); - } - - ret = btrfs_sysfs_add_root(root); - if (ret) { - free_extent_buffer(root->node); - kfree(root->name); - kfree(root); - return ERR_PTR(ret); - } - root->in_sysfs = 1; - return root; -#endif - static int btrfs_congested_fn(void *congested_data, int bdi_bits) { struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index fba1348cb2a0..b457f195636e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2522,126 +2522,6 @@ out: return ret; } -#if 0 -int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct extent_buffer *buf, u32 nr_extents) -{ - struct btrfs_key key; - struct btrfs_file_extent_item *fi; - u64 root_gen; - u32 nritems; - int i; - int level; - int ret = 0; - int shared = 0; - - if (!root->ref_cows) - return 0; - - if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { - shared = 0; - root_gen = root->root_key.offset; - } else { - shared = 1; - root_gen = trans->transid - 1; - } - - level = btrfs_header_level(buf); - nritems = btrfs_header_nritems(buf); - - if (level == 0) { - struct btrfs_leaf_ref *ref; - struct btrfs_extent_info *info; - - ref = btrfs_alloc_leaf_ref(root, nr_extents); - if (!ref) { - ret = -ENOMEM; - goto out; - } - - ref->root_gen = root_gen; - ref->bytenr = 
buf->start; - ref->owner = btrfs_header_owner(buf); - ref->generation = btrfs_header_generation(buf); - ref->nritems = nr_extents; - info = ref->extents; - - for (i = 0; nr_extents > 0 && i < nritems; i++) { - u64 disk_bytenr; - btrfs_item_key_to_cpu(buf, &key, i); - if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(buf, i, - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(buf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); - if (disk_bytenr == 0) - continue; - - info->bytenr = disk_bytenr; - info->num_bytes = - btrfs_file_extent_disk_num_bytes(buf, fi); - info->objectid = key.objectid; - info->offset = key.offset; - info++; - } - - ret = btrfs_add_leaf_ref(root, ref, shared); - if (ret == -EEXIST && shared) { - struct btrfs_leaf_ref *old; - old = btrfs_lookup_leaf_ref(root, ref->bytenr); - BUG_ON(!old); - btrfs_remove_leaf_ref(root, old); - btrfs_free_leaf_ref(root, old); - ret = btrfs_add_leaf_ref(root, ref, shared); - } - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } -out: - return ret; -} - -/* when a block goes through cow, we update the reference counts of - * everything that block points to. The internal pointers of the block - * can be in just about any order, and it is likely to have clusters of - * things that are close together and clusters of things that are not. - * - * To help reduce the seeks that come with updating all of these reference - * counts, sort them by byte number before actual updates are done. - * - * struct refsort is used to match byte number to slot in the btree block. - * we sort based on the byte number and then use the slot to actually - * find the item. - * - * struct refsort is smaller than strcut btrfs_item and smaller than - * struct btrfs_key_ptr. Since we're currently limited to the page size - * for a btree block, there's no way for a kmalloc of refsorts for a - * single node to be bigger than a page. - */ -struct refsort { - u64 bytenr; - u32 slot; -}; - -/* - * for passing into sort() - */ -static int refsort_cmp(const void *a_void, const void *b_void) -{ - const struct refsort *a = a_void; - const struct refsort *b = b_void; - - if (a->bytenr < b->bytenr) - return -1; - if (a->bytenr > b->bytenr) - return 1; - return 0; -} -#endif - static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, @@ -3223,18 +3103,6 @@ commit_trans: goto again; } -#if 0 /* I hope we never need this code again, just in case */ - printk(KERN_ERR "no space left, need %llu, %llu bytes_used, " - "%llu bytes_reserved, " "%llu bytes_pinned, " - "%llu bytes_readonly, %llu may use %llu total\n", - (unsigned long long)bytes, - (unsigned long long)data_sinfo->bytes_used, - (unsigned long long)data_sinfo->bytes_reserved, - (unsigned long long)data_sinfo->bytes_pinned, - (unsigned long long)data_sinfo->bytes_readonly, - (unsigned long long)data_sinfo->bytes_may_use, - (unsigned long long)data_sinfo->total_bytes); -#endif return -ENOSPC; } data_sinfo->bytes_may_use += bytes; @@ -3867,23 +3735,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) u64 meta_used; u64 data_used; int csum_size = btrfs_super_csum_size(&fs_info->super_copy); -#if 0 - /* - * per tree used space accounting can be inaccuracy, so we - * can't rely on it. 
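
With the per-root counters gone, the reserve is sized from the space-info totals gathered right after this block. Roughly, a from-memory sketch of this era's calc_global_metadata_size(), not verbatim (data_used/meta_used come from the DATA and METADATA space infos):

        /* checksum bytes needed to cover all data, doubled for CoW */
        num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) * csum_size * 2;
        /* plus roughly 2% of everything already in use */
        num_bytes += div64_u64(data_used + meta_used, 50);
        /* but never more than a third of the metadata actually used */
        if (num_bytes * 3 > meta_used)
                num_bytes = div64_u64(meta_used, 3);
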
- */ - spin_lock(&fs_info->extent_root->accounting_lock); - num_bytes = btrfs_root_used(&fs_info->extent_root->root_item); - spin_unlock(&fs_info->extent_root->accounting_lock); - spin_lock(&fs_info->csum_root->accounting_lock); - num_bytes += btrfs_root_used(&fs_info->csum_root->root_item); - spin_unlock(&fs_info->csum_root->accounting_lock); - - spin_lock(&fs_info->tree_root->accounting_lock); - num_bytes += btrfs_root_used(&fs_info->tree_root->root_item); - spin_unlock(&fs_info->tree_root->accounting_lock); -#endif sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); spin_lock(&sinfo->lock); data_used = sinfo->bytes_used; @@ -3936,10 +3788,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) block_rsv->reserved = block_rsv->size; block_rsv->full = 1; } -#if 0 - printk(KERN_INFO"global block rsv size %llu reserved %llu\n", - block_rsv->size, block_rsv->reserved); -#endif + spin_unlock(&sinfo->lock); spin_unlock(&block_rsv->lock); } @@ -6596,1514 +6445,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, return ret; } -#if 0 -static unsigned long calc_ra(unsigned long start, unsigned long last, - unsigned long nr) -{ - return min(last, start + nr - 1); -} - -static noinline int relocate_inode_pages(struct inode *inode, u64 start, - u64 len) -{ - u64 page_start; - u64 page_end; - unsigned long first_index; - unsigned long last_index; - unsigned long i; - struct page *page; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct file_ra_state *ra; - struct btrfs_ordered_extent *ordered; - unsigned int total_read = 0; - unsigned int total_dirty = 0; - int ret = 0; - - ra = kzalloc(sizeof(*ra), GFP_NOFS); - if (!ra) - return -ENOMEM; - - mutex_lock(&inode->i_mutex); - first_index = start >> PAGE_CACHE_SHIFT; - last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; - - /* make sure the dirty trick played by the caller work */ - ret = invalidate_inode_pages2_range(inode->i_mapping, - first_index, last_index); - if (ret) - goto out_unlock; - - file_ra_state_init(ra, inode->i_mapping); - - for (i = first_index ; i <= last_index; i++) { - if (total_read % ra->ra_pages == 0) { - btrfs_force_ra(inode->i_mapping, ra, NULL, i, - calc_ra(i, last_index, ra->ra_pages)); - } - total_read++; -again: - if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode)) - BUG_ON(1); - page = grab_cache_page(inode->i_mapping, i); - if (!page) { - ret = -ENOMEM; - goto out_unlock; - } - if (!PageUptodate(page)) { - btrfs_readpage(NULL, page); - lock_page(page); - if (!PageUptodate(page)) { - unlock_page(page); - page_cache_release(page); - ret = -EIO; - goto out_unlock; - } - } - wait_on_page_writeback(page); - - page_start = (u64)page->index << PAGE_CACHE_SHIFT; - page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(io_tree, page_start, page_end, GFP_NOFS); - - ordered = btrfs_lookup_ordered_extent(inode, page_start); - if (ordered) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - goto again; - } - set_page_extent_mapped(page); - - if (i == first_index) - set_extent_bits(io_tree, page_start, page_end, - EXTENT_BOUNDARY, GFP_NOFS); - btrfs_set_extent_delalloc(inode, page_start, page_end); - - set_page_dirty(page); - total_dirty++; - - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - } - -out_unlock: - kfree(ra); - mutex_unlock(&inode->i_mutex); - 
balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty); - return ret; -} - -static noinline int relocate_data_extent(struct inode *reloc_inode, - struct btrfs_key *extent_key, - u64 offset) -{ - struct btrfs_root *root = BTRFS_I(reloc_inode)->root; - struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree; - struct extent_map *em; - u64 start = extent_key->objectid - offset; - u64 end = start + extent_key->offset - 1; - - em = alloc_extent_map(); - BUG_ON(!em); - - em->start = start; - em->len = extent_key->offset; - em->block_len = extent_key->offset; - em->block_start = extent_key->objectid; - em->bdev = root->fs_info->fs_devices->latest_bdev; - set_bit(EXTENT_FLAG_PINNED, &em->flags); - - /* setup extent map to cheat btrfs_readpage */ - lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); - while (1) { - int ret; - write_lock(&em_tree->lock); - ret = add_extent_mapping(em_tree, em); - write_unlock(&em_tree->lock); - if (ret != -EEXIST) { - free_extent_map(em); - break; - } - btrfs_drop_extent_cache(reloc_inode, start, end, 0); - } - unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); - - return relocate_inode_pages(reloc_inode, start, extent_key->offset); -} - -struct btrfs_ref_path { - u64 extent_start; - u64 nodes[BTRFS_MAX_LEVEL]; - u64 root_objectid; - u64 root_generation; - u64 owner_objectid; - u32 num_refs; - int lowest_level; - int current_level; - int shared_level; - - struct btrfs_key node_keys[BTRFS_MAX_LEVEL]; - u64 new_nodes[BTRFS_MAX_LEVEL]; -}; - -struct disk_extent { - u64 ram_bytes; - u64 disk_bytenr; - u64 disk_num_bytes; - u64 offset; - u64 num_bytes; - u8 compression; - u8 encryption; - u16 other_encoding; -}; - -static int is_cowonly_root(u64 root_objectid) -{ - if (root_objectid == BTRFS_ROOT_TREE_OBJECTID || - root_objectid == BTRFS_EXTENT_TREE_OBJECTID || - root_objectid == BTRFS_CHUNK_TREE_OBJECTID || - root_objectid == BTRFS_DEV_TREE_OBJECTID || - root_objectid == BTRFS_TREE_LOG_OBJECTID || - root_objectid == BTRFS_CSUM_TREE_OBJECTID) - return 1; - return 0; -} - -static noinline int __next_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path, - int first_time) -{ - struct extent_buffer *leaf; - struct btrfs_path *path; - struct btrfs_extent_ref *ref; - struct btrfs_key key; - struct btrfs_key found_key; - u64 bytenr; - u32 nritems; - int level; - int ret = 1; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - if (first_time) { - ref_path->lowest_level = -1; - ref_path->current_level = -1; - ref_path->shared_level = -1; - goto walk_up; - } -walk_down: - level = ref_path->current_level - 1; - while (level >= -1) { - u64 parent; - if (level < ref_path->lowest_level) - break; - - if (level >= 0) - bytenr = ref_path->nodes[level]; - else - bytenr = ref_path->extent_start; - BUG_ON(bytenr == 0); - - parent = ref_path->nodes[level + 1]; - ref_path->nodes[level + 1] = 0; - ref_path->current_level = level; - BUG_ON(parent == 0); - - key.objectid = bytenr; - key.offset = parent + 1; - key.type = BTRFS_EXTENT_REF_KEY; - - ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); - if (ret < 0) - goto out; - BUG_ON(ret == 0); - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(extent_root, path); - if (ret < 0) - goto out; - if (ret > 0) - goto next; - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid == 
bytenr && - found_key.type == BTRFS_EXTENT_REF_KEY) { - if (level < ref_path->shared_level) - ref_path->shared_level = level; - goto found; - } -next: - level--; - btrfs_release_path(extent_root, path); - cond_resched(); - } - /* reached lowest level */ - ret = 1; - goto out; -walk_up: - level = ref_path->current_level; - while (level < BTRFS_MAX_LEVEL - 1) { - u64 ref_objectid; - - if (level >= 0) - bytenr = ref_path->nodes[level]; - else - bytenr = ref_path->extent_start; - - BUG_ON(bytenr == 0); - - key.objectid = bytenr; - key.offset = 0; - key.type = BTRFS_EXTENT_REF_KEY; - - ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); - if (ret < 0) - goto out; - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(extent_root, path); - if (ret < 0) - goto out; - if (ret > 0) { - /* the extent was freed by someone */ - if (ref_path->lowest_level == level) - goto out; - btrfs_release_path(extent_root, path); - goto walk_down; - } - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != bytenr || - found_key.type != BTRFS_EXTENT_REF_KEY) { - /* the extent was freed by someone */ - if (ref_path->lowest_level == level) { - ret = 1; - goto out; - } - btrfs_release_path(extent_root, path); - goto walk_down; - } -found: - ref = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_extent_ref); - ref_objectid = btrfs_ref_objectid(leaf, ref); - if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) { - if (first_time) { - level = (int)ref_objectid; - BUG_ON(level >= BTRFS_MAX_LEVEL); - ref_path->lowest_level = level; - ref_path->current_level = level; - ref_path->nodes[level] = bytenr; - } else { - WARN_ON(ref_objectid != level); - } - } else { - WARN_ON(level != -1); - } - first_time = 0; - - if (ref_path->lowest_level == level) { - ref_path->owner_objectid = ref_objectid; - ref_path->num_refs = btrfs_ref_num_refs(leaf, ref); - } - - /* - * the block is tree root or the block isn't in reference - * counted tree. - */ - if (found_key.objectid == found_key.offset || - is_cowonly_root(btrfs_ref_root(leaf, ref))) { - ref_path->root_objectid = btrfs_ref_root(leaf, ref); - ref_path->root_generation = - btrfs_ref_generation(leaf, ref); - if (level < 0) { - /* special reference from the tree log */ - ref_path->nodes[0] = found_key.offset; - ref_path->current_level = 0; - } - ret = 0; - goto out; - } - - level++; - BUG_ON(ref_path->nodes[level] != 0); - ref_path->nodes[level] = found_key.offset; - ref_path->current_level = level; - - /* - * the reference was created in the running transaction, - * no need to continue walking up. - */ - if (btrfs_ref_generation(leaf, ref) == trans->transid) { - ref_path->root_objectid = btrfs_ref_root(leaf, ref); - ref_path->root_generation = - btrfs_ref_generation(leaf, ref); - ret = 0; - goto out; - } - - btrfs_release_path(extent_root, path); - cond_resched(); - } - /* reached max tree level, but no tree root found. 
*/ - BUG(); -out: - btrfs_free_path(path); - return ret; -} - -static int btrfs_first_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path, - u64 extent_start) -{ - memset(ref_path, 0, sizeof(*ref_path)); - ref_path->extent_start = extent_start; - - return __next_ref_path(trans, extent_root, ref_path, 1); -} - -static int btrfs_next_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path) -{ - return __next_ref_path(trans, extent_root, ref_path, 0); -} - -static noinline int get_new_locations(struct inode *reloc_inode, - struct btrfs_key *extent_key, - u64 offset, int no_fragment, - struct disk_extent **extents, - int *nr_extents) -{ - struct btrfs_root *root = BTRFS_I(reloc_inode)->root; - struct btrfs_path *path; - struct btrfs_file_extent_item *fi; - struct extent_buffer *leaf; - struct disk_extent *exts = *extents; - struct btrfs_key found_key; - u64 cur_pos; - u64 last_byte; - u32 nritems; - int nr = 0; - int max = *nr_extents; - int ret; - - WARN_ON(!no_fragment && *extents); - if (!exts) { - max = 1; - exts = kmalloc(sizeof(*exts) * max, GFP_NOFS); - if (!exts) - return -ENOMEM; - } - - path = btrfs_alloc_path(); - if (!path) { - if (exts != *extents) - kfree(exts); - return -ENOMEM; - } - - cur_pos = extent_key->objectid - offset; - last_byte = extent_key->objectid + extent_key->offset; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, - cur_pos, 0); - if (ret < 0) - goto out; - if (ret > 0) { - ret = -ENOENT; - goto out; - } - - while (1) { - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(root, path); - if (ret < 0) - goto out; - if (ret > 0) - break; - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.offset != cur_pos || - found_key.type != BTRFS_EXTENT_DATA_KEY || - found_key.objectid != reloc_inode->i_ino) - break; - - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) != - BTRFS_FILE_EXTENT_REG || - btrfs_file_extent_disk_bytenr(leaf, fi) == 0) - break; - - if (nr == max) { - struct disk_extent *old = exts; - max *= 2; - exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); - if (!exts) { - ret = -ENOMEM; - goto out; - } - memcpy(exts, old, sizeof(*exts) * nr); - if (old != *extents) - kfree(old); - } - - exts[nr].disk_bytenr = - btrfs_file_extent_disk_bytenr(leaf, fi); - exts[nr].disk_num_bytes = - btrfs_file_extent_disk_num_bytes(leaf, fi); - exts[nr].offset = btrfs_file_extent_offset(leaf, fi); - exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); - exts[nr].compression = btrfs_file_extent_compression(leaf, fi); - exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); - exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, - fi); - BUG_ON(exts[nr].offset > 0); - BUG_ON(exts[nr].compression || exts[nr].encryption); - BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); - - cur_pos += exts[nr].num_bytes; - nr++; - - if (cur_pos + offset >= last_byte) - break; - - if (no_fragment) { - ret = 1; - goto out; - } - path->slots[0]++; - } - - BUG_ON(cur_pos + offset > last_byte); - if (cur_pos + offset < last_byte) { - ret = -ENOENT; - goto out; - } - ret = 0; -out: - btrfs_free_path(path); - if (ret) { - if (exts != *extents) - kfree(exts); - } else { - *extents = exts; - 
*nr_extents = nr; - } - return ret; -} - -static noinline int replace_one_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *extent_key, - struct btrfs_key *leaf_key, - struct btrfs_ref_path *ref_path, - struct disk_extent *new_extents, - int nr_extents) -{ - struct extent_buffer *leaf; - struct btrfs_file_extent_item *fi; - struct inode *inode = NULL; - struct btrfs_key key; - u64 lock_start = 0; - u64 lock_end = 0; - u64 num_bytes; - u64 ext_offset; - u64 search_end = (u64)-1; - u32 nritems; - int nr_scaned = 0; - int extent_locked = 0; - int extent_type; - int ret; - - memcpy(&key, leaf_key, sizeof(key)); - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { - if (key.objectid < ref_path->owner_objectid || - (key.objectid == ref_path->owner_objectid && - key.type < BTRFS_EXTENT_DATA_KEY)) { - key.objectid = ref_path->owner_objectid; - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = 0; - } - } - - while (1) { - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); - if (ret < 0) - goto out; - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); -next: - if (extent_locked && ret > 0) { - /* - * the file extent item was modified by someone - * before the extent got locked. - */ - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - extent_locked = 0; - } - - if (path->slots[0] >= nritems) { - if (++nr_scaned > 2) - break; - - BUG_ON(extent_locked); - ret = btrfs_next_leaf(root, path); - if (ret < 0) - goto out; - if (ret > 0) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - } - - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { - if ((key.objectid > ref_path->owner_objectid) || - (key.objectid == ref_path->owner_objectid && - key.type > BTRFS_EXTENT_DATA_KEY) || - key.offset >= search_end) - break; - } - - if (inode && key.objectid != inode->i_ino) { - BUG_ON(extent_locked); - btrfs_release_path(root, path); - mutex_unlock(&inode->i_mutex); - iput(inode); - inode = NULL; - continue; - } - - if (key.type != BTRFS_EXTENT_DATA_KEY) { - path->slots[0]++; - ret = 1; - goto next; - } - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - extent_type = btrfs_file_extent_type(leaf, fi); - if ((extent_type != BTRFS_FILE_EXTENT_REG && - extent_type != BTRFS_FILE_EXTENT_PREALLOC) || - (btrfs_file_extent_disk_bytenr(leaf, fi) != - extent_key->objectid)) { - path->slots[0]++; - ret = 1; - goto next; - } - - num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - ext_offset = btrfs_file_extent_offset(leaf, fi); - - if (search_end == (u64)-1) { - search_end = key.offset - ext_offset + - btrfs_file_extent_ram_bytes(leaf, fi); - } - - if (!extent_locked) { - lock_start = key.offset; - lock_end = lock_start + num_bytes - 1; - } else { - if (lock_start > key.offset || - lock_end + 1 < key.offset + num_bytes) { - unlock_extent(&BTRFS_I(inode)->io_tree, - lock_start, lock_end, GFP_NOFS); - extent_locked = 0; - } - } - - if (!inode) { - btrfs_release_path(root, path); - - inode = btrfs_iget_locked(root->fs_info->sb, - key.objectid, root); - if (inode->i_state & I_NEW) { - BTRFS_I(inode)->root = root; - BTRFS_I(inode)->location.objectid = - key.objectid; - BTRFS_I(inode)->location.type = - BTRFS_INODE_ITEM_KEY; - BTRFS_I(inode)->location.offset = 0; - btrfs_read_locked_inode(inode); - unlock_new_inode(inode); - } - /* - * some code call btrfs_commit_transaction while - * holding the 
i_mutex, so we can't use mutex_lock - * here. - */ - if (is_bad_inode(inode) || - !mutex_trylock(&inode->i_mutex)) { - iput(inode); - inode = NULL; - key.offset = (u64)-1; - goto skip; - } - } - - if (!extent_locked) { - struct btrfs_ordered_extent *ordered; - - btrfs_release_path(root, path); - - lock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - ordered = btrfs_lookup_first_ordered_extent(inode, - lock_end); - if (ordered && - ordered->file_offset <= lock_end && - ordered->file_offset + ordered->len > lock_start) { - unlock_extent(&BTRFS_I(inode)->io_tree, - lock_start, lock_end, GFP_NOFS); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - key.offset += num_bytes; - goto skip; - } - if (ordered) - btrfs_put_ordered_extent(ordered); - - extent_locked = 1; - continue; - } - - if (nr_extents == 1) { - /* update extent pointer in place */ - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extents[0].disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extents[0].disk_num_bytes); - btrfs_mark_buffer_dirty(leaf); - - btrfs_drop_extent_cache(inode, key.offset, - key.offset + num_bytes - 1, 0); - - ret = btrfs_inc_extent_ref(trans, root, - new_extents[0].disk_bytenr, - new_extents[0].disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, - key.objectid); - BUG_ON(ret); - - ret = btrfs_free_extent(trans, root, - extent_key->objectid, - extent_key->offset, - leaf->start, - btrfs_header_owner(leaf), - btrfs_header_generation(leaf), - key.objectid, 0); - BUG_ON(ret); - - btrfs_release_path(root, path); - key.offset += num_bytes; - } else { - BUG_ON(1); -#if 0 - u64 alloc_hint; - u64 extent_len; - int i; - /* - * drop old extent pointer at first, then insert the - * new pointers one bye one - */ - btrfs_release_path(root, path); - ret = btrfs_drop_extents(trans, root, inode, key.offset, - key.offset + num_bytes, - key.offset, &alloc_hint); - BUG_ON(ret); - - for (i = 0; i < nr_extents; i++) { - if (ext_offset >= new_extents[i].num_bytes) { - ext_offset -= new_extents[i].num_bytes; - continue; - } - extent_len = min(new_extents[i].num_bytes - - ext_offset, num_bytes); - - ret = btrfs_insert_empty_item(trans, root, - path, &key, - sizeof(*fi)); - BUG_ON(ret); - - leaf = path->nodes[0]; - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_generation(leaf, fi, - trans->transid); - btrfs_set_file_extent_type(leaf, fi, - BTRFS_FILE_EXTENT_REG); - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extents[i].disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extents[i].disk_num_bytes); - btrfs_set_file_extent_ram_bytes(leaf, fi, - new_extents[i].ram_bytes); - - btrfs_set_file_extent_compression(leaf, fi, - new_extents[i].compression); - btrfs_set_file_extent_encryption(leaf, fi, - new_extents[i].encryption); - btrfs_set_file_extent_other_encoding(leaf, fi, - new_extents[i].other_encoding); - - btrfs_set_file_extent_num_bytes(leaf, fi, - extent_len); - ext_offset += new_extents[i].offset; - btrfs_set_file_extent_offset(leaf, fi, - ext_offset); - btrfs_mark_buffer_dirty(leaf); - - btrfs_drop_extent_cache(inode, key.offset, - key.offset + extent_len - 1, 0); - - ret = btrfs_inc_extent_ref(trans, root, - new_extents[i].disk_bytenr, - new_extents[i].disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, key.objectid); - BUG_ON(ret); - btrfs_release_path(root, path); - - inode_add_bytes(inode, extent_len); - - ext_offset = 0; - 
num_bytes -= extent_len; - key.offset += extent_len; - - if (num_bytes == 0) - break; - } - BUG_ON(i >= nr_extents); -#endif - } - - if (extent_locked) { - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - extent_locked = 0; - } -skip: - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS && - key.offset >= search_end) - break; - - cond_resched(); - } - ret = 0; -out: - btrfs_release_path(root, path); - if (inode) { - mutex_unlock(&inode->i_mutex); - if (extent_locked) { - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - } - iput(inode); - } - return ret; -} - -int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *buf, u64 orig_start) -{ - int level; - int ret; - - BUG_ON(btrfs_header_generation(buf) != trans->transid); - BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); - - level = btrfs_header_level(buf); - if (level == 0) { - struct btrfs_leaf_ref *ref; - struct btrfs_leaf_ref *orig_ref; - - orig_ref = btrfs_lookup_leaf_ref(root, orig_start); - if (!orig_ref) - return -ENOENT; - - ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems); - if (!ref) { - btrfs_free_leaf_ref(root, orig_ref); - return -ENOMEM; - } - - ref->nritems = orig_ref->nritems; - memcpy(ref->extents, orig_ref->extents, - sizeof(ref->extents[0]) * ref->nritems); - - btrfs_free_leaf_ref(root, orig_ref); - - ref->root_gen = trans->transid; - ref->bytenr = buf->start; - ref->owner = btrfs_header_owner(buf); - ref->generation = btrfs_header_generation(buf); - - ret = btrfs_add_leaf_ref(root, ref, 0); - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } - return 0; -} - -static noinline int invalidate_extent_cache(struct btrfs_root *root, - struct extent_buffer *leaf, - struct btrfs_block_group_cache *group, - struct btrfs_root *target_root) -{ - struct btrfs_key key; - struct inode *inode = NULL; - struct btrfs_file_extent_item *fi; - struct extent_state *cached_state = NULL; - u64 num_bytes; - u64 skip_objectid = 0; - u32 nritems; - u32 i; - - nritems = btrfs_header_nritems(leaf); - for (i = 0; i < nritems; i++) { - btrfs_item_key_to_cpu(leaf, &key, i); - if (key.objectid == skip_objectid || - key.type != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) - continue; - if (!inode || inode->i_ino != key.objectid) { - iput(inode); - inode = btrfs_ilookup(target_root->fs_info->sb, - key.objectid, target_root, 1); - } - if (!inode) { - skip_objectid = key.objectid; - continue; - } - num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - - lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset, - key.offset + num_bytes - 1, 0, &cached_state, - GFP_NOFS); - btrfs_drop_extent_cache(inode, key.offset, - key.offset + num_bytes - 1, 1); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset, - key.offset + num_bytes - 1, &cached_state, - GFP_NOFS); - cond_resched(); - } - iput(inode); - return 0; -} - -static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *leaf, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode) -{ - struct btrfs_key key; - struct btrfs_key extent_key; - struct btrfs_file_extent_item *fi; - struct btrfs_leaf_ref *ref; - struct disk_extent *new_extent; - u64 bytenr; - u64 num_bytes; - u32 nritems; - u32 i; - int 
ext_index; - int nr_extent; - int ret; - - new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); - if (!new_extent) - return -ENOMEM; - - ref = btrfs_lookup_leaf_ref(root, leaf->start); - BUG_ON(!ref); - - ext_index = -1; - nritems = btrfs_header_nritems(leaf); - for (i = 0; i < nritems; i++) { - btrfs_item_key_to_cpu(leaf, &key, i); - if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); - num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); - if (bytenr == 0) - continue; - - ext_index++; - if (bytenr >= group->key.objectid + group->key.offset || - bytenr + num_bytes <= group->key.objectid) - continue; - - extent_key.objectid = bytenr; - extent_key.offset = num_bytes; - extent_key.type = BTRFS_EXTENT_ITEM_KEY; - nr_extent = 1; - ret = get_new_locations(reloc_inode, &extent_key, - group->key.objectid, 1, - &new_extent, &nr_extent); - if (ret > 0) - continue; - BUG_ON(ret < 0); - - BUG_ON(ref->extents[ext_index].bytenr != bytenr); - BUG_ON(ref->extents[ext_index].num_bytes != num_bytes); - ref->extents[ext_index].bytenr = new_extent->disk_bytenr; - ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; - - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extent->disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extent->disk_num_bytes); - btrfs_mark_buffer_dirty(leaf); - - ret = btrfs_inc_extent_ref(trans, root, - new_extent->disk_bytenr, - new_extent->disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, key.objectid); - BUG_ON(ret); - - ret = btrfs_free_extent(trans, root, - bytenr, num_bytes, leaf->start, - btrfs_header_owner(leaf), - btrfs_header_generation(leaf), - key.objectid, 0); - BUG_ON(ret); - cond_resched(); - } - kfree(new_extent); - BUG_ON(ext_index + 1 != ref->nritems); - btrfs_free_leaf_ref(root, ref); - return 0; -} - -int btrfs_free_reloc_root(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - int ret; - - if (root->reloc_root) { - reloc_root = root->reloc_root; - root->reloc_root = NULL; - list_add(&reloc_root->dead_list, - &root->fs_info->dead_reloc_roots); - - btrfs_set_root_bytenr(&reloc_root->root_item, - reloc_root->node->start); - btrfs_set_root_level(&root->root_item, - btrfs_header_level(reloc_root->node)); - memset(&reloc_root->root_item.drop_progress, 0, - sizeof(struct btrfs_disk_key)); - reloc_root->root_item.drop_level = 0; - - ret = btrfs_update_root(trans, root->fs_info->tree_root, - &reloc_root->root_key, - &reloc_root->root_item); - BUG_ON(ret); - } - return 0; -} - -int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) -{ - struct btrfs_trans_handle *trans; - struct btrfs_root *reloc_root; - struct btrfs_root *prev_root = NULL; - struct list_head dead_roots; - int ret; - unsigned long nr; - - INIT_LIST_HEAD(&dead_roots); - list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots); - - while (!list_empty(&dead_roots)) { - reloc_root = list_entry(dead_roots.prev, - struct btrfs_root, dead_list); - list_del_init(&reloc_root->dead_list); - - BUG_ON(reloc_root->commit_root != NULL); - while (1) { - trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - - mutex_lock(&root->fs_info->drop_mutex); - ret = btrfs_drop_snapshot(trans, reloc_root); - if (ret != -EAGAIN) - break; - mutex_unlock(&root->fs_info->drop_mutex); - - nr = 
trans->blocks_used; - ret = btrfs_end_transaction(trans, root); - BUG_ON(ret); - btrfs_btree_balance_dirty(root, nr); - } - - free_extent_buffer(reloc_root->node); - - ret = btrfs_del_root(trans, root->fs_info->tree_root, - &reloc_root->root_key); - BUG_ON(ret); - mutex_unlock(&root->fs_info->drop_mutex); - - nr = trans->blocks_used; - ret = btrfs_end_transaction(trans, root); - BUG_ON(ret); - btrfs_btree_balance_dirty(root, nr); - - kfree(prev_root); - prev_root = reloc_root; - } - if (prev_root) { - btrfs_remove_leaf_refs(prev_root, (u64)-1, 0); - kfree(prev_root); - } - return 0; -} - -int btrfs_add_dead_reloc_root(struct btrfs_root *root) -{ - list_add(&root->dead_list, &root->fs_info->dead_reloc_roots); - return 0; -} - -int btrfs_cleanup_reloc_trees(struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - struct btrfs_trans_handle *trans; - struct btrfs_key location; - int found; - int ret; - - mutex_lock(&root->fs_info->tree_reloc_mutex); - ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL); - BUG_ON(ret); - found = !list_empty(&root->fs_info->dead_reloc_roots); - mutex_unlock(&root->fs_info->tree_reloc_mutex); - - if (found) { - trans = btrfs_start_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - ret = btrfs_commit_transaction(trans, root); - BUG_ON(ret); - } - - location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID; - location.offset = (u64)-1; - location.type = BTRFS_ROOT_ITEM_KEY; - - reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); - BUG_ON(!reloc_root); - ret = btrfs_orphan_cleanup(reloc_root); - BUG_ON(ret); - return 0; -} - -static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - struct extent_buffer *eb; - struct btrfs_root_item *root_item; - struct btrfs_key root_key; - int ret; - - BUG_ON(!root->ref_cows); - if (root->reloc_root) - return 0; - - root_item = kmalloc(sizeof(*root_item), GFP_NOFS); - if (!root_item) - return -ENOMEM; - - ret = btrfs_copy_root(trans, root, root->commit_root, - &eb, BTRFS_TREE_RELOC_OBJECTID); - BUG_ON(ret); - - root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; - root_key.offset = root->root_key.objectid; - root_key.type = BTRFS_ROOT_ITEM_KEY; - - memcpy(root_item, &root->root_item, sizeof(root_item)); - btrfs_set_root_refs(root_item, 0); - btrfs_set_root_bytenr(root_item, eb->start); - btrfs_set_root_level(root_item, btrfs_header_level(eb)); - btrfs_set_root_generation(root_item, trans->transid); - - btrfs_tree_unlock(eb); - free_extent_buffer(eb); - - ret = btrfs_insert_root(trans, root->fs_info->tree_root, - &root_key, root_item); - BUG_ON(ret); - kfree(root_item); - - reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, - &root_key); - BUG_ON(IS_ERR(reloc_root)); - reloc_root->last_trans = trans->transid; - reloc_root->commit_root = NULL; - reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; - - root->reloc_root = reloc_root; - return 0; -} - -/* - * Core function of space balance. - * - * The idea is using reloc trees to relocate tree blocks in reference - * counted roots. There is one reloc tree for each subvol, and all - * reloc trees share same root key objectid. Reloc trees are snapshots - * of the latest committed roots of subvols (root->commit_root). - * - * To relocate a tree block referenced by a subvol, there are two steps. - * COW the block through subvol's reloc tree, then update block pointer - * in the subvol to point to the new block. 
Since all reloc trees share - * same root key objectid, doing special handing for tree blocks owned - * by them is easy. Once a tree block has been COWed in one reloc tree, - * we can use the resulting new block directly when the same block is - * required to COW again through other reloc trees. By this way, relocated - * tree blocks are shared between reloc trees, so they are also shared - * between subvols. - */ -static noinline int relocate_one_path(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *first_key, - struct btrfs_ref_path *ref_path, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode) -{ - struct btrfs_root *reloc_root; - struct extent_buffer *eb = NULL; - struct btrfs_key *keys; - u64 *nodes; - int level; - int shared_level; - int lowest_level = 0; - int ret; - - if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) - lowest_level = ref_path->owner_objectid; - - if (!root->ref_cows) { - path->lowest_level = lowest_level; - ret = btrfs_search_slot(trans, root, first_key, path, 0, 1); - BUG_ON(ret < 0); - path->lowest_level = 0; - btrfs_release_path(root, path); - return 0; - } - - mutex_lock(&root->fs_info->tree_reloc_mutex); - ret = init_reloc_tree(trans, root); - BUG_ON(ret); - reloc_root = root->reloc_root; - - shared_level = ref_path->shared_level; - ref_path->shared_level = BTRFS_MAX_LEVEL - 1; - - keys = ref_path->node_keys; - nodes = ref_path->new_nodes; - memset(&keys[shared_level + 1], 0, - sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1)); - memset(&nodes[shared_level + 1], 0, - sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1)); - - if (nodes[lowest_level] == 0) { - path->lowest_level = lowest_level; - ret = btrfs_search_slot(trans, reloc_root, first_key, path, - 0, 1); - BUG_ON(ret); - for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) { - eb = path->nodes[level]; - if (!eb || eb == reloc_root->node) - break; - nodes[level] = eb->start; - if (level == 0) - btrfs_item_key_to_cpu(eb, &keys[level], 0); - else - btrfs_node_key_to_cpu(eb, &keys[level], 0); - } - if (nodes[0] && - ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - eb = path->nodes[0]; - ret = replace_extents_in_leaf(trans, reloc_root, eb, - group, reloc_inode); - BUG_ON(ret); - } - btrfs_release_path(reloc_root, path); - } else { - ret = btrfs_merge_path(trans, reloc_root, keys, nodes, - lowest_level); - BUG_ON(ret); - } - - /* - * replace tree blocks in the fs tree with tree blocks in - * the reloc tree. 
- */ - ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level); - BUG_ON(ret < 0); - - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - ret = btrfs_search_slot(trans, reloc_root, first_key, path, - 0, 0); - BUG_ON(ret); - extent_buffer_get(path->nodes[0]); - eb = path->nodes[0]; - btrfs_release_path(reloc_root, path); - ret = invalidate_extent_cache(reloc_root, eb, group, root); - BUG_ON(ret); - free_extent_buffer(eb); - } - - mutex_unlock(&root->fs_info->tree_reloc_mutex); - path->lowest_level = 0; - return 0; -} - -static noinline int relocate_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *first_key, - struct btrfs_ref_path *ref_path) -{ - int ret; - - ret = relocate_one_path(trans, root, path, first_key, - ref_path, NULL, NULL); - BUG_ON(ret); - - return 0; -} - -static noinline int del_extent_zero(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_path *path, - struct btrfs_key *extent_key) -{ - int ret; - - ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1); - if (ret) - goto out; - ret = btrfs_del_item(trans, extent_root, path); -out: - btrfs_release_path(extent_root, path); - return ret; -} - -static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info, - struct btrfs_ref_path *ref_path) -{ - struct btrfs_key root_key; - - root_key.objectid = ref_path->root_objectid; - root_key.type = BTRFS_ROOT_ITEM_KEY; - if (is_cowonly_root(ref_path->root_objectid)) - root_key.offset = 0; - else - root_key.offset = (u64)-1; - - return btrfs_read_fs_root_no_name(fs_info, &root_key); -} - -static noinline int relocate_one_extent(struct btrfs_root *extent_root, - struct btrfs_path *path, - struct btrfs_key *extent_key, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode, int pass) -{ - struct btrfs_trans_handle *trans; - struct btrfs_root *found_root; - struct btrfs_ref_path *ref_path = NULL; - struct disk_extent *new_extents = NULL; - int nr_extents = 0; - int loops; - int ret; - int level; - struct btrfs_key first_key; - u64 prev_block = 0; - - - trans = btrfs_start_transaction(extent_root, 1); - BUG_ON(IS_ERR(trans)); - - if (extent_key->objectid == 0) { - ret = del_extent_zero(trans, extent_root, path, extent_key); - goto out; - } - - ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS); - if (!ref_path) { - ret = -ENOMEM; - goto out; - } - - for (loops = 0; ; loops++) { - if (loops == 0) { - ret = btrfs_first_ref_path(trans, extent_root, ref_path, - extent_key->objectid); - } else { - ret = btrfs_next_ref_path(trans, extent_root, ref_path); - } - if (ret < 0) - goto out; - if (ret > 0) - break; - - if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID || - ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID) - continue; - - found_root = read_ref_root(extent_root->fs_info, ref_path); - BUG_ON(!found_root); - /* - * for reference counted tree, only process reference paths - * rooted at the latest committed root. 
- */ - if (found_root->ref_cows && - ref_path->root_generation != found_root->root_key.offset) - continue; - - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - if (pass == 0) { - /* - * copy data extents to new locations - */ - u64 group_start = group->key.objectid; - ret = relocate_data_extent(reloc_inode, - extent_key, - group_start); - if (ret < 0) - goto out; - break; - } - level = 0; - } else { - level = ref_path->owner_objectid; - } - - if (prev_block != ref_path->nodes[level]) { - struct extent_buffer *eb; - u64 block_start = ref_path->nodes[level]; - u64 block_size = btrfs_level_size(found_root, level); - - eb = read_tree_block(found_root, block_start, - block_size, 0); - if (!eb) { - ret = -EIO; - goto out; - } - btrfs_tree_lock(eb); - BUG_ON(level != btrfs_header_level(eb)); - - if (level == 0) - btrfs_item_key_to_cpu(eb, &first_key, 0); - else - btrfs_node_key_to_cpu(eb, &first_key, 0); - - btrfs_tree_unlock(eb); - free_extent_buffer(eb); - prev_block = block_start; - } - - mutex_lock(&extent_root->fs_info->trans_mutex); - btrfs_record_root_in_trans(found_root); - mutex_unlock(&extent_root->fs_info->trans_mutex); - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - /* - * try to update data extent references while - * keeping metadata shared between snapshots. - */ - if (pass == 1) { - ret = relocate_one_path(trans, found_root, - path, &first_key, ref_path, - group, reloc_inode); - if (ret < 0) - goto out; - continue; - } - /* - * use fallback method to process the remaining - * references. - */ - if (!new_extents) { - u64 group_start = group->key.objectid; - new_extents = kmalloc(sizeof(*new_extents), - GFP_NOFS); - if (!new_extents) { - ret = -ENOMEM; - goto out; - } - nr_extents = 1; - ret = get_new_locations(reloc_inode, - extent_key, - group_start, 1, - &new_extents, - &nr_extents); - if (ret) - goto out; - } - ret = replace_one_extent(trans, found_root, - path, extent_key, - &first_key, ref_path, - new_extents, nr_extents); - } else { - ret = relocate_tree_block(trans, found_root, path, - &first_key, ref_path); - } - if (ret < 0) - goto out; - } - ret = 0; -out: - btrfs_end_transaction(trans, extent_root); - kfree(new_extents); - kfree(ref_path); - return ret; -} -#endif - static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) { u64 num_devices; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 57122a5e8473..5ff52b644a60 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3093,178 +3093,6 @@ out: return err; } -#if 0 -/* - * when truncating bytes in a file, it is possible to avoid reading - * the leaves that contain only checksum items. This can be the - * majority of the IO required to delete a large file, but it must - * be done carefully. - * - * The keys in the level just above the leaves are checked to make sure - * the lowest key in a given leaf is a csum key, and starts at an offset - * after the new size. - * - * Then the key for the next leaf is checked to make sure it also has - * a checksum item for the same file. If it does, we know our target leaf - * contains only checksum items, and it can be safely freed without reading - * it. - * - * This is just an optimization targeted at large files. It may do - * nothing. It will return 0 unless things went badly. 
- */ -static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct inode *inode, u64 new_size) -{ - struct btrfs_key key; - int ret; - int nritems; - struct btrfs_key found_key; - struct btrfs_key other_key; - struct btrfs_leaf_ref *ref; - u64 leaf_gen; - u64 leaf_start; - - path->lowest_level = 1; - key.objectid = inode->i_ino; - key.type = BTRFS_CSUM_ITEM_KEY; - key.offset = new_size; -again: - ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) - goto out; - - if (path->nodes[1] == NULL) { - ret = 0; - goto out; - } - ret = 0; - btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]); - nritems = btrfs_header_nritems(path->nodes[1]); - - if (!nritems) - goto out; - - if (path->slots[1] >= nritems) - goto next_node; - - /* did we find a key greater than anything we want to delete? */ - if (found_key.objectid > inode->i_ino || - (found_key.objectid == inode->i_ino && found_key.type > key.type)) - goto out; - - /* we check the next key in the node to make sure the leave contains - * only checksum items. This comparison doesn't work if our - * leaf is the last one in the node - */ - if (path->slots[1] + 1 >= nritems) { -next_node: - /* search forward from the last key in the node, this - * will bring us into the next node in the tree - */ - btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1); - - /* unlikely, but we inc below, so check to be safe */ - if (found_key.offset == (u64)-1) - goto out; - - /* search_forward needs a path with locks held, do the - * search again for the original key. It is possible - * this will race with a balance and return a path that - * we could modify, but this drop is just an optimization - * and is allowed to miss some leaves. - */ - btrfs_release_path(root, path); - found_key.offset++; - - /* setup a max key for search_forward */ - other_key.offset = (u64)-1; - other_key.type = key.type; - other_key.objectid = key.objectid; - - path->keep_locks = 1; - ret = btrfs_search_forward(root, &found_key, &other_key, - path, 0, 0); - path->keep_locks = 0; - if (ret || found_key.objectid != key.objectid || - found_key.type != key.type) { - ret = 0; - goto out; - } - - key.offset = found_key.offset; - btrfs_release_path(root, path); - cond_resched(); - goto again; - } - - /* we know there's one more slot after us in the tree, - * read that key so we can verify it is also a checksum item - */ - btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1); - - if (found_key.objectid < inode->i_ino) - goto next_key; - - if (found_key.type != key.type || found_key.offset < new_size) - goto next_key; - - /* - * if the key for the next leaf isn't a csum key from this objectid, - * we can't be sure there aren't good items inside this leaf. 
- * Bail out - */ - if (other_key.objectid != inode->i_ino || other_key.type != key.type) - goto out; - - leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]); - leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]); - /* - * it is safe to delete this leaf, it contains only - * csum items from this inode at an offset >= new_size - */ - ret = btrfs_del_leaf(trans, root, path, leaf_start); - BUG_ON(ret); - - if (root->ref_cows && leaf_gen < trans->transid) { - ref = btrfs_alloc_leaf_ref(root, 0); - if (ref) { - ref->root_gen = root->root_key.offset; - ref->bytenr = leaf_start; - ref->owner = 0; - ref->generation = leaf_gen; - ref->nritems = 0; - - btrfs_sort_leaf_ref(ref); - - ret = btrfs_add_leaf_ref(root, ref, 0); - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } else { - WARN_ON(1); - } - } -next_key: - btrfs_release_path(root, path); - - if (other_key.objectid == inode->i_ino && - other_key.type == key.type && other_key.offset > key.offset) { - key.offset = other_key.offset; - cond_resched(); - goto again; - } - ret = 0; -out: - /* fixup any changes we've made to the path */ - path->lowest_level = 0; - path->keep_locks = 0; - btrfs_release_path(root, path); - return ret; -} - -#endif - /* * this can truncate away extent items, csum items and directory items. * It starts at a high offset and removes keys until it can't find diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 955f76eb0fa8..211aceeb9ea0 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -346,49 +346,6 @@ out_unlock: return ret; } -#if 0 -/* - * rate limit against the drop_snapshot code. This helps to slow down new - * operations if the drop_snapshot code isn't able to keep up. - */ -static void throttle_on_drops(struct btrfs_root *root) -{ - struct btrfs_fs_info *info = root->fs_info; - int harder_count = 0; - -harder: - if (atomic_read(&info->throttles)) { - DEFINE_WAIT(wait); - int thr; - thr = atomic_read(&info->throttle_gen); - - do { - prepare_to_wait(&info->transaction_throttle, - &wait, TASK_UNINTERRUPTIBLE); - if (!atomic_read(&info->throttles)) { - finish_wait(&info->transaction_throttle, &wait); - break; - } - schedule(); - finish_wait(&info->transaction_throttle, &wait); - } while (thr == atomic_read(&info->throttle_gen)); - harder_count++; - - if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 && - harder_count < 2) - goto harder; - - if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 && - harder_count < 10) - goto harder; - - if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 && - harder_count < 20) - goto harder; - } -} -#endif - void btrfs_throttle(struct btrfs_root *root) { mutex_lock(&root->fs_info->trans_mutex); @@ -808,97 +765,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) return ret; } -#if 0 -/* - * when dropping snapshots, we generate a ton of delayed refs, and it makes - * sense not to join the transaction while it is trying to flush the current - * queue of delayed refs out. 
*
- * This is used by the drop snapshot code only
- */
-static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
-{
-        DEFINE_WAIT(wait);
-
-        mutex_lock(&info->trans_mutex);
-        while (info->running_transaction &&
-               info->running_transaction->delayed_refs.flushing) {
-                prepare_to_wait(&info->transaction_wait, &wait,
-                                TASK_UNINTERRUPTIBLE);
-                mutex_unlock(&info->trans_mutex);
-
-                schedule();
-
-                mutex_lock(&info->trans_mutex);
-                finish_wait(&info->transaction_wait, &wait);
-        }
-        mutex_unlock(&info->trans_mutex);
-        return 0;
-}
-
-/*
- * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
- * all of them
- */
-int btrfs_drop_dead_root(struct btrfs_root *root)
-{
-        struct btrfs_trans_handle *trans;
-        struct btrfs_root *tree_root = root->fs_info->tree_root;
-        unsigned long nr;
-        int ret;
-
-        while (1) {
-                /*
-                 * we don't want to jump in and create a bunch of
-                 * delayed refs if the transaction is starting to close
-                 */
-                wait_transaction_pre_flush(tree_root->fs_info);
-                trans = btrfs_start_transaction(tree_root, 1);
-
-                /*
-                 * we've joined a transaction, make sure it isn't
-                 * closing right now
-                 */
-                if (trans->transaction->delayed_refs.flushing) {
-                        btrfs_end_transaction(trans, tree_root);
-                        continue;
-                }
-
-                ret = btrfs_drop_snapshot(trans, root);
-                if (ret != -EAGAIN)
-                        break;
-
-                ret = btrfs_update_root(trans, tree_root,
-                                        &root->root_key,
-                                        &root->root_item);
-                if (ret)
-                        break;
-
-                nr = trans->blocks_used;
-                ret = btrfs_end_transaction(trans, tree_root);
-                BUG_ON(ret);
-
-                btrfs_btree_balance_dirty(tree_root, nr);
-                cond_resched();
-        }
-        BUG_ON(ret);
-
-        ret = btrfs_del_root(trans, tree_root, &root->root_key);
-        BUG_ON(ret);
-
-        nr = trans->blocks_used;
-        ret = btrfs_end_transaction(trans, tree_root);
-        BUG_ON(ret);
-
-        free_extent_buffer(root->node);
-        free_extent_buffer(root->commit_root);
-        kfree(root);
-
-        btrfs_btree_balance_dirty(tree_root, nr);
-        return ret;
-}
-#endif
-
 /*
  * new snapshots need to be created at a very specific time in the
  * transaction commit. This does the actual creation
-- cgit v1.2.3


From a2de733c78fa7af51ba9670482fa7d392aa67c57 Mon Sep 17 00:00:00 2001
From: Arne Jansen
Date: Tue, 8 Mar 2011 14:14:00 +0100
Subject: btrfs: scrub

This adds an initial implementation for scrub. It works quite
straightforwardly. Userspace issues an ioctl for each device in the
fs. For each device, it enumerates the allocated device chunks. For
each chunk, the contained extents are enumerated and the data
checksums fetched. The extents are read sequentially and the checksums
verified. If an error occurs (checksum or EIO), a good copy is
searched for. If one is found, the bad copy will be rewritten.

All enumerations happen from the commit roots. During a transaction
commit, the scrubs get paused and afterwards continue from the new
roots.

This commit is based on the series originally posted to linux-btrfs
with some improvements that resulted from comments from David Sterba,
Ilya Dryomov and Jan Schmidt.
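A rough sketch may help to show how the new entry points compose. The
helper below is illustrative only and not part of this patch: only
btrfs_scrub_dev() and struct btrfs_scrub_progress come from the code
added here, while the in-kernel loop over fs_devices->devices, the
elided device-list locking, and the helper name itself are assumptions
made for the example (the real caller is the per-device ioctl described
above, whose plumbing is not contained in this patch).

/*
 * Illustrative sketch (not part of this commit): scrub every device
 * of the filesystem over its full range, one after the other.
 * Device-list locking is elided for brevity.
 */
static int scrub_all_devices_sketch(struct btrfs_root *root)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        struct btrfs_device *device;
        struct btrfs_scrub_progress progress;
        int ret = 0;

        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                memset(&progress, 0, sizeof(progress));
                /* start at 0, end at (u64)-1: scrub the whole device */
                ret = btrfs_scrub_dev(root, device->devid, 0, (u64)-1,
                                      &progress);
                if (ret)
                        break;
                printk(KERN_INFO "scrub devid %llu: %llu csum errors, "
                       "%llu corrected\n",
                       (unsigned long long)device->devid,
                       (unsigned long long)progress.csum_errors,
                       (unsigned long long)progress.corrected_errors);
        }
        return ret;
}

Since btrfs_scrub_dev() only returns once the requested range has been
scrubbed or the scrub was cancelled, a loop like this runs the devices
sequentially; issuing one ioctl per device from userspace allows the
per-device scrubs to run concurrently instead.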
Signed-off-by: Arne Jansen --- fs/btrfs/Makefile | 2 +- fs/btrfs/ctree.h | 37 +- fs/btrfs/disk-io.c | 12 + fs/btrfs/file-item.c | 8 +- fs/btrfs/inode.c | 2 +- fs/btrfs/ioctl.h | 37 ++ fs/btrfs/relocation.c | 2 +- fs/btrfs/scrub.c | 1492 ++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/transaction.c | 3 + fs/btrfs/tree-log.c | 6 +- fs/btrfs/volumes.c | 4 +- fs/btrfs/volumes.h | 6 + 12 files changed, 1600 insertions(+), 11 deletions(-) create mode 100644 fs/btrfs/scrub.c diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 31610ea73aec..8fda3133c1b8 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ - compression.o delayed-ref.o relocation.o + compression.o delayed-ref.o relocation.o scrub.o diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2e61fe1b6b8c..31141ba6072d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ #include "extent_io.h" #include "extent_map.h" #include "async-thread.h" +#include "ioctl.h" struct btrfs_trans_handle; struct btrfs_transaction; @@ -510,6 +512,12 @@ struct btrfs_extent_item_v0 { /* use full backrefs for extent pointers in the block */ #define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) +/* + * this flag is only used internally by scrub and may be changed at any time + * it is only declared here to avoid collisions + */ +#define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) + struct btrfs_tree_block_info { struct btrfs_disk_key key; u8 level; @@ -1077,6 +1085,17 @@ struct btrfs_fs_info { void *bdev_holder; + /* private scrub information */ + struct mutex scrub_lock; + atomic_t scrubs_running; + atomic_t scrub_pause_req; + atomic_t scrubs_paused; + atomic_t scrub_cancel_req; + wait_queue_head_t scrub_pause_wait; + struct rw_semaphore scrub_super_lock; + int scrub_workers_refcnt; + struct btrfs_workers scrub_workers; + /* filesystem state */ u64 fs_state; }; @@ -2472,8 +2491,8 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, int btrfs_csum_truncate(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 isize); -int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, - u64 end, struct list_head *list); +int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, + struct list_head *list, int search_commit); /* inode.c */ /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ @@ -2637,4 +2656,18 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, u64 *bytes_to_reserve); void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, struct btrfs_pending_snapshot *pending); + +/* scrub.c */ +int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, + struct btrfs_scrub_progress *progress); +int btrfs_scrub_pause(struct btrfs_root *root); +int btrfs_scrub_pause_super(struct btrfs_root *root); +int btrfs_scrub_continue(struct btrfs_root *root); +int btrfs_scrub_continue_super(struct btrfs_root *root); +int btrfs_scrub_cancel(struct btrfs_root *root); +int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev); +int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid); +int btrfs_scrub_progress(struct btrfs_root *root, 
u64 devid, + struct btrfs_scrub_progress *progress); + #endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fe5aec9b3924..e48e8095c61f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1773,6 +1773,17 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); + mutex_init(&fs_info->scrub_lock); + atomic_set(&fs_info->scrubs_running, 0); + atomic_set(&fs_info->scrub_pause_req, 0); + atomic_set(&fs_info->scrubs_paused, 0); + atomic_set(&fs_info->scrub_cancel_req, 0); + init_waitqueue_head(&fs_info->scrub_pause_wait); + init_rwsem(&fs_info->scrub_super_lock); + fs_info->scrub_workers_refcnt = 0; + btrfs_init_workers(&fs_info->scrub_workers, "scrub", + fs_info->thread_pool_size, &fs_info->generic_worker); + sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); sb->s_bdi = &fs_info->bdi; @@ -2599,6 +2610,7 @@ int close_ctree(struct btrfs_root *root) fs_info->closing = 1; smp_mb(); + btrfs_scrub_cancel(root); btrfs_put_block_group_cache(fs_info); /* diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..39ca7c1250e7 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -266,7 +266,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, - struct list_head *list) + struct list_head *list, int search_commit) { struct btrfs_key key; struct btrfs_path *path; @@ -283,6 +283,12 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, path = btrfs_alloc_path(); BUG_ON(!path); + if (search_commit) { + path->skip_locking = 1; + path->reada = 2; + path->search_commit_root = 1; + } + key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.offset = start; key.type = BTRFS_EXTENT_CSUM_KEY; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 870869aab0b8..27142446b30a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1007,7 +1007,7 @@ static noinline int csum_exist_in_range(struct btrfs_root *root, LIST_HEAD(list); ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, - bytenr + num_bytes - 1, &list); + bytenr + num_bytes - 1, &list, 0); if (ret == 0 && list_empty(&list)) return 0; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 8fb382167b13..37ac030d64b4 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -42,6 +42,43 @@ struct btrfs_ioctl_vol_args_v2 { char name[BTRFS_SUBVOL_NAME_MAX + 1]; }; +/* + * structure to report errors and progress to userspace, either as a + * result of a finished scrub, a canceled scrub or a progress inquiry + */ +struct btrfs_scrub_progress { + __u64 data_extents_scrubbed; /* # of data extents scrubbed */ + __u64 tree_extents_scrubbed; /* # of tree extents scrubbed */ + __u64 data_bytes_scrubbed; /* # of data bytes scrubbed */ + __u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */ + __u64 read_errors; /* # of read errors encountered (EIO) */ + __u64 csum_errors; /* # of failed csum checks */ + __u64 verify_errors; /* # of occurences, where the metadata + * of a tree block did not match the + * expected values, like generation or + * logical */ + __u64 no_csum; /* # of 4k data block for which no csum + * is present, probably the result of + * data written with nodatasum */ + __u64 csum_discards; /* # of csum for which no data was found + * in the extent tree. */ + __u64 super_errors; /* # of bad super blocks encountered */ + __u64 malloc_errors; /* # of internal kmalloc errors. 
These
+                                * will likely cause an incomplete
+                                * scrub */
+        __u64 uncorrectable_errors; /* # of errors where either no intact
+                                * copy was found or the writeback
+                                * failed */
+        __u64 corrected_errors; /* # of errors corrected */
+        __u64 last_physical;    /* last physical address scrubbed. In
+                                * case a scrub was aborted, this can
+                                * be used to restart the scrub */
+        __u64 unverified_errors; /* # of occurrences where a read for a
+                                * full (64k) bio failed, but the re-
+                                * check succeeded for each 4k piece.
+                                * Intermittent error. */
+};
+
 #define BTRFS_INO_LOOKUP_PATH_MAX 4080
 struct btrfs_ioctl_ino_lookup_args {
         __u64 treeid;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 58250e09eb05..db1dffa9952b 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4242,7 +4242,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
         disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
         ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
-                                       disk_bytenr + len - 1, &list);
+                                       disk_bytenr + len - 1, &list, 0);
 
         while (!list_empty(&list)) {
                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
new file mode 100644
index 000000000000..70f9fa772ee9
--- /dev/null
+++ b/fs/btrfs/scrub.c
@@ -0,0 +1,1492 @@
+/*
+ * Copyright (C) 2011 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "ctree.h"
+#include "volumes.h"
+#include "disk-io.h"
+#include "ordered-data.h"
+
+/*
+ * This is only the first step towards a full-featured scrub. It reads all
+ * extents and super blocks and verifies the checksums. In case a bad checksum
+ * is found or the extent cannot be read, good data will be written back if
+ * any can be found.
+ *
+ * Future enhancements:
+ * - To enhance the performance, better read-ahead strategies for the
+ *   extent-tree can be employed.
+ * - In case an unrepairable extent is encountered, track which files are + * affected and report them + * - In case of a read error on files with nodatasum, map the file and read + * the extent to trigger a writeback of the good copy + * - track and record media errors, throw out bad devices + * - add a readonly mode + * - add a mode to also read unallocated space + * - make the prefetch cancellable + */ + +struct scrub_bio; +struct scrub_page; +struct scrub_dev; +struct scrub_fixup; +static void scrub_bio_end_io(struct bio *bio, int err); +static void scrub_checksum(struct btrfs_work *work); +static int scrub_checksum_data(struct scrub_dev *sdev, + struct scrub_page *spag, void *buffer); +static int scrub_checksum_tree_block(struct scrub_dev *sdev, + struct scrub_page *spag, u64 logical, + void *buffer); +static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer); +static void scrub_recheck_end_io(struct bio *bio, int err); +static void scrub_fixup_worker(struct btrfs_work *work); +static void scrub_fixup(struct scrub_fixup *fixup); + +#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */ +#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */ + +struct scrub_page { + u64 flags; /* extent flags */ + u64 generation; + u64 mirror_num; + int have_csum; + u8 csum[BTRFS_CSUM_SIZE]; +}; + +struct scrub_bio { + int index; + struct scrub_dev *sdev; + struct bio *bio; + int err; + u64 logical; + u64 physical; + struct scrub_page spag[SCRUB_PAGES_PER_BIO]; + u64 count; + int next_free; + struct btrfs_work work; +}; + +struct scrub_dev { + struct scrub_bio *bios[SCRUB_BIOS_PER_DEV]; + struct btrfs_device *dev; + int first_free; + int curr; + atomic_t in_flight; + spinlock_t list_lock; + wait_queue_head_t list_wait; + u16 csum_size; + struct list_head csum_list; + atomic_t cancel_req; + /* + * statistics + */ + struct btrfs_scrub_progress stat; + spinlock_t stat_lock; +}; + +struct scrub_fixup { + struct scrub_dev *sdev; + struct bio *bio; + u64 logical; + u64 physical; + struct scrub_page spag; + struct btrfs_work work; + int err; + int recheck; +}; + +static void scrub_free_csums(struct scrub_dev *sdev) +{ + while (!list_empty(&sdev->csum_list)) { + struct btrfs_ordered_sum *sum; + sum = list_first_entry(&sdev->csum_list, + struct btrfs_ordered_sum, list); + list_del(&sum->list); + kfree(sum); + } +} + +static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) +{ + int i; + int j; + struct page *last_page; + + if (!sdev) + return; + + for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { + struct scrub_bio *sbio = sdev->bios[i]; + struct bio *bio; + + if (!sbio) + break; + + bio = sbio->bio; + if (bio) { + last_page = NULL; + for (j = 0; j < bio->bi_vcnt; ++j) { + if (bio->bi_io_vec[j].bv_page == last_page) + continue; + last_page = bio->bi_io_vec[j].bv_page; + __free_page(last_page); + } + bio_put(bio); + } + kfree(sbio); + } + + scrub_free_csums(sdev); + kfree(sdev); +} + +static noinline_for_stack +struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) +{ + struct scrub_dev *sdev; + int i; + int j; + int ret; + struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; + + sdev = kzalloc(sizeof(*sdev), GFP_NOFS); + if (!sdev) + goto nomem; + sdev->dev = dev; + for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { + struct bio *bio; + struct scrub_bio *sbio; + + sbio = kzalloc(sizeof(*sbio), GFP_NOFS); + if (!sbio) + goto nomem; + sdev->bios[i] = sbio; + + bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); + if (!bio) + goto nomem; + + sbio->index = i; + sbio->sdev = sdev; + sbio->bio = 
bio; + sbio->count = 0; + sbio->work.func = scrub_checksum; + bio->bi_private = sdev->bios[i]; + bio->bi_end_io = scrub_bio_end_io; + bio->bi_sector = 0; + bio->bi_bdev = dev->bdev; + bio->bi_size = 0; + + for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) { + struct page *page; + page = alloc_page(GFP_NOFS); + if (!page) + goto nomem; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) + goto nomem; + } + WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO); + + if (i != SCRUB_BIOS_PER_DEV-1) + sdev->bios[i]->next_free = i + 1; + else + sdev->bios[i]->next_free = -1; + } + sdev->first_free = 0; + sdev->curr = -1; + atomic_set(&sdev->in_flight, 0); + atomic_set(&sdev->cancel_req, 0); + sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy); + INIT_LIST_HEAD(&sdev->csum_list); + + spin_lock_init(&sdev->list_lock); + spin_lock_init(&sdev->stat_lock); + init_waitqueue_head(&sdev->list_wait); + return sdev; + +nomem: + scrub_free_dev(sdev); + return ERR_PTR(-ENOMEM); +} + +/* + * scrub_recheck_error gets called when either verification of the page + * failed or the bio failed to read, e.g. with EIO. In the latter case, + * recheck_error gets called for every page in the bio, even though only + * one may be bad + */ +static void scrub_recheck_error(struct scrub_bio *sbio, int ix) +{ + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct bio *bio = NULL; + struct page *page = NULL; + struct scrub_fixup *fixup = NULL; + int ret; + + /* + * while we're in here we do not want the transaction to commit. + * To prevent it, we increment scrubs_running. scrub_pause will + * have to wait until we're finished. + * We can safely increment scrubs_running here, because we're + * in the context of the original bio which is still marked in_flight + */ + atomic_inc(&fs_info->scrubs_running); + + fixup = kzalloc(sizeof(*fixup), GFP_NOFS); + if (!fixup) + goto malloc_error; + + fixup->logical = sbio->logical + ix * PAGE_SIZE; + fixup->physical = sbio->physical + ix * PAGE_SIZE; + fixup->spag = sbio->spag[ix]; + fixup->sdev = sdev; + + bio = bio_alloc(GFP_NOFS, 1); + if (!bio) + goto malloc_error; + bio->bi_private = fixup; + bio->bi_size = 0; + bio->bi_bdev = sdev->dev->bdev; + fixup->bio = bio; + fixup->recheck = 0; + + page = alloc_page(GFP_NOFS); + if (!page) + goto malloc_error; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) + goto malloc_error; + + if (!sbio->err) { + /* + * shorter path: just a checksum error, go ahead and correct it + */ + scrub_fixup_worker(&fixup->work); + return; + } + + /* + * an I/O-error occurred for one of the blocks in the bio, not + * necessarily for this one, so first try to read it separately + */ + fixup->work.func = scrub_fixup_worker; + fixup->recheck = 1; + bio->bi_end_io = scrub_recheck_end_io; + bio->bi_sector = fixup->physical >> 9; + bio->bi_bdev = sdev->dev->bdev; + submit_bio(0, bio); + + return; + +malloc_error: + if (bio) + bio_put(bio); + if (page) + __free_page(page); + kfree(fixup); + spin_lock(&sdev->stat_lock); + ++sdev->stat.malloc_errors; + spin_unlock(&sdev->stat_lock); + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); +} + +static void scrub_recheck_end_io(struct bio *bio, int err) +{ + struct scrub_fixup *fixup = bio->bi_private; + struct btrfs_fs_info *fs_info = fixup->sdev->dev->dev_root->fs_info; + + fixup->err = err; + btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work); +} + +static int scrub_fixup_check(struct scrub_fixup *fixup) +{ + int ret = 1; +
struct page *page; + void *buffer; + u64 flags = fixup->spag.flags; + + page = fixup->bio->bi_io_vec[0].bv_page; + buffer = kmap_atomic(page, KM_USER0); + if (flags & BTRFS_EXTENT_FLAG_DATA) { + ret = scrub_checksum_data(fixup->sdev, + &fixup->spag, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { + ret = scrub_checksum_tree_block(fixup->sdev, + &fixup->spag, + fixup->logical, + buffer); + } else { + WARN_ON(1); + } + kunmap_atomic(buffer, KM_USER0); + + return ret; +} + +static void scrub_fixup_worker(struct btrfs_work *work) +{ + struct scrub_fixup *fixup; + struct btrfs_fs_info *fs_info; + u64 flags; + int ret = 1; + + fixup = container_of(work, struct scrub_fixup, work); + fs_info = fixup->sdev->dev->dev_root->fs_info; + flags = fixup->spag.flags; + + if (fixup->recheck && fixup->err == 0) + ret = scrub_fixup_check(fixup); + + if (ret || fixup->err) + scrub_fixup(fixup); + + __free_page(fixup->bio->bi_io_vec[0].bv_page); + bio_put(fixup->bio); + + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); + + kfree(fixup); +} + +static void scrub_fixup_end_io(struct bio *bio, int err) +{ + complete((struct completion *)bio->bi_private); +} + +static void scrub_fixup(struct scrub_fixup *fixup) +{ + struct scrub_dev *sdev = fixup->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; + struct btrfs_multi_bio *multi = NULL; + struct bio *bio = fixup->bio; + u64 length; + int i; + int ret; + DECLARE_COMPLETION_ONSTACK(complete); + + if ((fixup->spag.flags & BTRFS_EXTENT_FLAG_DATA) && + (fixup->spag.have_csum == 0)) { + /* + * nodatasum, don't try to fix anything + * FIXME: we can do better, open the inode and trigger a + * writeback + */ + goto uncorrectable; + } + + length = PAGE_SIZE; + ret = btrfs_map_block(map_tree, REQ_WRITE, fixup->logical, &length, + &multi, 0); + if (ret || !multi || length < PAGE_SIZE) { + printk(KERN_ERR + "scrub_fixup: btrfs_map_block failed us for %llu\n", + (unsigned long long)fixup->logical); + WARN_ON(1); + return; + } + + if (multi->num_stripes == 1) { + /* there aren't any replicas */ + goto uncorrectable; + } + + /* + * first find a good copy + */ + for (i = 0; i < multi->num_stripes; ++i) { + if (i == fixup->spag.mirror_num) + continue; + + bio->bi_sector = multi->stripes[i].physical >> 9; + bio->bi_bdev = multi->stripes[i].dev->bdev; + bio->bi_size = PAGE_SIZE; + bio->bi_next = NULL; + bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_comp_cpu = -1; + bio->bi_end_io = scrub_fixup_end_io; + bio->bi_private = &complete; + + submit_bio(0, bio); + + wait_for_completion(&complete); + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + /* I/O-error, this is not a good copy */ + continue; + + ret = scrub_fixup_check(fixup); + if (ret == 0) + break; + } + if (i == multi->num_stripes) + goto uncorrectable; + + /* + * the bio now contains good data, write it back + */ + bio->bi_sector = fixup->physical >> 9; + bio->bi_bdev = sdev->dev->bdev; + bio->bi_size = PAGE_SIZE; + bio->bi_next = NULL; + bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_comp_cpu = -1; + bio->bi_end_io = scrub_fixup_end_io; + bio->bi_private = &complete; + + submit_bio(REQ_WRITE, bio); + + wait_for_completion(&complete); + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + /* I/O-error, writeback failed, give up */ + goto uncorrectable; + + kfree(multi); + spin_lock(&sdev->stat_lock); + ++sdev->stat.corrected_errors; + spin_unlock(&sdev->stat_lock); + + if (printk_ratelimit()) + printk(KERN_ERR 
"btrfs: fixed up at %llu\n", + (unsigned long long)fixup->logical); + return; + +uncorrectable: + kfree(multi); + spin_lock(&sdev->stat_lock); + ++sdev->stat.uncorrectable_errors; + spin_unlock(&sdev->stat_lock); + + if (printk_ratelimit()) + printk(KERN_ERR "btrfs: unable to fixup at %llu\n", + (unsigned long long)fixup->logical); +} + +static void scrub_bio_end_io(struct bio *bio, int err) +{ + struct scrub_bio *sbio = bio->bi_private; + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + + sbio->err = err; + + btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); +} + +static void scrub_checksum(struct btrfs_work *work) +{ + struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); + struct scrub_dev *sdev = sbio->sdev; + struct page *page; + void *buffer; + int i; + u64 flags; + u64 logical; + int ret; + + if (sbio->err) { + struct bio *bio; + struct bio *old_bio; + + for (i = 0; i < sbio->count; ++i) + scrub_recheck_error(sbio, i); + spin_lock(&sdev->stat_lock); + ++sdev->stat.read_errors; + spin_unlock(&sdev->stat_lock); + + /* + * FIXME: allocate a new bio after a media error. I haven't + * figured out how to reuse this one + */ + old_bio = sbio->bio; + bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); + if (!bio) { + /* + * alloc failed. cancel the scrub and don't requeue + * this sbio + */ + printk(KERN_ERR "btrfs scrub: allocation failure, " + "cancelling scrub\n"); + atomic_inc(&sdev->dev->dev_root->fs_info-> + scrub_cancel_req); + goto out_no_enqueue; + } + sbio->bio = bio; + bio->bi_private = sbio; + bio->bi_end_io = scrub_bio_end_io; + bio->bi_sector = 0; + bio->bi_bdev = sbio->sdev->dev->bdev; + bio->bi_size = 0; + for (i = 0; i < SCRUB_PAGES_PER_BIO; ++i) { + struct page *page; + page = old_bio->bi_io_vec[i].bv_page; + bio_add_page(bio, page, PAGE_SIZE, 0); + } + bio_put(old_bio); + goto out; + } + for (i = 0; i < sbio->count; ++i) { + page = sbio->bio->bi_io_vec[i].bv_page; + buffer = kmap_atomic(page, KM_USER0); + flags = sbio->spag[i].flags; + logical = sbio->logical + i * PAGE_SIZE; + ret = 0; + if (flags & BTRFS_EXTENT_FLAG_DATA) { + ret = scrub_checksum_data(sdev, sbio->spag + i, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { + ret = scrub_checksum_tree_block(sdev, sbio->spag + i, + logical, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_SUPER) { + BUG_ON(i); + (void)scrub_checksum_super(sbio, buffer); + } else { + WARN_ON(1); + } + kunmap_atomic(buffer, KM_USER0); + if (ret) + scrub_recheck_error(sbio, i); + } + +out: + spin_lock(&sdev->list_lock); + sbio->next_free = sdev->first_free; + sdev->first_free = sbio->index; + spin_unlock(&sdev->list_lock); +out_no_enqueue: + atomic_dec(&sdev->in_flight); + wake_up(&sdev->list_wait); +} + +static int scrub_checksum_data(struct scrub_dev *sdev, + struct scrub_page *spag, void *buffer) +{ + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + struct btrfs_root *root = sdev->dev->dev_root; + + if (!spag->have_csum) + return 0; + + crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, spag->csum, sdev->csum_size)) + fail = 1; + + spin_lock(&sdev->stat_lock); + ++sdev->stat.data_extents_scrubbed; + sdev->stat.data_bytes_scrubbed += PAGE_SIZE; + if (fail) + ++sdev->stat.csum_errors; + spin_unlock(&sdev->stat_lock); + + return fail; +} + +static int scrub_checksum_tree_block(struct scrub_dev *sdev, + struct scrub_page *spag, u64 logical, + void *buffer) +{ + struct btrfs_header *h; + 
struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + int crc_fail = 0; + + /* + * we don't use the getter functions here, as we + * a) don't have an extent buffer and + * b) the page is already kmapped + */ + h = (struct btrfs_header *)buffer; + + if (logical != le64_to_cpu(h->bytenr)) + ++fail; + + if (spag->generation != le64_to_cpu(h->generation)) + ++fail; + + if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) + ++fail; + + if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, + BTRFS_UUID_SIZE)) + ++fail; + + crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, + PAGE_SIZE - BTRFS_CSUM_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, h->csum, sdev->csum_size)) + ++crc_fail; + + spin_lock(&sdev->stat_lock); + ++sdev->stat.tree_extents_scrubbed; + sdev->stat.tree_bytes_scrubbed += PAGE_SIZE; + if (crc_fail) + ++sdev->stat.csum_errors; + if (fail) + ++sdev->stat.verify_errors; + spin_unlock(&sdev->stat_lock); + + return fail || crc_fail; +} + +static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer) +{ + struct btrfs_super_block *s; + u64 logical; + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + + s = (struct btrfs_super_block *)buffer; + logical = sbio->logical; + + if (logical != le64_to_cpu(s->bytenr)) + ++fail; + + if (sbio->spag[0].generation != le64_to_cpu(s->generation)) + ++fail; + + if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) + ++fail; + + crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, + PAGE_SIZE - BTRFS_CSUM_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, s->csum, sbio->sdev->csum_size)) + ++fail; + + if (fail) { + /* + * if we find an error in a super block, we just report it. 
+ * They will get written with the next transaction commit + * anyway + */ + spin_lock(&sdev->stat_lock); + ++sdev->stat.super_errors; + spin_unlock(&sdev->stat_lock); + } + + return fail; +} + +static int scrub_submit(struct scrub_dev *sdev) +{ + struct scrub_bio *sbio; + + if (sdev->curr == -1) + return 0; + + sbio = sdev->bios[sdev->curr]; + + sbio->bio->bi_sector = sbio->physical >> 9; + sbio->bio->bi_size = sbio->count * PAGE_SIZE; + sbio->bio->bi_next = NULL; + sbio->bio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bio->bi_comp_cpu = -1; + sbio->bio->bi_bdev = sdev->dev->bdev; + sbio->err = 0; + sdev->curr = -1; + atomic_inc(&sdev->in_flight); + + submit_bio(0, sbio->bio); + + return 0; +} + +static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, + u64 physical, u64 flags, u64 gen, u64 mirror_num, + u8 *csum, int force) +{ + struct scrub_bio *sbio; + +again: + /* + * grab a fresh bio or wait for one to become available + */ + while (sdev->curr == -1) { + spin_lock(&sdev->list_lock); + sdev->curr = sdev->first_free; + if (sdev->curr != -1) { + sdev->first_free = sdev->bios[sdev->curr]->next_free; + sdev->bios[sdev->curr]->next_free = -1; + sdev->bios[sdev->curr]->count = 0; + spin_unlock(&sdev->list_lock); + } else { + spin_unlock(&sdev->list_lock); + wait_event(sdev->list_wait, sdev->first_free != -1); + } + } + sbio = sdev->bios[sdev->curr]; + if (sbio->count == 0) { + sbio->physical = physical; + sbio->logical = logical; + } else if (sbio->physical + sbio->count * PAGE_SIZE != physical) { + scrub_submit(sdev); + goto again; + } + sbio->spag[sbio->count].flags = flags; + sbio->spag[sbio->count].generation = gen; + sbio->spag[sbio->count].have_csum = 0; + sbio->spag[sbio->count].mirror_num = mirror_num; + if (csum) { + sbio->spag[sbio->count].have_csum = 1; + memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); + } + ++sbio->count; + if (sbio->count == SCRUB_PAGES_PER_BIO || force) + scrub_submit(sdev); + + return 0; +} + +static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len, + u8 *csum) +{ + struct btrfs_ordered_sum *sum = NULL; + int ret = 0; + unsigned long i; + unsigned long num_sectors; + u32 sectorsize = sdev->dev->dev_root->sectorsize; + + while (!list_empty(&sdev->csum_list)) { + sum = list_first_entry(&sdev->csum_list, + struct btrfs_ordered_sum, list); + if (sum->bytenr > logical) + return 0; + if (sum->bytenr + sum->len > logical) + break; + + ++sdev->stat.csum_discards; + list_del(&sum->list); + kfree(sum); + sum = NULL; + } + if (!sum) + return 0; + + num_sectors = sum->len / sectorsize; + for (i = 0; i < num_sectors; ++i) { + if (sum->sums[i].bytenr == logical) { + memcpy(csum, &sum->sums[i].sum, sdev->csum_size); + ret = 1; + break; + } + } + if (ret && i == num_sectors - 1) { + list_del(&sum->list); + kfree(sum); + } + return ret; +} + +/* scrub extent tries to collect up to 64 kB for each bio */ +static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len, + u64 physical, u64 flags, u64 gen, u64 mirror_num) +{ + int ret; + u8 csum[BTRFS_CSUM_SIZE]; + + while (len) { + u64 l = min_t(u64, len, PAGE_SIZE); + int have_csum = 0; + + if (flags & BTRFS_EXTENT_FLAG_DATA) { + /* push csums to sbio */ + have_csum = scrub_find_csum(sdev, logical, l, csum); + if (have_csum == 0) + ++sdev->stat.no_csum; + } + ret = scrub_page(sdev, logical, l, physical, flags, gen, + mirror_num, have_csum ? 
csum : NULL, 0); + if (ret) + return ret; + len -= l; + logical += l; + physical += l; + } + return 0; +} + +static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, + struct map_lookup *map, int num, u64 base, u64 length) +{ + struct btrfs_path *path; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct btrfs_root *root = fs_info->extent_root; + struct btrfs_root *csum_root = fs_info->csum_root; + struct btrfs_extent_item *extent; + u64 flags; + int ret; + int slot; + int i; + u64 nstripes; + int start_stripe; + struct extent_buffer *l; + struct btrfs_key key; + u64 physical; + u64 logical; + u64 generation; + u64 mirror_num; + + u64 increment = map->stripe_len; + u64 offset; + + nstripes = length; + offset = 0; + do_div(nstripes, map->stripe_len); + if (map->type & BTRFS_BLOCK_GROUP_RAID0) { + offset = map->stripe_len * num; + increment = map->stripe_len * map->num_stripes; + mirror_num = 0; + } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { + int factor = map->num_stripes / map->sub_stripes; + offset = map->stripe_len * (num / map->sub_stripes); + increment = map->stripe_len * factor; + mirror_num = num % map->sub_stripes; + } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { + increment = map->stripe_len; + mirror_num = num % map->num_stripes; + } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { + increment = map->stripe_len; + mirror_num = num % map->num_stripes; + } else { + increment = map->stripe_len; + mirror_num = 0; + } + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + path->reada = 2; + path->search_commit_root = 1; + path->skip_locking = 1; + + /* + * find all extents for each stripe and just read them to get + * them into the page cache + * FIXME: we can do better. build a more intelligent prefetching + */ + logical = base + offset; + physical = map->stripes[num].physical; + ret = 0; + for (i = 0; i < nstripes; ++i) { + key.objectid = logical; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = (u64)0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + l = path->nodes[0]; + slot = path->slots[0]; + btrfs_item_key_to_cpu(l, &key, slot); + if (key.objectid != logical) { + ret = btrfs_previous_item(root, path, 0, + BTRFS_EXTENT_ITEM_KEY); + if (ret < 0) + goto out; + } + + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid >= logical + map->stripe_len) + break; + + path->slots[0]++; + } + btrfs_release_path(root, path); + logical += increment; + physical += map->stripe_len; + cond_resched(); + } + + /* + * collect all data csums for the stripe to avoid seeking during + * the scrub. This might currently (crc32) end up to be about 1MB + */ + start_stripe = 0; +again: + logical = base + offset + start_stripe * increment; + for (i = start_stripe; i < nstripes; ++i) { + ret = btrfs_lookup_csums_range(csum_root, logical, + logical + map->stripe_len - 1, + &sdev->csum_list, 1); + if (ret) + goto out; + + logical += increment; + cond_resched(); + } + /* + * now find all extents for each stripe and scrub them + */ + logical = base + offset + start_stripe * increment; + physical = map->stripes[num].physical + start_stripe * map->stripe_len; + ret = 0; + for (i = start_stripe; i < nstripes; ++i) { + /* + * canceled? 
+ */ + if (atomic_read(&fs_info->scrub_cancel_req) || + atomic_read(&sdev->cancel_req)) { + ret = -ECANCELED; + goto out; + } + /* + * check to see if we have to pause + */ + if (atomic_read(&fs_info->scrub_pause_req)) { + /* push queued extents */ + scrub_submit(sdev); + wait_event(sdev->list_wait, + atomic_read(&sdev->in_flight) == 0); + atomic_inc(&fs_info->scrubs_paused); + wake_up(&fs_info->scrub_pause_wait); + mutex_lock(&fs_info->scrub_lock); + while (atomic_read(&fs_info->scrub_pause_req)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrub_pause_req) == 0); + mutex_lock(&fs_info->scrub_lock); + } + atomic_dec(&fs_info->scrubs_paused); + mutex_unlock(&fs_info->scrub_lock); + wake_up(&fs_info->scrub_pause_wait); + scrub_free_csums(sdev); + start_stripe = i; + goto again; + } + + key.objectid = logical; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = (u64)0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + l = path->nodes[0]; + slot = path->slots[0]; + btrfs_item_key_to_cpu(l, &key, slot); + if (key.objectid != logical) { + ret = btrfs_previous_item(root, path, 0, + BTRFS_EXTENT_ITEM_KEY); + if (ret < 0) + goto out; + } + + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid + key.offset <= logical) + goto next; + + if (key.objectid >= logical + map->stripe_len) + break; + + if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) + goto next; + + extent = btrfs_item_ptr(l, slot, + struct btrfs_extent_item); + flags = btrfs_extent_flags(l, extent); + generation = btrfs_extent_generation(l, extent); + + if (key.objectid < logical && + (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) { + printk(KERN_ERR + "btrfs scrub: tree block %llu spanning " + "stripes, ignored. logical=%llu\n", + (unsigned long long)key.objectid, + (unsigned long long)logical); + goto next; + } + + /* + * trim extent to this stripe + */ + if (key.objectid < logical) { + key.offset -= logical - key.objectid; + key.objectid = logical; + } + if (key.objectid + key.offset > + logical + map->stripe_len) { + key.offset = logical + map->stripe_len - + key.objectid; + } + + ret = scrub_extent(sdev, key.objectid, key.offset, + key.objectid - logical + physical, + flags, generation, mirror_num); + if (ret) + goto out; + +next: + path->slots[0]++; + } + btrfs_release_path(root, path); + logical += increment; + physical += map->stripe_len; + spin_lock(&sdev->stat_lock); + sdev->stat.last_physical = physical; + spin_unlock(&sdev->stat_lock); + } + /* push queued extents */ + scrub_submit(sdev); + +out: + btrfs_free_path(path); + return ret < 0 ? 
ret : 0; +} + +static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, + u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length) +{ + struct btrfs_mapping_tree *map_tree = + &sdev->dev->dev_root->fs_info->mapping_tree; + struct map_lookup *map; + struct extent_map *em; + int i; + int ret = -EINVAL; + + read_lock(&map_tree->map_tree.lock); + em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); + read_unlock(&map_tree->map_tree.lock); + + if (!em) + return -EINVAL; + + map = (struct map_lookup *)em->bdev; + if (em->start != chunk_offset) + goto out; + + if (em->len < length) + goto out; + + for (i = 0; i < map->num_stripes; ++i) { + if (map->stripes[i].dev == sdev->dev) { + ret = scrub_stripe(sdev, map, i, chunk_offset, length); + if (ret) + goto out; + } + } +out: + free_extent_map(em); + + return ret; +} + +static noinline_for_stack +int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) +{ + struct btrfs_dev_extent *dev_extent = NULL; + struct btrfs_path *path; + struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u64 length; + u64 chunk_tree; + u64 chunk_objectid; + u64 chunk_offset; + int ret; + int slot; + struct extent_buffer *l; + struct btrfs_key key; + struct btrfs_key found_key; + struct btrfs_block_group_cache *cache; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + path->reada = 2; + path->search_commit_root = 1; + path->skip_locking = 1; + + key.objectid = sdev->dev->devid; + key.offset = 0ull; + key.type = BTRFS_DEV_EXTENT_KEY; + + + while (1) { + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + ret = 0; + + l = path->nodes[0]; + slot = path->slots[0]; + + btrfs_item_key_to_cpu(l, &found_key, slot); + + if (found_key.objectid != sdev->dev->devid) + break; + + if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) + break; + + if (found_key.offset >= end) + break; + + if (found_key.offset < key.offset) + break; + + dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); + length = btrfs_dev_extent_length(l, dev_extent); + + if (found_key.offset + length <= start) { + key.offset = found_key.offset + length; + btrfs_release_path(root, path); + continue; + } + + chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); + chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); + chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); + + /* + * get a reference on the corresponding block group to prevent + * the chunk from going away while we scrub it + */ + cache = btrfs_lookup_block_group(fs_info, chunk_offset); + if (!cache) { + ret = -ENOENT; + goto out; + } + ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, + chunk_offset, length); + btrfs_put_block_group(cache); + if (ret) + break; + + key.offset = found_key.offset + length; + btrfs_release_path(root, path); + } + +out: + btrfs_free_path(path); + return ret; +} + +static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) +{ + int i; + u64 bytenr; + u64 gen; + int ret; + struct btrfs_device *device = sdev->dev; + struct btrfs_root *root = device->dev_root; + + gen = root->fs_info->last_trans_committed; + + for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { + bytenr = btrfs_sb_offset(i); + if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) + break; + + ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr, + BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1); + if (ret) + return ret; + } + wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); + + return 0; 
+} + +/* + * get a reference count on fs_info->scrub_workers. start worker if necessary + */ +static noinline_for_stack int scrub_workers_get(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (fs_info->scrub_workers_refcnt == 0) + btrfs_start_workers(&fs_info->scrub_workers, 1); + ++fs_info->scrub_workers_refcnt; + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +static noinline_for_stack void scrub_workers_put(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (--fs_info->scrub_workers_refcnt == 0) + btrfs_stop_workers(&fs_info->scrub_workers); + WARN_ON(fs_info->scrub_workers_refcnt < 0); + mutex_unlock(&fs_info->scrub_lock); +} + + +int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, + struct btrfs_scrub_progress *progress) +{ + struct scrub_dev *sdev; + struct btrfs_fs_info *fs_info = root->fs_info; + int ret; + struct btrfs_device *dev; + + if (root->fs_info->closing) + return -EINVAL; + + /* + * check some assumptions + */ + if (root->sectorsize != PAGE_SIZE || + root->sectorsize != root->leafsize || + root->sectorsize != root->nodesize) { + printk(KERN_ERR "btrfs_scrub: size assumptions fail\n"); + return -EINVAL; + } + + ret = scrub_workers_get(root); + if (ret) + return ret; + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (!dev || dev->missing) { + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -ENODEV; + } + mutex_lock(&fs_info->scrub_lock); + + if (!dev->in_fs_metadata) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -ENODEV; + } + + if (dev->scrub_device) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -EINPROGRESS; + } + sdev = scrub_setup_dev(dev); + if (IS_ERR(sdev)) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return PTR_ERR(sdev); + } + dev->scrub_device = sdev; + + atomic_inc(&fs_info->scrubs_running); + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + + down_read(&fs_info->scrub_super_lock); + ret = scrub_supers(sdev); + up_read(&fs_info->scrub_super_lock); + + if (!ret) + ret = scrub_enumerate_chunks(sdev, start, end); + + wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); + + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); + + if (progress) + memcpy(progress, &sdev->stat, sizeof(*progress)); + + mutex_lock(&fs_info->scrub_lock); + dev->scrub_device = NULL; + mutex_unlock(&fs_info->scrub_lock); + + scrub_free_dev(sdev); + scrub_workers_put(root); + + return ret; +} + +int btrfs_scrub_pause(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + atomic_inc(&fs_info->scrub_pause_req); + while (atomic_read(&fs_info->scrubs_paused) != + atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrubs_paused) == + atomic_read(&fs_info->scrubs_running)); + mutex_lock(&fs_info->scrub_lock); + } + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +int btrfs_scrub_continue(struct 
btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + atomic_dec(&fs_info->scrub_pause_req); + wake_up(&fs_info->scrub_pause_wait); + return 0; +} + +int btrfs_scrub_pause_super(struct btrfs_root *root) +{ + down_write(&root->fs_info->scrub_super_lock); + return 0; +} + +int btrfs_scrub_continue_super(struct btrfs_root *root) +{ + up_write(&root->fs_info->scrub_super_lock); + return 0; +} + +int btrfs_scrub_cancel(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (!atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + return -ENOTCONN; + } + + atomic_inc(&fs_info->scrub_cancel_req); + while (atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrubs_running) == 0); + mutex_lock(&fs_info->scrub_lock); + } + atomic_dec(&fs_info->scrub_cancel_req); + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct scrub_dev *sdev; + + mutex_lock(&fs_info->scrub_lock); + sdev = dev->scrub_device; + if (!sdev) { + mutex_unlock(&fs_info->scrub_lock); + return -ENOTCONN; + } + atomic_inc(&sdev->cancel_req); + while (dev->scrub_device) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + dev->scrub_device == NULL); + mutex_lock(&fs_info->scrub_lock); + } + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} +int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_device *dev; + int ret; + + /* + * we have to hold the device_list_mutex here so the device + * does not go away in cancel_dev. FIXME: find a better solution + */ + mutex_lock(&fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (!dev) { + mutex_unlock(&fs_info->fs_devices->device_list_mutex); + return -ENODEV; + } + ret = btrfs_scrub_cancel_dev(root, dev); + mutex_unlock(&fs_info->fs_devices->device_list_mutex); + + return ret; +} + +int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, + struct btrfs_scrub_progress *progress) +{ + struct btrfs_device *dev; + struct scrub_dev *sdev = NULL; + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (dev) + sdev = dev->scrub_device; + if (sdev) + memcpy(progress, &sdev->stat, sizeof(*progress)); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + + return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV; +} diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..37c2302a08d4 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1321,6 +1321,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, WARN_ON(cur_trans != trans->transaction); + btrfs_scrub_pause(root); /* btrfs_commit_tree_roots is responsible for getting the * various roots consistent with each other. 
Every pointer * in the tree of tree roots has to point to the most up to date @@ -1405,6 +1406,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_unlock(&root->fs_info->trans_mutex); + btrfs_scrub_continue(root); + if (current->journal_info == trans) current->journal_info = NULL; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba4..f1a0726da5f5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -614,7 +614,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, ret = btrfs_lookup_csums_range(root->log_root, csum_start, csum_end - 1, - &ordered_sums); + &ordered_sums, 0); BUG_ON(ret); while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums; @@ -2093,7 +2093,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, * the running transaction open, so a full commit can't hop * in and cause problems either. */ + btrfs_scrub_pause_super(root); write_ctree_super(trans, root->fs_info->tree_root, 1); + btrfs_scrub_continue_super(root); ret = 0; mutex_lock(&root->log_mutex); @@ -2689,7 +2691,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, ret = btrfs_lookup_csums_range( log->fs_info->csum_root, ds + cs, ds + cs + cl - 1, - &ordered_sums); + &ordered_sums, 0); BUG_ON(ret); } } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8b9fb8c7683d..89ca8f110b6e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -38,9 +38,6 @@ static int init_first_rw_device(struct btrfs_trans_handle *trans, struct btrfs_device *device); static int btrfs_relocate_sys_chunks(struct btrfs_root *root); -#define map_lookup_size(n) (sizeof(struct map_lookup) + \ - (sizeof(struct btrfs_bio_stripe) * (n))) - static DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); @@ -1334,6 +1331,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) goto error_undo; device->in_fs_metadata = 0; + btrfs_scrub_cancel_dev(root, device); /* * the device list mutex makes sure that we don't change diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..f7c20123a1fe 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -85,6 +85,9 @@ struct btrfs_device { /* physical drive uuid (or lvm uuid) */ u8 uuid[BTRFS_UUID_SIZE]; + /* per-device scrub information */ + struct scrub_dev *scrub_device; + struct btrfs_work work; }; @@ -157,6 +160,9 @@ struct map_lookup { struct btrfs_bio_stripe stripes[]; }; +#define map_lookup_size(n) (sizeof(struct map_lookup) + \ + (sizeof(struct btrfs_bio_stripe) * (n))) + /* Used to sort the devices by max_avail(descending sort) */ int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); -- cgit v1.2.3 From 475f63874d739d7842a56da94687f18d583ae654 Mon Sep 17 00:00:00 2001 From: Jan Schmidt Date: Fri, 11 Mar 2011 15:41:01 +0100 Subject: btrfs: new ioctls for scrub adds ioctls necessary to start and cancel scrubs, to get current progress and to get info about devices to be scrubbed. Note that the scrub is done per-device and that the ioctl only returns after the scrub for this device is finished or has been canceled.
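For illustration, a userspace caller could drive a whole-device scrub roughly as follows. This is a minimal sketch against the btrfs_ioctl_scrub_args layout and the BTRFS_IOC_SCRUB number added to ioctl.h below; the helper name and the open-a-mountpoint pattern are illustrative, and error handling is trimmed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	#include "ioctl.h"	/* btrfs_ioctl_scrub_args, BTRFS_IOC_SCRUB */

	static int scrub_one_device(const char *mnt, __u64 devid)
	{
		struct btrfs_ioctl_scrub_args sa;
		int fd, ret;

		fd = open(mnt, O_RDONLY);	/* any fd on the filesystem works */
		if (fd < 0)
			return -1;

		memset(&sa, 0, sizeof(sa));
		sa.devid = devid;
		sa.start = 0;
		sa.end = (__u64)-1;		/* scrub the whole device */

		/* returns only once the scrub is finished or canceled */
		ret = ioctl(fd, BTRFS_IOC_SCRUB, &sa);
		if (ret == 0)
			printf("scrubbed up to physical %llu\n",
			       (unsigned long long)sa.progress.last_physical);

		close(fd);
		return ret;
	}

After a cancel, sa.progress.last_physical can be fed back in as sa.start to resume, which is what the last_physical field is documented for above.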
Signed-off-by: Arne Jansen --- fs/btrfs/ctree.h | 2 - fs/btrfs/ioctl.c | 131 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/ioctl.h | 38 ++++++++++++++++ 3 files changed, 169 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 31141ba6072d..b7373b14e4cd 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -189,7 +189,6 @@ struct btrfs_mapping_tree { struct extent_map_tree map_tree; }; -#define BTRFS_UUID_SIZE 16 struct btrfs_dev_item { /* the internal btrfs device id */ __le64 devid; @@ -296,7 +295,6 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes) sizeof(struct btrfs_stripe) * (num_stripes - 1); } -#define BTRFS_FSID_SIZE 16 #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f580a3a5d2fc..205cd011d2f3 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1803,6 +1803,75 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg) return ret; } +static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) +{ + struct btrfs_ioctl_fs_info_args fi_args; + struct btrfs_device *device; + struct btrfs_device *next; + struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + fi_args.num_devices = fs_devices->num_devices; + fi_args.max_id = 0; + memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid)); + + mutex_lock(&fs_devices->device_list_mutex); + list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { + if (device->devid > fi_args.max_id) + fi_args.max_id = device->devid; + } + mutex_unlock(&fs_devices->device_list_mutex); + + if (copy_to_user(arg, &fi_args, sizeof(fi_args))) + return -EFAULT; + + return 0; +} + +static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) +{ + struct btrfs_ioctl_dev_info_args *di_args; + struct btrfs_device *dev; + struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; + int ret = 0; + char *s_uuid = NULL; + char empty_uuid[BTRFS_UUID_SIZE] = {0}; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + di_args = memdup_user(arg, sizeof(*di_args)); + if (IS_ERR(di_args)) + return PTR_ERR(di_args); + + if (memcmp(empty_uuid, di_args->uuid, BTRFS_UUID_SIZE) != 0) + s_uuid = di_args->uuid; + + mutex_lock(&fs_devices->device_list_mutex); + dev = btrfs_find_device(root, di_args->devid, s_uuid, NULL); + mutex_unlock(&fs_devices->device_list_mutex); + + if (!dev) { + ret = -ENODEV; + goto out; + } + + di_args->devid = dev->devid; + di_args->bytes_used = dev->bytes_used; + di_args->total_bytes = dev->total_bytes; + memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); + strncpy(di_args->path, dev->name, sizeof(di_args->path)); + +out: + if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) + ret = -EFAULT; + + kfree(di_args); + return ret; +} + static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, u64 off, u64 olen, u64 destoff) { @@ -2465,6 +2534,58 @@ static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp) return btrfs_wait_for_commit(root, transid); } +static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg) +{ + int ret; + struct btrfs_ioctl_scrub_args *sa; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + sa = memdup_user(arg, sizeof(*sa)); + if (IS_ERR(sa)) + return PTR_ERR(sa); + + ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end, + &sa->progress); + + if 
(copy_to_user(arg, sa, sizeof(*sa))) + ret = -EFAULT; + + kfree(sa); + return ret; +} + +static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return btrfs_scrub_cancel(root); +} + +static long btrfs_ioctl_scrub_progress(struct btrfs_root *root, + void __user *arg) +{ + struct btrfs_ioctl_scrub_args *sa; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + sa = memdup_user(arg, sizeof(*sa)); + if (IS_ERR(sa)) + return PTR_ERR(sa); + + ret = btrfs_scrub_progress(root, sa->devid, &sa->progress); + + if (copy_to_user(arg, sa, sizeof(*sa))) + ret = -EFAULT; + + kfree(sa); + return ret; +} + long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -2504,6 +2625,10 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_add_dev(root, argp); case BTRFS_IOC_RM_DEV: return btrfs_ioctl_rm_dev(root, argp); + case BTRFS_IOC_FS_INFO: + return btrfs_ioctl_fs_info(root, argp); + case BTRFS_IOC_DEV_INFO: + return btrfs_ioctl_dev_info(root, argp); case BTRFS_IOC_BALANCE: return btrfs_balance(root->fs_info->dev_root); case BTRFS_IOC_CLONE: @@ -2527,6 +2652,12 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_start_sync(file, argp); case BTRFS_IOC_WAIT_SYNC: return btrfs_ioctl_wait_sync(file, argp); + case BTRFS_IOC_SCRUB: + return btrfs_ioctl_scrub(root, argp); + case BTRFS_IOC_SCRUB_CANCEL: + return btrfs_ioctl_scrub_cancel(root, argp); + case BTRFS_IOC_SCRUB_PROGRESS: + return btrfs_ioctl_scrub_progress(root, argp); } return -ENOTTY; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 37ac030d64b4..1a638ceeead8 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -32,6 +32,8 @@ struct btrfs_ioctl_vol_args { #define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) #define BTRFS_SUBVOL_RDONLY (1ULL << 1) +#define BTRFS_FSID_SIZE 16 +#define BTRFS_UUID_SIZE 16 #define BTRFS_SUBVOL_NAME_MAX 4039 struct btrfs_ioctl_vol_args_v2 { @@ -79,6 +81,33 @@ struct btrfs_scrub_progress { * Intermittent error. 
*/ }; +struct btrfs_ioctl_scrub_args { + __u64 devid; /* in */ + __u64 start; /* in */ + __u64 end; /* in */ + __u64 flags; /* in */ + struct btrfs_scrub_progress progress; /* out */ + /* pad to 1k */ + __u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8]; +}; + +#define BTRFS_DEVICE_PATH_NAME_MAX 1024 +struct btrfs_ioctl_dev_info_args { + __u64 devid; /* in/out */ + __u8 uuid[BTRFS_UUID_SIZE]; /* in/out */ + __u64 bytes_used; /* out */ + __u64 total_bytes; /* out */ + __u64 unused[379]; /* pad to 4k */ + __u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */ +}; + +struct btrfs_ioctl_fs_info_args { + __u64 max_id; /* out */ + __u64 num_devices; /* out */ + __u8 fsid[BTRFS_FSID_SIZE]; /* out */ + __u64 reserved[124]; /* pad to 1k */ +}; + #define BTRFS_INO_LOOKUP_PATH_MAX 4080 struct btrfs_ioctl_ino_lookup_args { __u64 treeid; @@ -240,4 +269,13 @@ struct btrfs_ioctl_space_args { struct btrfs_ioctl_vol_args_v2) #define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64) #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64) +#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \ + struct btrfs_ioctl_scrub_args) +#define BTRFS_IOC_SCRUB_CANCEL _IO(BTRFS_IOCTL_MAGIC, 28) +#define BTRFS_IOC_SCRUB_PROGRESS _IOWR(BTRFS_IOCTL_MAGIC, 29, \ + struct btrfs_ioctl_scrub_args) +#define BTRFS_IOC_DEV_INFO _IOWR(BTRFS_IOCTL_MAGIC, 30, \ + struct btrfs_ioctl_dev_info_args) +#define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \ + struct btrfs_ioctl_fs_info_args) #endif -- cgit v1.2.3 From 96e369208e65a7d017a52361fd572df41fde8472 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sat, 9 Apr 2011 14:27:01 +0300 Subject: btrfs scrub: make fixups sync btrfs scrub - make fixups sync, don't reuse fixup bios Fixups are already sync for csum failures, this patch makes them sync for the EIO case as well. Fixups are now sharing pages with the parent sbio - instead of allocating a separate page to do a fixup we grab the page from the sbio buffer. Fixup bios are no longer reused. struct fixup is no longer needed, instead pass [sbio pointer, index]. Originally this was added to look at the possibility of sharing the code between drive swap and scrub, but it actually fixes a serious bug in scrub code where errors that could be corrected were ignored and reported as uncorrectable. btrfs scrub - restore bios properly after media errors The current code reallocates a bio after a media error. This is a temporary measure introduced in v3 after a serious problem related to bio reuse was found in v2 of the scrub patchset. Basically we did not reset bv_offset and bv_len fields of the bio_vec structure. They are changed in case an I/O error happens, for example, at offset 512 or 1024 into the page. Also the bi_flags field wasn't properly set up before reusing the bio.
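In other words, a bio that has gone through a failed read cannot simply be resubmitted; the fields the block layer may have modified must be restored by hand first. A condensed sketch of the reset this patch performs in scrub_checksum() (same logic as the hunk below, pulled out into a helper for readability; the helper name is hypothetical, the patch open-codes it):

	/* restore an sbio's bio so its pages can be reused after a read error */
	static void scrub_reset_bio(struct bio *bio, int count)
	{
		int i;

		/* clear status bits, keep only the allocation/pool bits */
		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_phys_segments = 0;
		bio->bi_idx = 0;

		for (i = 0; i < count; i++) {
			/* a partially failed read may leave these shifted */
			bio->bi_io_vec[i].bv_offset = 0;
			bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		}
	}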
Signed-off-by: Arne Jansen --- fs/btrfs/scrub.c | 287 ++++++++++++++++--------------------------------------- 1 file changed, 80 insertions(+), 207 deletions(-) diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 70f9fa772ee9..6a50801ecfa0 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -50,7 +50,6 @@ struct scrub_bio; struct scrub_page; struct scrub_dev; -struct scrub_fixup; static void scrub_bio_end_io(struct bio *bio, int err); static void scrub_checksum(struct btrfs_work *work); static int scrub_checksum_data(struct scrub_dev *sdev, @@ -59,9 +58,11 @@ static int scrub_checksum_tree_block(struct scrub_dev *sdev, struct scrub_page *spag, u64 logical, void *buffer); static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer); -static void scrub_recheck_end_io(struct bio *bio, int err); -static void scrub_fixup_worker(struct btrfs_work *work); -static void scrub_fixup(struct scrub_fixup *fixup); +static int scrub_fixup_check(struct scrub_bio *sbio, int ix); +static void scrub_fixup_end_io(struct bio *bio, int err); +static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, + struct page *page); +static void scrub_fixup(struct scrub_bio *sbio, int ix); #define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */ #define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */ @@ -105,17 +106,6 @@ struct scrub_dev { spinlock_t stat_lock; }; -struct scrub_fixup { - struct scrub_dev *sdev; - struct bio *bio; - u64 logical; - u64 physical; - struct scrub_page spag; - struct btrfs_work work; - int err; - int recheck; -}; - static void scrub_free_csums(struct scrub_dev *sdev) { while (!list_empty(&sdev->csum_list)) { @@ -240,107 +230,34 @@ nomem: */ static void scrub_recheck_error(struct scrub_bio *sbio, int ix) { - struct scrub_dev *sdev = sbio->sdev; - struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; - struct bio *bio = NULL; - struct page *page = NULL; - struct scrub_fixup *fixup = NULL; - int ret; - - /* - * while we're in here we do not want the transaction to commit. - * To prevent it, we increment scrubs_running. 
scrub_pause will - * have to wait until we're finished. - * We can safely increment scrubs_running here, because we're - * in the context of the original bio which is still marked in_flight - */ - atomic_inc(&fs_info->scrubs_running); - - fixup = kzalloc(sizeof(*fixup), GFP_NOFS); - if (!fixup) - goto malloc_error; - - fixup->logical = sbio->logical + ix * PAGE_SIZE; - fixup->physical = sbio->physical + ix * PAGE_SIZE; - fixup->spag = sbio->spag[ix]; - fixup->sdev = sdev; - - bio = bio_alloc(GFP_NOFS, 1); - if (!bio) - goto malloc_error; - bio->bi_private = fixup; - bio->bi_size = 0; - bio->bi_bdev = sdev->dev->bdev; - fixup->bio = bio; - fixup->recheck = 0; - - page = alloc_page(GFP_NOFS); - if (!page) - goto malloc_error; - - ret = bio_add_page(bio, page, PAGE_SIZE, 0); - if (!ret) - goto malloc_error; - - if (!sbio->err) { - /* - * shorter path: just a checksum error, go ahead and correct it - */ - scrub_fixup_worker(&fixup->work); - return; + if (sbio->err) { + if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, + (sbio->physical + ix * PAGE_SIZE) >> 9, + sbio->bio->bi_io_vec[ix].bv_page) == 0) { + if (scrub_fixup_check(sbio, ix) == 0) + return; + } } - /* - * an I/O-error occurred for one of the blocks in the bio, not - * necessarily for this one, so first try to read it separately - */ - fixup->work.func = scrub_fixup_worker; - fixup->recheck = 1; - bio->bi_end_io = scrub_recheck_end_io; - bio->bi_sector = fixup->physical >> 9; - bio->bi_bdev = sdev->dev->bdev; - submit_bio(0, bio); - - return; - -malloc_error: - if (bio) - bio_put(bio); - if (page) - __free_page(page); - kfree(fixup); - spin_lock(&sdev->stat_lock); - ++sdev->stat.malloc_errors; - spin_unlock(&sdev->stat_lock); - atomic_dec(&fs_info->scrubs_running); - wake_up(&fs_info->scrub_pause_wait); + scrub_fixup(sbio, ix); } -static void scrub_recheck_end_io(struct bio *bio, int err) -{ - struct scrub_fixup *fixup = bio->bi_private; - struct btrfs_fs_info *fs_info = fixup->sdev->dev->dev_root->fs_info; - - fixup->err = err; - btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work); -} - -static int scrub_fixup_check(struct scrub_fixup *fixup) +static int scrub_fixup_check(struct scrub_bio *sbio, int ix) { int ret = 1; struct page *page; void *buffer; - u64 flags = fixup->spag.flags; + u64 flags = sbio->spag[ix].flags; - page = fixup->bio->bi_io_vec[0].bv_page; + page = sbio->bio->bi_io_vec[ix].bv_page; buffer = kmap_atomic(page, KM_USER0); if (flags & BTRFS_EXTENT_FLAG_DATA) { - ret = scrub_checksum_data(fixup->sdev, - &fixup->spag, buffer); + ret = scrub_checksum_data(sbio->sdev, + sbio->spag + ix, buffer); } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { - ret = scrub_checksum_tree_block(fixup->sdev, - &fixup->spag, - fixup->logical, + ret = scrub_checksum_tree_block(sbio->sdev, + sbio->spag + ix, + sbio->logical + ix * PAGE_SIZE, buffer); } else { WARN_ON(1); @@ -350,51 +267,25 @@ static int scrub_fixup_check(struct scrub_fixup *fixup) return ret; } -static void scrub_fixup_worker(struct btrfs_work *work) -{ - struct scrub_fixup *fixup; - struct btrfs_fs_info *fs_info; - u64 flags; - int ret = 1; - - fixup = container_of(work, struct scrub_fixup, work); - fs_info = fixup->sdev->dev->dev_root->fs_info; - flags = fixup->spag.flags; - - if (fixup->recheck && fixup->err == 0) - ret = scrub_fixup_check(fixup); - - if (ret || fixup->err) - scrub_fixup(fixup); - - __free_page(fixup->bio->bi_io_vec[0].bv_page); - bio_put(fixup->bio); - - atomic_dec(&fs_info->scrubs_running); - wake_up(&fs_info->scrub_pause_wait); - - kfree(fixup); -}
- static void scrub_fixup_end_io(struct bio *bio, int err) { complete((struct completion *)bio->bi_private); } -static void scrub_fixup(struct scrub_fixup *fixup) +static void scrub_fixup(struct scrub_bio *sbio, int ix) { - struct scrub_dev *sdev = fixup->sdev; + struct scrub_dev *sdev = sbio->sdev; struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct btrfs_multi_bio *multi = NULL; - struct bio *bio = fixup->bio; + u64 logical = sbio->logical + ix * PAGE_SIZE; u64 length; int i; int ret; DECLARE_COMPLETION_ONSTACK(complete); - if ((fixup->spag.flags & BTRFS_EXTENT_FLAG_DATA) && - (fixup->spag.have_csum == 0)) { + if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) && + (sbio->spag[ix].have_csum == 0)) { /* * nodatasum, don't try to fix anything * FIXME: we can do better, open the inode and trigger a @@ -404,71 +295,49 @@ static void scrub_fixup(struct scrub_fixup *fixup) } length = PAGE_SIZE; - ret = btrfs_map_block(map_tree, REQ_WRITE, fixup->logical, &length, + ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &multi, 0); if (ret || !multi || length < PAGE_SIZE) { printk(KERN_ERR "scrub_fixup: btrfs_map_block failed us for %llu\n", - (unsigned long long)fixup->logical); + (unsigned long long)logical); WARN_ON(1); return; } - if (multi->num_stripes == 1) { + if (multi->num_stripes == 1) /* there aren't any replicas */ goto uncorrectable; - } /* * first find a good copy */ for (i = 0; i < multi->num_stripes; ++i) { - if (i == fixup->spag.mirror_num) + if (i == sbio->spag[ix].mirror_num) continue; - bio->bi_sector = multi->stripes[i].physical >> 9; - bio->bi_bdev = multi->stripes[i].dev->bdev; - bio->bi_size = PAGE_SIZE; - bio->bi_next = NULL; - bio->bi_flags |= 1 << BIO_UPTODATE; - bio->bi_comp_cpu = -1; - bio->bi_end_io = scrub_fixup_end_io; - bio->bi_private = &complete; - - submit_bio(0, bio); - - wait_for_completion(&complete); - - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev, + multi->stripes[i].physical >> 9, + sbio->bio->bi_io_vec[ix].bv_page)) { /* I/O-error, this is not a good copy */ continue; + } - ret = scrub_fixup_check(fixup); - if (ret == 0) + if (scrub_fixup_check(sbio, ix) == 0) break; } if (i == multi->num_stripes) goto uncorrectable; /* - * the bio now contains good data, write it back + * bi_io_vec[ix].bv_page now contains good data, write it back */ - bio->bi_sector = fixup->physical >> 9; - bio->bi_bdev = sdev->dev->bdev; - bio->bi_size = PAGE_SIZE; - bio->bi_next = NULL; - bio->bi_flags |= 1 << BIO_UPTODATE; - bio->bi_comp_cpu = -1; - bio->bi_end_io = scrub_fixup_end_io; - bio->bi_private = &complete; - - submit_bio(REQ_WRITE, bio); - - wait_for_completion(&complete); - - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + if (scrub_fixup_io(WRITE, sdev->dev->bdev, + (sbio->physical + ix * PAGE_SIZE) >> 9, + sbio->bio->bi_io_vec[ix].bv_page)) { /* I/O-error, writeback failed, give up */ goto uncorrectable; + } kfree(multi); spin_lock(&sdev->stat_lock); @@ -477,7 +346,7 @@ static void scrub_fixup(struct scrub_fixup *fixup) if (printk_ratelimit()) printk(KERN_ERR "btrfs: fixed up at %llu\n", - (unsigned long long)fixup->logical); + (unsigned long long)logical); return; uncorrectable: @@ -488,7 +357,32 @@ uncorrectable: if (printk_ratelimit()) printk(KERN_ERR "btrfs: unable to fixup at %llu\n", - (unsigned long long)fixup->logical); + (unsigned long long)logical); +} + +static int scrub_fixup_io(int rw, struct block_device *bdev, 
sector_t sector, + struct page *page) +{ + struct bio *bio = NULL; + int ret; + DECLARE_COMPLETION_ONSTACK(complete); + + /* we are going to wait on this IO */ + rw |= REQ_SYNC | REQ_UNPLUG; + + bio = bio_alloc(GFP_NOFS, 1); + bio->bi_bdev = bdev; + bio->bi_sector = sector; + bio_add_page(bio, page, PAGE_SIZE, 0); + bio->bi_end_io = scrub_fixup_end_io; + bio->bi_private = &complete; + submit_bio(rw, bio); + + wait_for_completion(&complete); + + ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); + bio_put(bio); + return ret; } static void scrub_bio_end_io(struct bio *bio, int err) @@ -514,44 +408,24 @@ static void scrub_checksum(struct btrfs_work *work) int ret; if (sbio->err) { - struct bio *bio; - struct bio *old_bio; - for (i = 0; i < sbio->count; ++i) scrub_recheck_error(sbio, i); + + sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1); + sbio->bio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bio->bi_phys_segments = 0; + sbio->bio->bi_idx = 0; + + for (i = 0; i < sbio->count; i++) { + struct bio_vec *bi; + bi = &sbio->bio->bi_io_vec[i]; + bi->bv_offset = 0; + bi->bv_len = PAGE_SIZE; + } + spin_lock(&sdev->stat_lock); ++sdev->stat.read_errors; spin_unlock(&sdev->stat_lock); - - /* - * FIXME: allocate a new bio after a media error. I haven't - * figured out how to reuse this one - */ - old_bio = sbio->bio; - bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); - if (!bio) { - /* - * alloc failed. cancel the scrub and don't requeue - * this sbio - */ - printk(KERN_ERR "btrfs scrub: allocation failure, " - "cancelling scrub\n"); - atomic_inc(&sdev->dev->dev_root->fs_info-> - scrub_cancel_req); - goto out_no_enqueue; - } - sbio->bio = bio; - bio->bi_private = sbio; - bio->bi_end_io = scrub_bio_end_io; - bio->bi_sector = 0; - bio->bi_bdev = sbio->sdev->dev->bdev; - bio->bi_size = 0; - for (i = 0; i < SCRUB_PAGES_PER_BIO; ++i) { - struct page *page; - page = old_bio->bi_io_vec[i].bv_page; - bio_add_page(bio, page, PAGE_SIZE, 0); - } - bio_put(old_bio); goto out; } for (i = 0; i < sbio->count; ++i) { @@ -581,7 +455,6 @@ out: sbio->next_free = sdev->first_free; sdev->first_free = sbio->index; spin_unlock(&sdev->list_lock); -out_no_enqueue: atomic_dec(&sdev->in_flight); wake_up(&sdev->list_wait); } -- cgit v1.2.3 From 8628764e1a5e1998a42b9713e9edea7753653d01 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Wed, 23 Mar 2011 16:34:19 +0100 Subject: btrfs: add readonly flag setting the readonly flag prevents writes in case an error is detected Signed-off-by: Arne Jansen --- fs/btrfs/ctree.h | 2 +- fs/btrfs/ioctl.c | 2 +- fs/btrfs/ioctl.h | 1 + fs/btrfs/scrub.c | 23 +++++++++++++---------- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b7373b14e4cd..ee904666b766 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2657,7 +2657,7 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, /* scrub.c */ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, - struct btrfs_scrub_progress *progress); + struct btrfs_scrub_progress *progress, int readonly); int btrfs_scrub_pause(struct btrfs_root *root); int btrfs_scrub_pause_super(struct btrfs_root *root); int btrfs_scrub_continue(struct btrfs_root *root); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 205cd011d2f3..f0a74f014748 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2547,7 +2547,7 @@ static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg) return PTR_ERR(sa); ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end, - &sa->progress); + 
&sa->progress, sa->flags & BTRFS_SCRUB_READONLY); if (copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 1a638ceeead8..e5e0ee2cad4e 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -81,6 +81,7 @@ struct btrfs_scrub_progress { * Intermittent error. */ }; +#define BTRFS_SCRUB_READONLY 1 struct btrfs_ioctl_scrub_args { __u64 devid; /* in */ __u64 start; /* in */ diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 6a50801ecfa0..a31f2a9bd2e2 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -42,7 +42,6 @@ * - In case of a read error on files with nodatasum, map the file and read * the extent to trigger a writeback of the good copy * - track and record media errors, throw out bad devices - * - add a readonly mode * - add a mode to also read unallocated space * - make the prefetch cancellable */ @@ -99,6 +98,7 @@ struct scrub_dev { u16 csum_size; struct list_head csum_list; atomic_t cancel_req; + int readonly; /* * statistics */ @@ -329,14 +329,16 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix) if (i == multi->num_stripes) goto uncorrectable; - /* - * bi_io_vec[ix].bv_page now contains good data, write it back - */ - if (scrub_fixup_io(WRITE, sdev->dev->bdev, - (sbio->physical + ix * PAGE_SIZE) >> 9, - sbio->bio->bi_io_vec[ix].bv_page)) { - /* I/O-error, writeback failed, give up */ - goto uncorrectable; + if (!sdev->readonly) { + /* + * bi_io_vec[ix].bv_page now contains good data, write it back + */ + if (scrub_fixup_io(WRITE, sdev->dev->bdev, + (sbio->physical + ix * PAGE_SIZE) >> 9, + sbio->bio->bi_io_vec[ix].bv_page)) { + /* I/O-error, writeback failed, give up */ + goto uncorrectable; + } } kfree(multi); @@ -1156,7 +1158,7 @@ static noinline_for_stack void scrub_workers_put(struct btrfs_root *root) int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, - struct btrfs_scrub_progress *progress) + struct btrfs_scrub_progress *progress, int readonly) { struct scrub_dev *sdev; struct btrfs_fs_info *fs_info = root->fs_info; @@ -1209,6 +1211,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, scrub_workers_put(root); return PTR_ERR(sdev); } + sdev->readonly = readonly; dev->scrub_device = sdev; atomic_inc(&fs_info->scrubs_running); -- cgit v1.2.3 From 7a36ddec1003a4e84e79f28ee714a142ed6bc529 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 6 May 2011 15:33:15 +0200 Subject: btrfs: use printk_ratelimited instead of printk_ratelimit As per printk_ratelimit comment, it should not be used. 
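For illustration, the conversion applied at each call site looks roughly like this (a minimal sketch, not a hunk from the patch; the message text is made up):

	/* before: open-coded rate limiting via the deprecated helper */
	if (printk_ratelimit())
		printk(KERN_INFO "btrfs: example message on %s\n", name);

	/* after: the wrapper keeps the call site to a single statement */
	printk_ratelimited(KERN_INFO "btrfs: example message on %s\n", name);

Besides being shorter, printk_ratelimited() defines its own static ratelimit state per call site, so each message is throttled independently instead of sharing the single global state behind printk_ratelimit().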
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 21 ++++++--------------- fs/btrfs/inode.c | 13 ++++--------- 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index de7b4770ab17..cb9d1b8bfe74 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include "compat.h" #include "ctree.h" @@ -254,14 +255,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs: %s checksum verify " + printk_ratelimited(KERN_INFO "btrfs: %s checksum verify " "failed on %llu wanted %X found %X " "level %d\n", root->fs_info->sb->s_id, (unsigned long long)buf->start, val, found, btrfs_header_level(buf)); - } if (result != (char *)&inline_result) kfree(result); return 1; @@ -296,13 +295,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, ret = 0; goto out; } - if (printk_ratelimit()) { - printk("parent transid verify failed on %llu wanted %llu " + printk_ratelimited("parent transid verify failed on %llu wanted %llu " "found %llu\n", (unsigned long long)eb->start, (unsigned long long)parent_transid, (unsigned long long)btrfs_header_generation(eb)); - } ret = 1; clear_extent_buffer_uptodate(io_tree, eb, &cached_state); out: @@ -533,12 +530,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, found_start = btrfs_header_bytenr(eb); if (found_start != start) { - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs bad tree block start " + printk_ratelimited(KERN_INFO "btrfs bad tree block start " "%llu %llu\n", (unsigned long long)found_start, (unsigned long long)eb->start); - } ret = -EIO; goto err; } @@ -550,10 +545,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, goto err; } if (check_tree_block_fsid(root, eb)) { - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs bad fsid on block %llu\n", + printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n", (unsigned long long)eb->start); - } ret = -EIO; goto err; } @@ -2108,11 +2101,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) if (uptodate) { set_buffer_uptodate(bh); } else { - if (printk_ratelimit()) { - printk(KERN_WARNING "lost page write due to " + printk_ratelimited(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); - } /* note, we dont' set_buffer_write_io_error because we have * our own ways of dealing with the IO errors */ diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5ff52b644a60..1d1017f91558 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "compat.h" #include "ctree.h" #include "disk-io.h" @@ -2004,12 +2005,10 @@ good: return 0; zeroit: - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " + printk_ratelimited(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " "private %llu\n", page->mapping->host->i_ino, (unsigned long long)start, csum, (unsigned long long)private); - } memset(kaddr + offset, 1, end - start + 1); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); @@ -4243,22 +4242,18 @@ void btrfs_dirty_inode(struct inode *inode) btrfs_end_transaction(trans, root); trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { - if (printk_ratelimit()) { - printk(KERN_ERR "btrfs: fail to " + 
printk_ratelimited(KERN_ERR "btrfs: fail to " "dirty inode %lu error %ld\n", inode->i_ino, PTR_ERR(trans)); - } return; } btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret) { - if (printk_ratelimit()) { - printk(KERN_ERR "btrfs: fail to " + printk_ratelimited(KERN_ERR "btrfs: fail to " "dirty inode %lu error %d\n", inode->i_ino, ret); - } } } btrfs_end_transaction(trans, root); -- cgit v1.2.3
From 4ea028859bbdad34b84c9951fbb832ae10c6a96c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 12 May 2011 18:13:12 +0200 Subject: btrfs: use unsigned type for single bit bitfield
Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e37d441617d2..343304dec6d1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -740,12 +740,12 @@ struct btrfs_space_info { */ unsigned long reservation_progress; - int full:1; /* indicates that we cannot allocate any more + unsigned int full:1; /* indicates that we cannot allocate any more chunks for this space */ - int chunk_alloc:1; /* set if we are allocating a chunk */ + unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ - int force_alloc; /* set if we need to force a chunk alloc for - this space */ + unsigned int force_alloc; /* set if we need to force a chunk + alloc for this space */ struct list_head list; -- cgit v1.2.3
From bcd53741cc2af4342ac3ff6983bddc4a1b63b9b4 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Tue, 12 Apr 2011 10:43:21 +0200 Subject: btrfs: move btrfs_cmp_device_free_bytes to super.c
This function won't be used here anymore, so move it to super.c, where it is used for the df calculation.
--- fs/btrfs/super.c | 26 ++++++++++++++++++++++++++ fs/btrfs/volumes.c | 13 ------------- fs/btrfs/volumes.h | 15 --------------- 3 files changed, 26 insertions(+), 28 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..32fe8b33cc1c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -913,6 +913,32 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) return 0; } +/* Used to sort the devices by max_avail(descending sort) */ +static int btrfs_cmp_device_free_bytes(const void *dev_info1, + const void *dev_info2) +{ + if (((struct btrfs_device_info *)dev_info1)->max_avail > + ((struct btrfs_device_info *)dev_info2)->max_avail) + return -1; + else if (((struct btrfs_device_info *)dev_info1)->max_avail < + ((struct btrfs_device_info *)dev_info2)->max_avail) + return 1; + else + return 0; +} + +/* + * sort the devices by max_avail, in which max free extent size of each device + * is stored.(Descending Sort) + */ +static inline void btrfs_descending_sort_devices( + struct btrfs_device_info *devices, + size_t nr_devices) +{ + sort(devices, nr_devices, sizeof(struct btrfs_device_info), + btrfs_cmp_device_free_bytes, NULL); +} + /* * The helper to calc the free space on the devices that can be used to store * file data.
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8b9fb8c7683d..a9f1fc23278b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2282,19 +2282,6 @@ static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, return calc_size * num_stripes; } -/* Used to sort the devices by max_avail(descending sort) */ -int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2) -{ - if (((struct btrfs_device_info *)dev_info1)->max_avail > - ((struct btrfs_device_info *)dev_info2)->max_avail) - return -1; - else if (((struct btrfs_device_info *)dev_info1)->max_avail < - ((struct btrfs_device_info *)dev_info2)->max_avail) - return 1; - else - return 0; -} - static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, int *num_stripes, int *min_stripes, int *sub_stripes) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..b502f01f79ed 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -157,21 +157,6 @@ struct map_lookup { struct btrfs_bio_stripe stripes[]; }; -/* Used to sort the devices by max_avail(descending sort) */ -int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); - -/* - * sort the devices by max_avail, in which max free extent size of each device - * is stored.(Descending Sort) - */ -static inline void btrfs_descending_sort_devices( - struct btrfs_device_info *devices, - size_t nr_devices) -{ - sort(devices, nr_devices, sizeof(struct btrfs_device_info), - btrfs_cmp_device_free_bytes, NULL); -} - int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, u64 end, u64 *length); -- cgit v1.2.3
From a9c9bf68276c36898e23db770a65bd9b75bfac58 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Tue, 12 Apr 2011 11:01:20 +0200 Subject: btrfs: heed alloc_start
Currently, alloc_start is disregarded if the requested chunk size is bigger than (device size - alloc_start), but smaller than the device size. The only situation where I see this could have made sense was when a chunk equal to the size of the device has been requested. This was possible as the allocator failed to take alloc_start into account when calculating the requested chunk size. As this gets fixed by this patch, the workaround is not necessary anymore.
--- fs/btrfs/volumes.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a9f1fc23278b..45c592a7335e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -849,10 +849,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans, /* we don't want to overwrite the superblock on the drive, * so we make sure to start at an offset of at least 1MB */ - search_start = 1024 * 1024; - - if (root->fs_info->alloc_start + num_bytes <= search_end) - search_start = max(root->fs_info->alloc_start, search_start); + search_start = max(root->fs_info->alloc_start, 1024ull * 1024); max_hole_start = search_start; max_hole_size = 0; -- cgit v1.2.3
From 73c5de0051533cbdf2bb656586c3eb21a475aa7d Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Tue, 12 Apr 2011 12:07:57 +0200 Subject: btrfs: quasi-round-robin for chunk allocation
In a multi-device setup, the chunk allocator currently always allocates chunks on the devices in the same order. This leads to a very uneven distribution, especially with RAID1 or RAID10 and an uneven number of devices. This patch always sorts the devices before allocating, and allocates the stripes on the devices with the most available space, as long as there is enough space available.
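As a worked example of this policy (hypothetical sizes; the RAID parameters and the 1 GB data stripe limit appear in the code below): suppose a RAID1 data chunk is requested on three writable devices with 100 GB, 80 GB and 50 GB free. The devices are sorted in descending order by the size of their largest free hole; RAID1 uses devs_increment = devs_max = 2 and ncopies = 2, so ndevs is trimmed to two, and the stripe size is min(hole size of the 2nd device, max_stripe_size) = min(80 GB, 1 GB) = 1 GB. The result is a 1 GB chunk mirrored onto the two devices with the most room, and since the list is re-sorted on every allocation, subsequent chunks rotate to whichever devices then have the most free space.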
In a low space situation, it first tries to maximize striping. The patch also simplifies the allocator and reduces the checks for corner cases. The simplification is done by several means. First, it defines the properties of each RAID type upfront. These properties are used afterwards instead of differentiating cases in several places. Second, the old allocator defined a minimum stripe size for each block group type, tried to find a large enough chunk, and if that failed, just allocated a smaller one. This is now done in one step. The largest possible chunk (up to max_chunk_size) is searched for and allocated. Because we now have only one pass, the allocation of the map (struct map_lookup) is moved down to the point where the number of stripes is already known. This way we avoid reallocation of the map. We still avoid allocating stripes that are not a multiple of STRIPE_SIZE.
--- fs/btrfs/volumes.c | 481 ++++++++++++++++++++--------------------------------- fs/btrfs/volumes.h | 1 + 2 files changed, 177 insertions(+), 305 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 45c592a7335e..ab55bfc31a06 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2268,262 +2268,204 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, return 0; } -static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, - int num_stripes, int sub_stripes) +/* + * sort the devices in descending order by max_avail, total_avail + */ +static int btrfs_cmp_device_info(const void *a, const void *b) { - if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) - return calc_size; - else if (type & BTRFS_BLOCK_GROUP_RAID10) - return calc_size * (num_stripes / sub_stripes); - else - return calc_size * num_stripes; + const struct btrfs_device_info *di_a = a; + const struct btrfs_device_info *di_b = b; + + if (di_a->max_avail > di_b->max_avail) + return -1; + if (di_a->max_avail < di_b->max_avail) + return 1; + if (di_a->total_avail > di_b->total_avail) + return -1; + if (di_a->total_avail < di_b->total_avail) + return 1; + return 0; } -static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, - int *num_stripes, int *min_stripes, - int *sub_stripes) +static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, + struct map_lookup **map_ret, + u64 *num_bytes_out, u64 *stripe_size_out, + u64 start, u64 type) { - *num_stripes = 1; - *min_stripes = 1; - *sub_stripes = 0; + struct btrfs_fs_info *info = extent_root->fs_info; + struct btrfs_fs_devices *fs_devices = info->fs_devices; + struct list_head *cur; + struct map_lookup *map = NULL; + struct extent_map_tree *em_tree; + struct extent_map *em; + struct btrfs_device_info *devices_info = NULL; + u64 total_avail; + int num_stripes; /* total number of stripes to allocate */ + int sub_stripes; /* sub_stripes info for map */ + int dev_stripes; /* stripes per dev */ + int devs_max; /* max devs to use */ + int devs_min; /* min devs needed */ + int devs_increment; /* ndevs has to be a multiple of this */ + int ncopies; /* how many copies to data has */ + int ret; + u64 max_stripe_size; + u64 max_chunk_size; + u64 stripe_size; + u64 num_bytes; + int ndevs; + int i; + int j; - if (type & (BTRFS_BLOCK_GROUP_RAID0)) { - *num_stripes = fs_devices->rw_devices; - *min_stripes = 2; - } - if (type & (BTRFS_BLOCK_GROUP_DUP)) { - *num_stripes = 2; - *min_stripes = 2; - } - if (type & (BTRFS_BLOCK_GROUP_RAID1)) { - if (fs_devices->rw_devices < 2) - return -ENOSPC; - *num_stripes = 2; -
*min_stripes = 2; - } - if (type & (BTRFS_BLOCK_GROUP_RAID10)) { - *num_stripes = fs_devices->rw_devices; - if (*num_stripes < 4) - return -ENOSPC; - *num_stripes &= ~(u32)1; - *sub_stripes = 2; - *min_stripes = 4; + if ((type & BTRFS_BLOCK_GROUP_RAID1) && + (type & BTRFS_BLOCK_GROUP_DUP)) { + WARN_ON(1); + type &= ~BTRFS_BLOCK_GROUP_DUP; } - return 0; -} + if (list_empty(&fs_devices->alloc_list)) + return -ENOSPC; -static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, - u64 proposed_size, u64 type, - int num_stripes, int small_stripe) -{ - int min_stripe_size = 1 * 1024 * 1024; - u64 calc_size = proposed_size; - u64 max_chunk_size = calc_size; - int ncopies = 1; + sub_stripes = 1; + dev_stripes = 1; + devs_increment = 1; + ncopies = 1; + devs_max = 0; /* 0 == as many as possible */ + devs_min = 1; - if (type & (BTRFS_BLOCK_GROUP_RAID1 | - BTRFS_BLOCK_GROUP_DUP | - BTRFS_BLOCK_GROUP_RAID10)) + /* + * define the properties of each RAID type. + * FIXME: move this to a global table and use it in all RAID + * calculation code + */ + if (type & (BTRFS_BLOCK_GROUP_DUP)) { + dev_stripes = 2; ncopies = 2; + devs_max = 1; + } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) { + devs_min = 2; + } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) { + devs_increment = 2; + ncopies = 2; + devs_max = 2; + devs_min = 2; + } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) { + sub_stripes = 2; + devs_increment = 2; + ncopies = 2; + devs_min = 4; + } else { + devs_max = 1; + } if (type & BTRFS_BLOCK_GROUP_DATA) { - max_chunk_size = 10 * calc_size; - min_stripe_size = 64 * 1024 * 1024; + max_stripe_size = 1024 * 1024 * 1024; + max_chunk_size = 10 * max_stripe_size; } else if (type & BTRFS_BLOCK_GROUP_METADATA) { - max_chunk_size = 256 * 1024 * 1024; - min_stripe_size = 32 * 1024 * 1024; + max_stripe_size = 256 * 1024 * 1024; + max_chunk_size = max_stripe_size; } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { - calc_size = 8 * 1024 * 1024; - max_chunk_size = calc_size * 2; - min_stripe_size = 1 * 1024 * 1024; + max_stripe_size = 8 * 1024 * 1024; + max_chunk_size = 2 * max_stripe_size; + } else { + printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n", + type); + BUG_ON(1); } /* we don't want a chunk larger than 10% of writeable space */ max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), max_chunk_size); - if (calc_size * num_stripes > max_chunk_size * ncopies) { - calc_size = max_chunk_size * ncopies; - do_div(calc_size, num_stripes); - do_div(calc_size, BTRFS_STRIPE_LEN); - calc_size *= BTRFS_STRIPE_LEN; - } + devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, + GFP_NOFS); + if (!devices_info) + return -ENOMEM; - /* we don't want tiny stripes */ - if (!small_stripe) - calc_size = max_t(u64, min_stripe_size, calc_size); + cur = fs_devices->alloc_list.next; /* - * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure - * we end up with something bigger than a stripe + * in the first pass through the devices list, we gather information + * about the available holes on each device. 
*/ - calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); - - do_div(calc_size, BTRFS_STRIPE_LEN); - calc_size *= BTRFS_STRIPE_LEN; - - return calc_size; -} + ndevs = 0; + while (cur != &fs_devices->alloc_list) { + struct btrfs_device *device; + u64 max_avail; + u64 dev_offset; -static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map, - int num_stripes) -{ - struct map_lookup *new; - size_t len = map_lookup_size(num_stripes); - - BUG_ON(map->num_stripes < num_stripes); - - if (map->num_stripes == num_stripes) - return map; - - new = kmalloc(len, GFP_NOFS); - if (!new) { - /* just change map->num_stripes */ - map->num_stripes = num_stripes; - return map; - } - - memcpy(new, map, len); - new->num_stripes = num_stripes; - kfree(map); - return new; -} - -/* - * helper to allocate device space from btrfs_device_info, in which we stored - * max free space information of every device. It is used when we can not - * allocate chunks by default size. - * - * By this helper, we can allocate a new chunk as larger as possible. - */ -static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans, - struct btrfs_fs_devices *fs_devices, - struct btrfs_device_info *devices, - int nr_device, u64 type, - struct map_lookup **map_lookup, - int min_stripes, u64 *stripe_size) -{ - int i, index, sort_again = 0; - int min_devices = min_stripes; - u64 max_avail, min_free; - struct map_lookup *map = *map_lookup; - int ret; + device = list_entry(cur, struct btrfs_device, dev_alloc_list); - if (nr_device < min_stripes) - return -ENOSPC; + cur = cur->next; - btrfs_descending_sort_devices(devices, nr_device); + if (!device->writeable) { + printk(KERN_ERR + "btrfs: read-only device in alloc_list\n"); + WARN_ON(1); + continue; + } - max_avail = devices[0].max_avail; - if (!max_avail) - return -ENOSPC; + if (!device->in_fs_metadata) + continue; - for (i = 0; i < nr_device; i++) { - /* - * if dev_offset = 0, it means the free space of this device - * is less than what we need, and we didn't search max avail - * extent on this device, so do it now. 
+ if (device->total_bytes > device->bytes_used) + total_avail = device->total_bytes - device->bytes_used; + else + total_avail = 0; + /* avail is off by max(alloc_start, 1MB), but that is the same + * for all devices, so it doesn't hurt the sorting later on */ - if (!devices[i].dev_offset) { - ret = find_free_dev_extent(trans, devices[i].dev, - max_avail, - &devices[i].dev_offset, - &devices[i].max_avail); - if (ret != 0 && ret != -ENOSPC) - return ret; - sort_again = 1; - } - } - /* we update the max avail free extent of each devices, sort again */ - if (sort_again) - btrfs_descending_sort_devices(devices, nr_device); - - if (type & BTRFS_BLOCK_GROUP_DUP) - min_devices = 1; + ret = find_free_dev_extent(trans, device, + max_stripe_size * dev_stripes, + &dev_offset, &max_avail); + if (ret && ret != -ENOSPC) + goto error; - if (!devices[min_devices - 1].max_avail) - return -ENOSPC; + if (ret == 0) + max_avail = max_stripe_size * dev_stripes; - max_avail = devices[min_devices - 1].max_avail; - if (type & BTRFS_BLOCK_GROUP_DUP) - do_div(max_avail, 2); + if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) + continue; - max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, - min_stripes, 1); - if (type & BTRFS_BLOCK_GROUP_DUP) - min_free = max_avail * 2; - else - min_free = max_avail; + devices_info[ndevs].dev_offset = dev_offset; + devices_info[ndevs].max_avail = max_avail; + devices_info[ndevs].total_avail = total_avail; + devices_info[ndevs].dev = device; + ++ndevs; + } - if (min_free > devices[min_devices - 1].max_avail) - return -ENOSPC; + /* + * now sort the devices by hole size / available space + */ + sort(devices_info, ndevs, sizeof(struct btrfs_device_info), + btrfs_cmp_device_info, NULL); - map = __shrink_map_lookup_stripes(map, min_stripes); - *stripe_size = max_avail; + /* round down to number of usable stripes */ + ndevs -= ndevs % devs_increment; - index = 0; - for (i = 0; i < min_stripes; i++) { - map->stripes[i].dev = devices[index].dev; - map->stripes[i].physical = devices[index].dev_offset; - if (type & BTRFS_BLOCK_GROUP_DUP) { - i++; - map->stripes[i].dev = devices[index].dev; - map->stripes[i].physical = devices[index].dev_offset + - max_avail; - } - index++; + if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) { + ret = -ENOSPC; + goto error; } - *map_lookup = map; - - return 0; -} -static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct map_lookup **map_ret, - u64 *num_bytes, u64 *stripe_size, - u64 start, u64 type) -{ - struct btrfs_fs_info *info = extent_root->fs_info; - struct btrfs_device *device = NULL; - struct btrfs_fs_devices *fs_devices = info->fs_devices; - struct list_head *cur; - struct map_lookup *map; - struct extent_map_tree *em_tree; - struct extent_map *em; - struct btrfs_device_info *devices_info; - struct list_head private_devs; - u64 calc_size = 1024 * 1024 * 1024; - u64 min_free; - u64 avail; - u64 dev_offset; - int num_stripes; - int min_stripes; - int sub_stripes; - int min_devices; /* the min number of devices we need */ - int i; - int ret; - int index; + if (devs_max && ndevs > devs_max) + ndevs = devs_max; + /* + * the primary goal is to maximize the number of stripes, so use as many + * devices as possible, even if the stripes are not maximum sized. 
+ */ + stripe_size = devices_info[ndevs-1].max_avail; + num_stripes = ndevs * dev_stripes; - if ((type & BTRFS_BLOCK_GROUP_RAID1) && - (type & BTRFS_BLOCK_GROUP_DUP)) { - WARN_ON(1); - type &= ~BTRFS_BLOCK_GROUP_DUP; + if (stripe_size * num_stripes > max_chunk_size * ncopies) { + stripe_size = max_chunk_size * ncopies; + do_div(stripe_size, num_stripes); } - if (list_empty(&fs_devices->alloc_list)) - return -ENOSPC; - - ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes, - &min_stripes, &sub_stripes); - if (ret) - return ret; - devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, - GFP_NOFS); - if (!devices_info) - return -ENOMEM; + do_div(stripe_size, dev_stripes); + do_div(stripe_size, BTRFS_STRIPE_LEN); + stripe_size *= BTRFS_STRIPE_LEN; map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { @@ -2532,85 +2474,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, } map->num_stripes = num_stripes; - cur = fs_devices->alloc_list.next; - index = 0; - i = 0; - - calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, - num_stripes, 0); - - if (type & BTRFS_BLOCK_GROUP_DUP) { - min_free = calc_size * 2; - min_devices = 1; - } else { - min_free = calc_size; - min_devices = min_stripes; - } - - INIT_LIST_HEAD(&private_devs); - while (index < num_stripes) { - device = list_entry(cur, struct btrfs_device, dev_alloc_list); - BUG_ON(!device->writeable); - if (device->total_bytes > device->bytes_used) - avail = device->total_bytes - device->bytes_used; - else - avail = 0; - cur = cur->next; - - if (device->in_fs_metadata && avail >= min_free) { - ret = find_free_dev_extent(trans, device, min_free, - &devices_info[i].dev_offset, - &devices_info[i].max_avail); - if (ret == 0) { - list_move_tail(&device->dev_alloc_list, - &private_devs); - map->stripes[index].dev = device; - map->stripes[index].physical = - devices_info[i].dev_offset; - index++; - if (type & BTRFS_BLOCK_GROUP_DUP) { - map->stripes[index].dev = device; - map->stripes[index].physical = - devices_info[i].dev_offset + - calc_size; - index++; - } - } else if (ret != -ENOSPC) - goto error; - - devices_info[i].dev = device; - i++; - } else if (device->in_fs_metadata && - avail >= BTRFS_STRIPE_LEN) { - devices_info[i].dev = device; - devices_info[i].max_avail = avail; - i++; - } - - if (cur == &fs_devices->alloc_list) - break; - } - - list_splice(&private_devs, &fs_devices->alloc_list); - if (index < num_stripes) { - if (index >= min_stripes) { - num_stripes = index; - if (type & (BTRFS_BLOCK_GROUP_RAID10)) { - num_stripes /= sub_stripes; - num_stripes *= sub_stripes; - } - - map = __shrink_map_lookup_stripes(map, num_stripes); - } else if (i >= min_devices) { - ret = __btrfs_alloc_tiny_space(trans, fs_devices, - devices_info, i, type, - &map, min_stripes, - &calc_size); - if (ret) - goto error; - } else { - ret = -ENOSPC; - goto error; + for (i = 0; i < ndevs; ++i) { + for (j = 0; j < dev_stripes; ++j) { + int s = i * dev_stripes + j; + map->stripes[s].dev = devices_info[i].dev; + map->stripes[s].physical = devices_info[i].dev_offset + + j * stripe_size; } } map->sector_size = extent_root->sectorsize; @@ -2621,11 +2490,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, map->sub_stripes = sub_stripes; *map_ret = map; - *stripe_size = calc_size; - *num_bytes = chunk_bytes_by_type(type, calc_size, - map->num_stripes, sub_stripes); + num_bytes = stripe_size * (num_stripes / ncopies); + + *stripe_size_out = stripe_size; + *num_bytes_out = num_bytes; - 
trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); + trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes); em = alloc_extent_map(GFP_NOFS); if (!em) { @@ -2634,7 +2504,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, } em->bdev = (struct block_device *)map; em->start = start; - em->len = *num_bytes; + em->len = num_bytes; em->block_start = 0; em->block_len = em->len; @@ -2647,20 +2517,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, ret = btrfs_make_block_group(trans, extent_root, 0, type, BTRFS_FIRST_CHUNK_TREE_OBJECTID, - start, *num_bytes); + start, num_bytes); BUG_ON(ret); - index = 0; - while (index < map->num_stripes) { - device = map->stripes[index].dev; - dev_offset = map->stripes[index].physical; + for (i = 0; i < map->num_stripes; ++i) { + struct btrfs_device *device; + u64 dev_offset; + + device = map->stripes[i].dev; + dev_offset = map->stripes[i].physical; ret = btrfs_alloc_dev_extent(trans, device, info->chunk_root->root_key.objectid, BTRFS_FIRST_CHUNK_TREE_OBJECTID, - start, dev_offset, calc_size); + start, dev_offset, stripe_size); BUG_ON(ret); - index++; } kfree(devices_info); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index b502f01f79ed..37ae6e2126a1 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -144,6 +144,7 @@ struct btrfs_device_info { struct btrfs_device *dev; u64 dev_offset; u64 max_avail; + u64 total_avail; }; struct map_lookup { -- cgit v1.2.3
From 16cdcec736cd214350cdb591bf1091f8beedefa0 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Fri, 22 Apr 2011 18:12:22 +0800 Subject: btrfs: implement delayed inode items operation
Changelog V5 -> V6:
- Fix OOM when the memory load is high, by storing the delayed nodes into the root's radix tree, and letting btrfs inodes go.
Changelog V4 -> V5:
- Fix the race on adding the delayed node to the inode, which was spotted by Chris Mason.
- Merge Chris Mason's incremental patch into this patch.
- Fix a deadlock between readdir() and memory fault, which was reported by Itaru Kitayama.
Changelog V3 -> V4:
- Fix a nested lock, which was reported by Itaru Kitayama, by updating the space cache inode in time.
Changelog V2 -> V3:
- Fix the race between the delayed worker and the task which does the delayed items balance, which was reported by Tsutomu Itoh.
- Modify the patch to address David Sterba's comment.
- Fix the bug of the cpu recursion spinlock, reported by Chris Mason
Changelog V1 -> V2:
- break up the global rb-tree, use a list to manage the delayed nodes, which are created for every directory and file, and used to manage the delayed directory name index items and the delayed inode item.
- introduce a worker to deal with the delayed nodes.
Compared with Ext3/4, the performance of file creation and deletion on btrfs is very poor. The reason is that btrfs must do a lot of b+ tree insertions, such as inode item, directory name item, directory name index and so on. If we can do some delayed b+ tree insertion or deletion, we can improve the performance, so we made this patch, which implements delayed directory name index insertion/deletion and delayed inode update.
Implementation:
- introduce a delayed root object into the filesystem, which uses two lists to manage the delayed nodes that are created for every file/directory. One is used to manage all the delayed nodes that have delayed items. The other is used to manage the delayed nodes that are waiting to be dealt with by the work thread.
- Every delayed node has two rb-trees: one is used to manage the directory name index items that are going to be inserted into the b+ tree, and the other is used to manage the directory name index items that are going to be deleted from the b+ tree.
- introduce a worker to deal with the delayed operations. This worker handles the insertion and deletion of the delayed directory name index items and the delayed inode updates. When the number of delayed items goes beyond the lower limit, we create work items for some delayed nodes, insert them into the work queue of the worker, and then go back. When the number of delayed items goes beyond the upper bound, we create work items for all the delayed nodes that haven't been dealt with, insert them into the work queue of the worker, and then wait until the number of untreated items drops below some threshold value. (This policy is summarized in the sketch after this message.)
- When we want to insert a directory name index into the b+ tree, we just add the information to the delayed insertion rb-tree. Then we check the number of delayed items and do the delayed items balance. (The balance policy is above.)
- When we want to delete a directory name index from the b+ tree, we first search for it in the insertion rb-tree. If we find it, we just drop it. If not, we add its key to the delayed deletion rb-tree. As with the insertion rb-tree, we then check the number of delayed items and do the delayed items balance. (The same as for the insertion manipulation.)
- When we want to update the metadata of some inode, we cache the inode data in the delayed node. The worker will flush it into the b+ tree after dealing with the delayed insertions and deletions.
- We move the delayed node to the tail of the list after we access it. This way, we can cache more delayed items and merge more inode updates.
- If we want to commit the transaction, we deal with all the delayed nodes.
- The delayed node is freed when we free the btrfs inode.
- Before we log the inode items, we commit all the directory name index items and the delayed inode update.
I did a quick test with the benchmark tool[1] and found we can improve the performance of file creation by ~15%, and file deletion by ~20%.
Before applying this patch:
Create files: Total files: 50000 Total time: 1.096108 Average time: 0.000022
Delete files: Total files: 50000 Total time: 1.510403 Average time: 0.000030
After applying this patch:
Create files: Total files: 50000 Total time: 0.932899 Average time: 0.000019
Delete files: Total files: 50000 Total time: 1.215732 Average time: 0.000024
[1] http://marc.info/?l=linux-btrfs&m=128212635122920&q=p3
Many thanks for Kitayama-san's help!
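The balance policy above can be summarized in code form (a simplified sketch: the two watermark constants appear in the patch below, but kick_async_worker() and flush_and_wait() are hypothetical stand-ins for the real worker plumbing):

	#define BTRFS_DELAYED_WRITEBACK  400	/* upper bound: throttle the caller */
	#define BTRFS_DELAYED_BACKGROUND 100	/* lower limit: kick the async worker */

	/* sketch of the decision made after queuing a delayed item */
	static void balance_delayed_items_sketch(struct btrfs_delayed_root *droot)
	{
		int items = atomic_read(&droot->items);

		if (items < BTRFS_DELAYED_BACKGROUND)
			return;				/* few enough items; nothing to do */

		if (items < BTRFS_DELAYED_WRITEBACK)
			kick_async_worker(droot);	/* hypothetical: flush in the background */
		else
			flush_and_wait(droot);		/* hypothetical: block until below the limit */
	}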
Signed-off-by: Miao Xie Reviewed-by: David Sterba Tested-by: Tsutomu Itoh Tested-by: Itaru Kitayama Signed-off-by: Chris Mason --- fs/btrfs/Makefile | 2 +- fs/btrfs/btrfs_inode.h | 5 + fs/btrfs/ctree.c | 14 +- fs/btrfs/ctree.h | 29 +- fs/btrfs/delayed-inode.c | 1694 ++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/delayed-inode.h | 141 ++++ fs/btrfs/dir-item.c | 34 +- fs/btrfs/disk-io.c | 50 +- fs/btrfs/disk-io.h | 1 + fs/btrfs/extent-tree.c | 18 +- fs/btrfs/inode.c | 111 ++- fs/btrfs/ioctl.c | 2 +- fs/btrfs/super.c | 10 +- fs/btrfs/transaction.c | 45 +- fs/btrfs/transaction.h | 2 + fs/btrfs/tree-log.c | 7 + 16 files changed, 2074 insertions(+), 91 deletions(-) create mode 100644 fs/btrfs/delayed-inode.c create mode 100644 fs/btrfs/delayed-inode.h diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 31610ea73aec..a8411c22313d 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ - compression.o delayed-ref.o relocation.o + compression.o delayed-ref.o relocation.o delayed-inode.o diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884ce..beefafd91f22 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -22,6 +22,7 @@ #include "extent_map.h" #include "extent_io.h" #include "ordered-data.h" +#include "delayed-inode.h" /* in memory btrfs inode */ struct btrfs_inode { @@ -158,9 +159,13 @@ struct btrfs_inode { */ unsigned force_compress:4; + struct btrfs_delayed_node *delayed_node; + struct inode vfs_inode; }; +extern unsigned char btrfs_filetype_table[]; + static inline struct btrfs_inode *BTRFS_I(struct inode *inode) { return container_of(inode, struct btrfs_inode, vfs_inode); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..2736b6b2ff5f 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans, struct extent_buffer *src_buf); static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot); -static int setup_items_for_insert(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - u32 total_data, u32 total_size, int nr); - struct btrfs_path *btrfs_alloc_path(void) { @@ -3559,11 +3554,10 @@ out: * to save stack depth by doing the bulk of the work in a function * that doesn't call btrfs_search_slot */ -static noinline_for_stack int -setup_items_for_insert(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - u32 total_data, u32 total_size, int nr) +int setup_items_for_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *cpu_key, u32 *data_size, + u32 total_data, u32 total_size, int nr) { struct btrfs_item *item; int i; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..5d25129d0116 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -869,6 +869,7 @@ struct btrfs_block_group_cache { struct reloc_control; struct btrfs_device; struct btrfs_fs_devices; +struct btrfs_delayed_root; struct btrfs_fs_info { u8 fsid[BTRFS_FSID_SIZE]; u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; @@ -895,7 +896,10 @@ 
struct btrfs_fs_info { /* logical->physical extent mapping */ struct btrfs_mapping_tree mapping_tree; - /* block reservation for extent, checksum and root tree */ + /* + * block reservation for extent, checksum, root tree and + * delayed dir index item + */ struct btrfs_block_rsv global_block_rsv; /* block reservation for delay allocation */ struct btrfs_block_rsv delalloc_block_rsv; @@ -1022,6 +1026,7 @@ struct btrfs_fs_info { * for the sys_munmap function call path */ struct btrfs_workers fixup_workers; + struct btrfs_workers delayed_workers; struct task_struct *transaction_kthread; struct task_struct *cleaner_kthread; int thread_pool_size; @@ -1079,6 +1084,8 @@ struct btrfs_fs_info { /* filesystem state */ u64 fs_state; + + struct btrfs_delayed_root *delayed_root; }; /* @@ -1161,6 +1168,11 @@ struct btrfs_root { /* red-black tree that keeps track of in-memory inodes */ struct rb_root inode_tree; + /* + * radix tree that keeps track of delayed nodes of every inode, + * protected by inode_lock + */ + struct radix_tree_root delayed_nodes_tree; /* * right now this just gets used so that a root has its own devid * for stat. It may be used for more later @@ -2099,6 +2111,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) } /* extent-tree.c */ +static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, + int num_items) +{ + return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * + 3 * num_items; +} + void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); @@ -2294,6 +2313,8 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); void btrfs_set_path_blocking(struct btrfs_path *p); +void btrfs_clear_path_blocking(struct btrfs_path *p, + struct extent_buffer *held); void btrfs_unlock_up_safe(struct btrfs_path *p, int level); int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -2305,6 +2326,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, return btrfs_del_items(trans, root, path, path->slots[0], 1); } +int setup_items_for_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *cpu_key, u32 *data_size, + u32 total_data, u32 total_size, int nr); int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, void *data, u32 data_size); int btrfs_insert_some_items(struct btrfs_trans_handle *trans, @@ -2368,7 +2393,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item); /* dir-item.c */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, - int name_len, u64 dir, + int name_len, struct inode *dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c new file mode 100644 index 000000000000..95485318f001 --- /dev/null +++ b/fs/btrfs/delayed-inode.c @@ -0,0 +1,1694 @@ +/* + * Copyright (C) 2011 Fujitsu. All rights reserved. + * Written by Miao Xie + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include +#include "delayed-inode.h" +#include "disk-io.h" +#include "transaction.h" + +#define BTRFS_DELAYED_WRITEBACK 400 +#define BTRFS_DELAYED_BACKGROUND 100 + +static struct kmem_cache *delayed_node_cache; + +int __init btrfs_delayed_inode_init(void) +{ + delayed_node_cache = kmem_cache_create("delayed_node", + sizeof(struct btrfs_delayed_node), + 0, + SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, + NULL); + if (!delayed_node_cache) + return -ENOMEM; + return 0; +} + +void btrfs_delayed_inode_exit(void) +{ + if (delayed_node_cache) + kmem_cache_destroy(delayed_node_cache); +} + +static inline void btrfs_init_delayed_node( + struct btrfs_delayed_node *delayed_node, + struct btrfs_root *root, u64 inode_id) +{ + delayed_node->root = root; + delayed_node->inode_id = inode_id; + atomic_set(&delayed_node->refs, 0); + delayed_node->count = 0; + delayed_node->in_list = 0; + delayed_node->inode_dirty = 0; + delayed_node->ins_root = RB_ROOT; + delayed_node->del_root = RB_ROOT; + mutex_init(&delayed_node->mutex); + delayed_node->index_cnt = 0; + INIT_LIST_HEAD(&delayed_node->n_list); + INIT_LIST_HEAD(&delayed_node->p_list); + delayed_node->bytes_reserved = 0; +} + +static inline int btrfs_is_continuous_delayed_item( + struct btrfs_delayed_item *item1, + struct btrfs_delayed_item *item2) +{ + if (item1->key.type == BTRFS_DIR_INDEX_KEY && + item1->key.objectid == item2->key.objectid && + item1->key.type == item2->key.type && + item1->key.offset + 1 == item2->key.offset) + return 1; + return 0; +} + +static inline struct btrfs_delayed_root *btrfs_get_delayed_root( + struct btrfs_root *root) +{ + return root->fs_info->delayed_root; +} + +static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( + struct inode *inode) +{ + struct btrfs_delayed_node *node; + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_root *root = btrfs_inode->root; + int ret; + +again: + node = ACCESS_ONCE(btrfs_inode->delayed_node); + if (node) { + atomic_inc(&node->refs); /* can be accessed */ + return node; + } + + spin_lock(&root->inode_lock); + node = radix_tree_lookup(&root->delayed_nodes_tree, inode->i_ino); + if (node) { + if (btrfs_inode->delayed_node) { + spin_unlock(&root->inode_lock); + goto again; + } + btrfs_inode->delayed_node = node; + atomic_inc(&node->refs); /* can be accessed */ + atomic_inc(&node->refs); /* cached in the inode */ + spin_unlock(&root->inode_lock); + return node; + } + spin_unlock(&root->inode_lock); + + node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); + if (!node) + return ERR_PTR(-ENOMEM); + btrfs_init_delayed_node(node, root, inode->i_ino); + + atomic_inc(&node->refs); /* cached in the btrfs inode */ + atomic_inc(&node->refs); /* can be accessed */ + + ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + if (ret) { + kmem_cache_free(delayed_node_cache, node); + return ERR_PTR(ret); + } + + spin_lock(&root->inode_lock); + ret = radix_tree_insert(&root->delayed_nodes_tree, inode->i_ino, node); + if (ret == -EEXIST) { + kmem_cache_free(delayed_node_cache, node); + 
spin_unlock(&root->inode_lock); + radix_tree_preload_end(); + goto again; + } + btrfs_inode->delayed_node = node; + spin_unlock(&root->inode_lock); + radix_tree_preload_end(); + + return node; +} + +/* + * Call it when holding delayed_node->mutex + * + * If mod = 1, add this node into the prepared list. + */ +static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root, + struct btrfs_delayed_node *node, + int mod) +{ + spin_lock(&root->lock); + if (node->in_list) { + if (!list_empty(&node->p_list)) + list_move_tail(&node->p_list, &root->prepare_list); + else if (mod) + list_add_tail(&node->p_list, &root->prepare_list); + } else { + list_add_tail(&node->n_list, &root->node_list); + list_add_tail(&node->p_list, &root->prepare_list); + atomic_inc(&node->refs); /* inserted into list */ + root->nodes++; + node->in_list = 1; + } + spin_unlock(&root->lock); +} + +/* Call it when holding delayed_node->mutex */ +static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root, + struct btrfs_delayed_node *node) +{ + spin_lock(&root->lock); + if (node->in_list) { + root->nodes--; + atomic_dec(&node->refs); /* not in the list */ + list_del_init(&node->n_list); + if (!list_empty(&node->p_list)) + list_del_init(&node->p_list); + node->in_list = 0; + } + spin_unlock(&root->lock); +} + +struct btrfs_delayed_node *btrfs_first_delayed_node( + struct btrfs_delayed_root *delayed_root) +{ + struct list_head *p; + struct btrfs_delayed_node *node = NULL; + + spin_lock(&delayed_root->lock); + if (list_empty(&delayed_root->node_list)) + goto out; + + p = delayed_root->node_list.next; + node = list_entry(p, struct btrfs_delayed_node, n_list); + atomic_inc(&node->refs); +out: + spin_unlock(&delayed_root->lock); + + return node; +} + +struct btrfs_delayed_node *btrfs_next_delayed_node( + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_root *delayed_root; + struct list_head *p; + struct btrfs_delayed_node *next = NULL; + + delayed_root = node->root->fs_info->delayed_root; + spin_lock(&delayed_root->lock); + if (!node->in_list) { /* not in the list */ + if (list_empty(&delayed_root->node_list)) + goto out; + p = delayed_root->node_list.next; + } else if (list_is_last(&node->n_list, &delayed_root->node_list)) + goto out; + else + p = node->n_list.next; + + next = list_entry(p, struct btrfs_delayed_node, n_list); + atomic_inc(&next->refs); +out: + spin_unlock(&delayed_root->lock); + + return next; +} + +static void __btrfs_release_delayed_node( + struct btrfs_delayed_node *delayed_node, + int mod) +{ + struct btrfs_delayed_root *delayed_root; + + if (!delayed_node) + return; + + delayed_root = delayed_node->root->fs_info->delayed_root; + + mutex_lock(&delayed_node->mutex); + if (delayed_node->count) + btrfs_queue_delayed_node(delayed_root, delayed_node, mod); + else + btrfs_dequeue_delayed_node(delayed_root, delayed_node); + mutex_unlock(&delayed_node->mutex); + + if (atomic_dec_and_test(&delayed_node->refs)) { + struct btrfs_root *root = delayed_node->root; + spin_lock(&root->inode_lock); + if (atomic_read(&delayed_node->refs) == 0) { + radix_tree_delete(&root->delayed_nodes_tree, + delayed_node->inode_id); + kmem_cache_free(delayed_node_cache, delayed_node); + } + spin_unlock(&root->inode_lock); + } +} + +static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node) +{ + __btrfs_release_delayed_node(node, 0); +} + +struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( + struct btrfs_delayed_root *delayed_root) +{ + struct list_head *p; + struct btrfs_delayed_node 
*node = NULL; + + spin_lock(&delayed_root->lock); + if (list_empty(&delayed_root->prepare_list)) + goto out; + + p = delayed_root->prepare_list.next; + list_del_init(p); + node = list_entry(p, struct btrfs_delayed_node, p_list); + atomic_inc(&node->refs); +out: + spin_unlock(&delayed_root->lock); + + return node; +} + +static inline void btrfs_release_prepared_delayed_node( + struct btrfs_delayed_node *node) +{ + __btrfs_release_delayed_node(node, 1); +} + +struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) +{ + struct btrfs_delayed_item *item; + item = kmalloc(sizeof(*item) + data_len, GFP_NOFS); + if (item) { + item->data_len = data_len; + item->ins_or_del = 0; + item->bytes_reserved = 0; + item->block_rsv = NULL; + item->delayed_node = NULL; + atomic_set(&item->refs, 1); + } + return item; +} + +/* + * __btrfs_lookup_delayed_item - look up the delayed item by key + * @delayed_node: pointer to the delayed node + * @key: the key to look up + * @prev: used to store the prev item if the right item isn't found + * @next: used to store the next item if the right item isn't found + * + * Note: if we don't find the right item, we will return the prev item and + * the next item. + */ +static struct btrfs_delayed_item *__btrfs_lookup_delayed_item( + struct rb_root *root, + struct btrfs_key *key, + struct btrfs_delayed_item **prev, + struct btrfs_delayed_item **next) +{ + struct rb_node *node, *prev_node = NULL; + struct btrfs_delayed_item *delayed_item = NULL; + int ret = 0; + + node = root->rb_node; + + while (node) { + delayed_item = rb_entry(node, struct btrfs_delayed_item, + rb_node); + prev_node = node; + ret = btrfs_comp_cpu_keys(&delayed_item->key, key); + if (ret < 0) + node = node->rb_right; + else if (ret > 0) + node = node->rb_left; + else + return delayed_item; + } + + if (prev) { + if (!prev_node) + *prev = NULL; + else if (ret < 0) + *prev = delayed_item; + else if ((node = rb_prev(prev_node)) != NULL) { + *prev = rb_entry(node, struct btrfs_delayed_item, + rb_node); + } else + *prev = NULL; + } + + if (next) { + if (!prev_node) + *next = NULL; + else if (ret > 0) + *next = delayed_item; + else if ((node = rb_next(prev_node)) != NULL) { + *next = rb_entry(node, struct btrfs_delayed_item, + rb_node); + } else + *next = NULL; + } + return NULL; +} + +struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, + NULL, NULL); + return item; +} + +struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, + NULL, NULL); + return item; +} + +struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item, *next; + + item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, + NULL, &next); + if (!item) + item = next; + + return item; +} + +struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item, *next; + + item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, + NULL, &next); + if (!item) + item = next; + + return item; +} + +static int __btrfs_add_delayed_item(struct btrfs_delayed_node 
*delayed_node, + struct btrfs_delayed_item *ins, + int action) +{ + struct rb_node **p, *node; + struct rb_node *parent_node = NULL; + struct rb_root *root; + struct btrfs_delayed_item *item; + int cmp; + + if (action == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_node->ins_root; + else if (action == BTRFS_DELAYED_DELETION_ITEM) + root = &delayed_node->del_root; + else + BUG(); + p = &root->rb_node; + node = &ins->rb_node; + + while (*p) { + parent_node = *p; + item = rb_entry(parent_node, struct btrfs_delayed_item, + rb_node); + + cmp = btrfs_comp_cpu_keys(&item->key, &ins->key); + if (cmp < 0) + p = &(*p)->rb_right; + else if (cmp > 0) + p = &(*p)->rb_left; + else + return -EEXIST; + } + + rb_link_node(node, parent_node, p); + rb_insert_color(node, root); + ins->delayed_node = delayed_node; + ins->ins_or_del = action; + + if (ins->key.type == BTRFS_DIR_INDEX_KEY && + action == BTRFS_DELAYED_INSERTION_ITEM && + ins->key.offset >= delayed_node->index_cnt) + delayed_node->index_cnt = ins->key.offset + 1; + + delayed_node->count++; + atomic_inc(&delayed_node->root->fs_info->delayed_root->items); + return 0; +} + +static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node, + struct btrfs_delayed_item *item) +{ + return __btrfs_add_delayed_item(node, item, + BTRFS_DELAYED_INSERTION_ITEM); +} + +static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node, + struct btrfs_delayed_item *item) +{ + return __btrfs_add_delayed_item(node, item, + BTRFS_DELAYED_DELETION_ITEM); +} + +static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) +{ + struct rb_root *root; + struct btrfs_delayed_root *delayed_root; + + delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root; + + BUG_ON(!delayed_root); + BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM && + delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM); + + if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_item->delayed_node->ins_root; + else + root = &delayed_item->delayed_node->del_root; + + rb_erase(&delayed_item->rb_node, root); + delayed_item->delayed_node->count--; + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && + waitqueue_active(&delayed_root->wait)) + wake_up(&delayed_root->wait); +} + +static void btrfs_release_delayed_item(struct btrfs_delayed_item *item) +{ + if (item) { + __btrfs_remove_delayed_item(item); + if (atomic_dec_and_test(&item->refs)) + kfree(item); + } +} + +struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node) +{ + struct rb_node *p; + struct btrfs_delayed_item *item = NULL; + + p = rb_first(&delayed_node->ins_root); + if (p) + item = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return item; +} + +struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node) +{ + struct rb_node *p; + struct btrfs_delayed_item *item = NULL; + + p = rb_first(&delayed_node->del_root); + if (p) + item = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return item; +} + +struct btrfs_delayed_item *__btrfs_next_delayed_item( + struct btrfs_delayed_item *item) +{ + struct rb_node *p; + struct btrfs_delayed_item *next = NULL; + + p = rb_next(&item->rb_node); + if (p) + next = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return next; +} + +static inline struct btrfs_delayed_node *btrfs_get_delayed_node( + struct inode *inode) +{ + struct btrfs_inode 
*btrfs_inode = BTRFS_I(inode); + struct btrfs_delayed_node *delayed_node; + + delayed_node = btrfs_inode->delayed_node; + if (delayed_node) + atomic_inc(&delayed_node->refs); + + return delayed_node; +} + +static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, + u64 root_id) +{ + struct btrfs_key root_key; + + if (root->objectid == root_id) + return root; + + root_key.objectid = root_id; + root_key.type = BTRFS_ROOT_ITEM_KEY; + root_key.offset = (u64)-1; + return btrfs_read_fs_root_no_name(root->fs_info, &root_key); +} + +static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_delayed_item *item) +{ + struct btrfs_block_rsv *src_rsv; + struct btrfs_block_rsv *dst_rsv; + u64 num_bytes; + int ret; + + if (!trans->bytes_reserved) + return 0; + + src_rsv = trans->block_rsv; + dst_rsv = &root->fs_info->global_block_rsv; + + num_bytes = btrfs_calc_trans_metadata_size(root, 1); + ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + if (!ret) { + item->bytes_reserved = num_bytes; + item->block_rsv = dst_rsv; + } + + return ret; +} + +static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, + struct btrfs_delayed_item *item) +{ + if (!item->bytes_reserved) + return; + + btrfs_block_rsv_release(root, item->block_rsv, + item->bytes_reserved); +} + +static int btrfs_delayed_inode_reserve_metadata( + struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_block_rsv *src_rsv; + struct btrfs_block_rsv *dst_rsv; + u64 num_bytes; + int ret; + + if (!trans->bytes_reserved) + return 0; + + src_rsv = trans->block_rsv; + dst_rsv = &root->fs_info->global_block_rsv; + + num_bytes = btrfs_calc_trans_metadata_size(root, 1); + ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + if (!ret) + node->bytes_reserved = num_bytes; + + return ret; +} + +static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_block_rsv *rsv; + + if (!node->bytes_reserved) + return; + + rsv = &root->fs_info->global_block_rsv; + btrfs_block_rsv_release(root, rsv, + node->bytes_reserved); + node->bytes_reserved = 0; +} + +/* + * This helper will insert some continuous items into the same leaf according + * to the free space of the leaf. 
+ */ +static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *item) +{ + struct btrfs_delayed_item *curr, *next; + int free_space; + int total_data_size = 0, total_size = 0; + struct extent_buffer *leaf; + char *data_ptr; + struct btrfs_key *keys; + u32 *data_size; + struct list_head head; + int slot; + int nitems; + int i; + int ret = 0; + + BUG_ON(!path->nodes[0]); + + leaf = path->nodes[0]; + free_space = btrfs_leaf_free_space(root, leaf); + INIT_LIST_HEAD(&head); + + next = item; + nitems = 0; + + /* + * count the number of the continuous items that we can insert in batch + */ + while (total_size + next->data_len + sizeof(struct btrfs_item) <= + free_space) { + total_data_size += next->data_len; + total_size += next->data_len + sizeof(struct btrfs_item); + list_add_tail(&next->tree_list, &head); + nitems++; + + curr = next; + next = __btrfs_next_delayed_item(curr); + if (!next) + break; + + if (!btrfs_is_continuous_delayed_item(curr, next)) + break; + } + + if (!nitems) { + ret = 0; + goto out; + } + + /* + * we need to allocate some memory space, but it might cause the task + * to sleep, so we set all locked nodes in the path to blocking locks + * first. + */ + btrfs_set_path_blocking(path); + + keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS); + if (!keys) { + ret = -ENOMEM; + goto out; + } + + data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS); + if (!data_size) { + ret = -ENOMEM; + goto error; + } + + /* get keys of all the delayed items */ + i = 0; + list_for_each_entry(next, &head, tree_list) { + keys[i] = next->key; + data_size[i] = next->data_len; + i++; + } + + /* reset all the locked nodes in the path to spinning locks. */ + btrfs_clear_path_blocking(path, NULL); + + /* insert the keys of the items */ + ret = setup_items_for_insert(trans, root, path, keys, data_size, + total_data_size, total_size, nitems); + if (ret) + goto error; + + /* insert the dir index items */ + slot = path->slots[0]; + list_for_each_entry_safe(curr, next, &head, tree_list) { + data_ptr = btrfs_item_ptr(leaf, slot, char); + write_extent_buffer(leaf, &curr->data, + (unsigned long)data_ptr, + curr->data_len); + slot++; + + btrfs_delayed_item_release_metadata(root, curr); + + list_del(&curr->tree_list); + btrfs_release_delayed_item(curr); + } + +error: + kfree(data_size); + kfree(keys); +out: + return ret; +} + +/* + * This helper can just do simple insertion that needn't extend the item for + * new data, such as directory name index insertion, inode insertion. + */ +static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *delayed_item) +{ + struct extent_buffer *leaf; + struct btrfs_item *item; + char *ptr; + int ret; + + ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, + delayed_item->data_len); + if (ret < 0 && ret != -EEXIST) + return ret; + + leaf = path->nodes[0]; + + item = btrfs_item_nr(leaf, path->slots[0]); + ptr = btrfs_item_ptr(leaf, path->slots[0], char); + + write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr, + delayed_item->data_len); + btrfs_mark_buffer_dirty(leaf); + + btrfs_delayed_item_release_metadata(root, delayed_item); + return 0; +} + +/* + * we insert an item first, then if there are some continuous items, we try + * to insert those items into the same leaf.
+ */ +static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_item *curr, *prev; + int ret = 0; + +do_again: + mutex_lock(&node->mutex); + curr = __btrfs_first_delayed_insertion_item(node); + if (!curr) + goto insert_end; + + ret = btrfs_insert_delayed_item(trans, root, path, curr); + if (ret < 0) { + btrfs_release_path(root, path); + goto insert_end; + } + + prev = curr; + curr = __btrfs_next_delayed_item(prev); + if (curr && btrfs_is_continuous_delayed_item(prev, curr)) { + /* insert the continuous items into the same leaf */ + path->slots[0]++; + btrfs_batch_insert_items(trans, root, path, curr); + } + btrfs_release_delayed_item(prev); + btrfs_mark_buffer_dirty(path->nodes[0]); + + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + goto do_again; + +insert_end: + mutex_unlock(&node->mutex); + return ret; +} + +static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *item) +{ + struct btrfs_delayed_item *curr, *next; + struct extent_buffer *leaf; + struct btrfs_key key; + struct list_head head; + int nitems, i, last_item; + int ret = 0; + + BUG_ON(!path->nodes[0]); + + leaf = path->nodes[0]; + + i = path->slots[0]; + last_item = btrfs_header_nritems(leaf) - 1; + if (i > last_item) + return -ENOENT; /* FIXME: Is errno suitable? */ + + next = item; + INIT_LIST_HEAD(&head); + btrfs_item_key_to_cpu(leaf, &key, i); + nitems = 0; + /* + * count the number of the dir index items that we can delete in batch + */ + while (btrfs_comp_cpu_keys(&next->key, &key) == 0) { + list_add_tail(&next->tree_list, &head); + nitems++; + + curr = next; + next = __btrfs_next_delayed_item(curr); + if (!next) + break; + + if (!btrfs_is_continuous_delayed_item(curr, next)) + break; + + i++; + if (i > last_item) + break; + btrfs_item_key_to_cpu(leaf, &key, i); + } + + if (!nitems) + return 0; + + ret = btrfs_del_items(trans, root, path, path->slots[0], nitems); + if (ret) + goto out; + + list_for_each_entry_safe(curr, next, &head, tree_list) { + btrfs_delayed_item_release_metadata(root, curr); + list_del(&curr->tree_list); + btrfs_release_delayed_item(curr); + } + +out: + return ret; +} + +static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_item *curr, *prev; + int ret = 0; + +do_again: + mutex_lock(&node->mutex); + curr = __btrfs_first_delayed_deletion_item(node); + if (!curr) + goto delete_fail; + + ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); + if (ret < 0) + goto delete_fail; + else if (ret > 0) { + /* + * can't find the item which the node points to, so this node + * is invalid, just drop it. 
+ */ + prev = curr; + curr = __btrfs_next_delayed_item(prev); + btrfs_release_delayed_item(prev); + ret = 0; + btrfs_release_path(root, path); + if (curr) + goto do_again; + else + goto delete_fail; + } + + btrfs_batch_delete_items(trans, root, path, curr); + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + goto do_again; + +delete_fail: + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + return ret; +} + +static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) +{ + struct btrfs_delayed_root *delayed_root; + + if (delayed_node && delayed_node->inode_dirty) { + BUG_ON(!delayed_node->root); + delayed_node->inode_dirty = 0; + delayed_node->count--; + + delayed_root = delayed_node->root->fs_info->delayed_root; + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND && + waitqueue_active(&delayed_root->wait)) + wake_up(&delayed_root->wait); + } +} + +static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_node *node) +{ + struct btrfs_key key; + struct btrfs_inode_item *inode_item; + struct extent_buffer *leaf; + int ret; + + mutex_lock(&node->mutex); + if (!node->inode_dirty) { + mutex_unlock(&node->mutex); + return 0; + } + + key.objectid = node->inode_id; + btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); + key.offset = 0; + ret = btrfs_lookup_inode(trans, root, path, &key, 1); + if (ret > 0) { + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + return -ENOENT; + } else if (ret < 0) { + mutex_unlock(&node->mutex); + return ret; + } + + btrfs_unlock_up_safe(path, 1); + leaf = path->nodes[0]; + inode_item = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_inode_item); + write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item, + sizeof(struct btrfs_inode_item)); + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + + btrfs_delayed_inode_release_metadata(root, node); + btrfs_release_delayed_inode(node); + mutex_unlock(&node->mutex); + + return 0; +} + +/* Called when committing the transaction. 
*/ +int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + struct btrfs_delayed_node *curr_node, *prev_node; + struct btrfs_path *path; + int ret = 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + delayed_root = btrfs_get_delayed_root(root); + + curr_node = btrfs_first_delayed_node(delayed_root); + while (curr_node) { + root = curr_node->root; + ret = btrfs_insert_delayed_items(trans, path, root, + curr_node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, root, + curr_node); + if (!ret) + ret = btrfs_update_delayed_inode(trans, root, path, + curr_node); + if (ret) { + btrfs_release_delayed_node(curr_node); + break; + } + + prev_node = curr_node; + curr_node = btrfs_next_delayed_node(curr_node); + btrfs_release_delayed_node(prev_node); + } + + btrfs_free_path(path); + return ret; +} + +static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_delayed_node *node) +{ + struct btrfs_path *path; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + ret = btrfs_insert_delayed_items(trans, path, node->root, node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, node->root, node); + if (!ret) + ret = btrfs_update_delayed_inode(trans, node->root, path, node); + btrfs_free_path(path); + + return ret; +} + +int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); + int ret; + + if (!delayed_node) + return 0; + + mutex_lock(&delayed_node->mutex); + if (!delayed_node->count) { + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return 0; + } + mutex_unlock(&delayed_node->mutex); + + ret = __btrfs_commit_inode_delayed_items(trans, delayed_node); + btrfs_release_delayed_node(delayed_node); + return ret; +} + +void btrfs_remove_delayed_node(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + + delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node); + if (!delayed_node) + return; + + BTRFS_I(inode)->delayed_node = NULL; + btrfs_release_delayed_node(delayed_node); +} + +struct btrfs_async_delayed_node { + struct btrfs_root *root; + struct btrfs_delayed_node *delayed_node; + struct btrfs_work work; +}; + +static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) +{ + struct btrfs_async_delayed_node *async_node; + struct btrfs_trans_handle *trans; + struct btrfs_path *path; + struct btrfs_delayed_node *delayed_node = NULL; + struct btrfs_root *root; + unsigned long nr = 0; + int need_requeue = 0; + int ret; + + async_node = container_of(work, struct btrfs_async_delayed_node, work); + + path = btrfs_alloc_path(); + if (!path) + goto out; + path->leave_spinning = 1; + + delayed_node = async_node->delayed_node; + root = delayed_node->root; + + trans = btrfs_join_transaction(root, 0); + if (IS_ERR(trans)) + goto free_path; + + ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, root, + delayed_node); + + if (!ret) + btrfs_update_delayed_inode(trans, root, path, delayed_node); + + /* + * Maybe new delayed items have been inserted, so we need to requeue + * the work. Besides that, we must dequeue the empty delayed nodes + * to avoid the race between delayed items balance and the worker.
+ * The race is like this: + * Task1 Worker thread + * count == 0, needn't requeue + * also needn't insert the + * delayed node into prepare + * list again. + * add lots of delayed items + * queue the delayed node + * already in the list, + * and not in the prepare + * list, it means the delayed + * node is being dealt with + * by the worker. + * do delayed items balance + * the delayed node is being + * dealt with by the worker + * now, just wait. + * the worker goes idle. + * Task1 will sleep until the transaction is committed. + */ + mutex_lock(&delayed_node->mutex); + if (delayed_node->count) + need_requeue = 1; + else + btrfs_dequeue_delayed_node(root->fs_info->delayed_root, + delayed_node); + mutex_unlock(&delayed_node->mutex); + + nr = trans->blocks_used; + + btrfs_end_transaction_dmeta(trans, root); + __btrfs_btree_balance_dirty(root, nr); +free_path: + btrfs_free_path(path); +out: + if (need_requeue) + btrfs_requeue_work(&async_node->work); + else { + btrfs_release_prepared_delayed_node(delayed_node); + kfree(async_node); + } +} + +static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, + struct btrfs_root *root, int all) +{ + struct btrfs_async_delayed_node *async_node; + struct btrfs_delayed_node *curr; + int count = 0; + +again: + curr = btrfs_first_prepared_delayed_node(delayed_root); + if (!curr) + return 0; + + async_node = kmalloc(sizeof(*async_node), GFP_NOFS); + if (!async_node) { + btrfs_release_prepared_delayed_node(curr); + return -ENOMEM; + } + + async_node->root = root; + async_node->delayed_node = curr; + + async_node->work.func = btrfs_async_run_delayed_node_done; + async_node->work.flags = 0; + + btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work); + count++; + + if (all || count < 4) + goto again; + + return 0; +} + +void btrfs_balance_delayed_items(struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + + delayed_root = btrfs_get_delayed_root(root); + + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) + return; + + if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) { + int ret; + ret = btrfs_wq_run_delayed_node(delayed_root, root, 1); + if (ret) + return; + + wait_event_interruptible_timeout( + delayed_root->wait, + (atomic_read(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND), + HZ); + return; + } + + btrfs_wq_run_delayed_node(delayed_root, root, 0); +} + +int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, struct inode *dir, + struct btrfs_disk_key *disk_key, u8 type, + u64 index) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_delayed_item *delayed_item; + struct btrfs_dir_item *dir_item; + int ret; + + delayed_node = btrfs_get_or_create_delayed_node(dir); + if (IS_ERR(delayed_node)) + return PTR_ERR(delayed_node); + + delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len); + if (!delayed_item) { + ret = -ENOMEM; + goto release_node; + } + + ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item); + /* + * we have reserved enough space when we start a new transaction, + * so reserving metadata failure is impossible + */ + BUG_ON(ret); + + delayed_item->key.objectid = dir->i_ino; + btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY); + delayed_item->key.offset = index; + + dir_item = (struct btrfs_dir_item *)delayed_item->data; + dir_item->location = *disk_key; + dir_item->transid = cpu_to_le64(trans->transid); + dir_item->data_len = 0; +
dir_item->name_len = cpu_to_le16(name_len); + dir_item->type = type; + memcpy((char *)(dir_item + 1), name, name_len); + + mutex_lock(&delayed_node->mutex); + ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item); + if (unlikely(ret)) { + printk(KERN_ERR "err add delayed dir index item(name: %s) into " + "the insertion tree of the delayed node" + "(root id: %llu, inode id: %llu, errno: %d)\n", + name, + (unsigned long long)delayed_node->root->objectid, + (unsigned long long)delayed_node->inode_id, + ret); + BUG(); + } + mutex_unlock(&delayed_node->mutex); + +release_node: + btrfs_release_delayed_node(delayed_node); + return ret; +} + +static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root, + struct btrfs_delayed_node *node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + mutex_lock(&node->mutex); + item = __btrfs_lookup_delayed_insertion_item(node, key); + if (!item) { + mutex_unlock(&node->mutex); + return 1; + } + + btrfs_delayed_item_release_metadata(root, item); + btrfs_release_delayed_item(item); + mutex_unlock(&node->mutex); + return 0; +} + +int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *dir, + u64 index) +{ + struct btrfs_delayed_node *node; + struct btrfs_delayed_item *item; + struct btrfs_key item_key; + int ret; + + node = btrfs_get_or_create_delayed_node(dir); + if (IS_ERR(node)) + return PTR_ERR(node); + + item_key.objectid = dir->i_ino; + btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); + item_key.offset = index; + + ret = btrfs_delete_delayed_insertion_item(root, node, &item_key); + if (!ret) + goto end; + + item = btrfs_alloc_delayed_item(0); + if (!item) { + ret = -ENOMEM; + goto end; + } + + item->key = item_key; + + ret = btrfs_delayed_item_reserve_metadata(trans, root, item); + /* + * we have reserved enough space when we start a new transaction, + * so reserving metadata failure is impossible. + */ + BUG_ON(ret); + + mutex_lock(&node->mutex); + ret = __btrfs_add_delayed_deletion_item(node, item); + if (unlikely(ret)) { + printk(KERN_ERR "err add delayed dir index item(index: %llu) " + "into the deletion tree of the delayed node" + "(root id: %llu, inode id: %llu, errno: %d)\n", + (unsigned long long)index, + (unsigned long long)node->root->objectid, + (unsigned long long)node->inode_id, + ret); + BUG(); + } + mutex_unlock(&node->mutex); +end: + btrfs_release_delayed_node(node); + return ret; +} + +int btrfs_inode_delayed_dir_index_count(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; + int ret = 0; + + if (!delayed_node) + return -ENOENT; + + /* + * Since we have held i_mutex of this directory, it is impossible that + * a new directory index is added into the delayed node and index_cnt + * is updated now. So we needn't lock the delayed node. 
+ */ + if (!delayed_node->index_cnt) + return -EINVAL; + + BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; + return ret; +} + +void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, + struct list_head *del_list) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_delayed_item *item; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return; + + mutex_lock(&delayed_node->mutex); + item = __btrfs_first_delayed_insertion_item(delayed_node); + while (item) { + atomic_inc(&item->refs); + list_add_tail(&item->readdir_list, ins_list); + item = __btrfs_next_delayed_item(item); + } + + item = __btrfs_first_delayed_deletion_item(delayed_node); + while (item) { + atomic_inc(&item->refs); + list_add_tail(&item->readdir_list, del_list); + item = __btrfs_next_delayed_item(item); + } + mutex_unlock(&delayed_node->mutex); + /* + * This delayed node is still cached in the btrfs inode, so refs + * must be > 1 now, and we needn't check it is going to be freed + * or not. + * + * Besides that, this function is used to read dir, we do not + * insert/delete delayed items in this period. So we also needn't + * requeue or dequeue this delayed node. + */ + atomic_dec(&delayed_node->refs); +} + +void btrfs_put_delayed_items(struct list_head *ins_list, + struct list_head *del_list) +{ + struct btrfs_delayed_item *curr, *next; + + list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + list_del(&curr->readdir_list); + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + } + + list_for_each_entry_safe(curr, next, del_list, readdir_list) { + list_del(&curr->readdir_list); + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + } +} + +int btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index) +{ + struct btrfs_delayed_item *curr, *next; + int ret; + + if (list_empty(del_list)) + return 0; + + list_for_each_entry_safe(curr, next, del_list, readdir_list) { + if (curr->key.offset > index) + break; + + list_del(&curr->readdir_list); + ret = (curr->key.offset == index); + + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + + if (ret) + return 1; + else + continue; + } + return 0; +} + +/* + * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree + * + */ +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, + struct list_head *ins_list) +{ + struct btrfs_dir_item *di; + struct btrfs_delayed_item *curr, *next; + struct btrfs_key location; + char *name; + int name_len; + int over = 0; + unsigned char d_type; + + if (list_empty(ins_list)) + return 0; + + /* + * Changing the data of the delayed item is impossible. So + * we needn't lock them. And we have held i_mutex of the + * directory, nobody can delete any directory indexes now. 
+ */ + list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + list_del(&curr->readdir_list); + + if (curr->key.offset < filp->f_pos) { + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + continue; + } + + filp->f_pos = curr->key.offset; + + di = (struct btrfs_dir_item *)curr->data; + name = (char *)(di + 1); + name_len = le16_to_cpu(di->name_len); + + d_type = btrfs_filetype_table[di->type]; + btrfs_disk_key_to_cpu(&location, &di->location); + + over = filldir(dirent, name, name_len, curr->key.offset, + location.objectid, d_type); + + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + + if (over) + return 1; + } + return 0; +} + +BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, + generation, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, + sequence, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, + transid, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, + nbytes, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, + block_group, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); + +BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); +BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); + +static void fill_stack_inode_item(struct btrfs_trans_handle *trans, + struct btrfs_inode_item *inode_item, + struct inode *inode) +{ + btrfs_set_stack_inode_uid(inode_item, inode->i_uid); + btrfs_set_stack_inode_gid(inode_item, inode->i_gid); + btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size); + btrfs_set_stack_inode_mode(inode_item, inode->i_mode); + btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink); + btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode)); + btrfs_set_stack_inode_generation(inode_item, + BTRFS_I(inode)->generation); + btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence); + btrfs_set_stack_inode_transid(inode_item, trans->transid); + btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); + btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); + btrfs_set_stack_inode_block_group(inode_item, + BTRFS_I(inode)->block_group); + + btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), + inode->i_atime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), + inode->i_atime.tv_nsec); + + btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), + inode->i_mtime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), + inode->i_mtime.tv_nsec); + + btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), + inode->i_ctime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), + inode->i_ctime.tv_nsec); +} + +int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + int ret; + + delayed_node = btrfs_get_or_create_delayed_node(inode); + if (IS_ERR(delayed_node)) + 
return PTR_ERR(delayed_node); + + mutex_lock(&delayed_node->mutex); + if (delayed_node->inode_dirty) { + fill_stack_inode_item(trans, &delayed_node->inode_item, inode); + goto release_node; + } + + ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); + /* + * we must reserve enough space when we start a new transaction, + * so reserving metadata failure is impossible + */ + BUG_ON(ret); + + fill_stack_inode_item(trans, &delayed_node->inode_item, inode); + delayed_node->inode_dirty = 1; + delayed_node->count++; + atomic_inc(&root->fs_info->delayed_root->items); +release_node: + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return ret; +} + +static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node) +{ + struct btrfs_root *root = delayed_node->root; + struct btrfs_delayed_item *curr_item, *prev_item; + + mutex_lock(&delayed_node->mutex); + curr_item = __btrfs_first_delayed_insertion_item(delayed_node); + while (curr_item) { + btrfs_delayed_item_release_metadata(root, curr_item); + prev_item = curr_item; + curr_item = __btrfs_next_delayed_item(prev_item); + btrfs_release_delayed_item(prev_item); + } + + curr_item = __btrfs_first_delayed_deletion_item(delayed_node); + while (curr_item) { + btrfs_delayed_item_release_metadata(root, curr_item); + prev_item = curr_item; + curr_item = __btrfs_next_delayed_item(prev_item); + btrfs_release_delayed_item(prev_item); + } + + if (delayed_node->inode_dirty) { + btrfs_delayed_inode_release_metadata(root, delayed_node); + btrfs_release_delayed_inode(delayed_node); + } + mutex_unlock(&delayed_node->mutex); +} + +void btrfs_kill_delayed_inode_items(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return; + + __btrfs_kill_delayed_node(delayed_node); + btrfs_release_delayed_node(delayed_node); +} + +void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) +{ + u64 inode_id = 0; + struct btrfs_delayed_node *delayed_nodes[8]; + int i, n; + + while (1) { + spin_lock(&root->inode_lock); + n = radix_tree_gang_lookup(&root->delayed_nodes_tree, + (void **)delayed_nodes, inode_id, + ARRAY_SIZE(delayed_nodes)); + if (!n) { + spin_unlock(&root->inode_lock); + break; + } + + inode_id = delayed_nodes[n - 1]->inode_id + 1; + + for (i = 0; i < n; i++) + atomic_inc(&delayed_nodes[i]->refs); + spin_unlock(&root->inode_lock); + + for (i = 0; i < n; i++) { + __btrfs_kill_delayed_node(delayed_nodes[i]); + btrfs_release_delayed_node(delayed_nodes[i]); + } + } +} diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h new file mode 100644 index 000000000000..eb7d240aa648 --- /dev/null +++ b/fs/btrfs/delayed-inode.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2011 Fujitsu. All rights reserved. + * Written by Miao Xie + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef __DELAYED_TREE_OPERATION_H +#define __DELAYED_TREE_OPERATION_H + +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <asm/atomic.h> + +#include "ctree.h" + +/* types of the delayed item */ +#define BTRFS_DELAYED_INSERTION_ITEM 1 +#define BTRFS_DELAYED_DELETION_ITEM 2 + +struct btrfs_delayed_root { + spinlock_t lock; + struct list_head node_list; + /* + * Used for delayed nodes which are waiting to be dealt with by the + * worker. If the delayed node is inserted into the work queue, we + * drop it from this list. + */ + struct list_head prepare_list; + atomic_t items; /* for delayed items */ + int nodes; /* for delayed nodes */ + wait_queue_head_t wait; +}; + +struct btrfs_delayed_node { + u64 inode_id; + u64 bytes_reserved; + struct btrfs_root *root; + /* Used to add the node into the delayed root's node list. */ + struct list_head n_list; + /* + * Used to add the node into the prepare list, the nodes in this list + * are waiting to be dealt with by the async worker. + */ + struct list_head p_list; + struct rb_root ins_root; + struct rb_root del_root; + struct mutex mutex; + struct btrfs_inode_item inode_item; + atomic_t refs; + u64 index_cnt; + bool in_list; + bool inode_dirty; + int count; +}; + +struct btrfs_delayed_item { + struct rb_node rb_node; + struct btrfs_key key; + struct list_head tree_list; /* used for batch insert/delete items */ + struct list_head readdir_list; /* used for readdir items */ + u64 bytes_reserved; + struct btrfs_block_rsv *block_rsv; + struct btrfs_delayed_node *delayed_node; + atomic_t refs; + int ins_or_del; + u32 data_len; + char data[0]; +}; + +static inline void btrfs_init_delayed_root( + struct btrfs_delayed_root *delayed_root) +{ + atomic_set(&delayed_root->items, 0); + delayed_root->nodes = 0; + spin_lock_init(&delayed_root->lock); + init_waitqueue_head(&delayed_root->wait); + INIT_LIST_HEAD(&delayed_root->node_list); + INIT_LIST_HEAD(&delayed_root->prepare_list); +} + +int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, struct inode *dir, + struct btrfs_disk_key *disk_key, u8 type, + u64 index); + +int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *dir, + u64 index); + +int btrfs_inode_delayed_dir_index_count(struct inode *inode); + +int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root); + +void btrfs_balance_delayed_items(struct btrfs_root *root); + +int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct inode *inode); +/* Used for evicting the inode.
*/ +void btrfs_remove_delayed_node(struct inode *inode); +void btrfs_kill_delayed_inode_items(struct inode *inode); + + +int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode); + +/* Used for drop dead root */ +void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); + +/* Used for readdir() */ +void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, + struct list_head *del_list); +void btrfs_put_delayed_items(struct list_head *ins_list, + struct list_head *del_list); +int btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index); +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, + struct list_head *ins_list); + +/* for init */ +int __init btrfs_delayed_inode_init(void); +void btrfs_delayed_inode_exit(void); +#endif diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index c62f02f6ae69..f53fb3847c96 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -124,8 +124,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, * to use for the second index (if one is created). */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root - *root, const char *name, int name_len, u64 dir, - struct btrfs_key *location, u8 type, u64 index) + *root, const char *name, int name_len, + struct inode *dir, struct btrfs_key *location, + u8 type, u64 index) { int ret = 0; int ret2 = 0; @@ -137,13 +138,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root struct btrfs_disk_key disk_key; u32 data_size; - key.objectid = dir; + key.objectid = dir->i_ino; btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; path->leave_spinning = 1; + btrfs_cpu_key_to_disk(&disk_key, location); + data_size = sizeof(*dir_item) + name_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); @@ -155,7 +160,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root } leaf = path->nodes[0]; - btrfs_cpu_key_to_disk(&disk_key, location); btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_type(leaf, dir_item, type); btrfs_set_dir_data_len(leaf, dir_item, 0); @@ -174,27 +178,9 @@ second_insert: } btrfs_release_path(root, path); - btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); - key.offset = index; - dir_item = insert_with_overflow(trans, root, path, &key, data_size, - name, name_len); - if (IS_ERR(dir_item)) { - ret2 = PTR_ERR(dir_item); - goto out_free; - } - leaf = path->nodes[0]; - btrfs_cpu_key_to_disk(&disk_key, location); - btrfs_set_dir_item_key(leaf, dir_item, &disk_key); - btrfs_set_dir_type(leaf, dir_item, type); - btrfs_set_dir_data_len(leaf, dir_item, 0); - btrfs_set_dir_name_len(leaf, dir_item, name_len); - btrfs_set_dir_transid(leaf, dir_item, trans->transid); - name_ptr = (unsigned long)(dir_item + 1); - write_extent_buffer(leaf, name, name_ptr, name_len); - btrfs_mark_buffer_dirty(leaf); - + ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir, + &disk_key, type, index); out_free: - btrfs_free_path(path); if (ret) return ret; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..22c3c9586049 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1058,6 +1058,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->name = NULL; root->in_sysfs = 0; root->inode_tree = RB_ROOT; + 
INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; root->orphan_block_rsv = NULL; @@ -1693,6 +1694,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); + fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), + GFP_NOFS); + if (!fs_info->delayed_root) { + err = -ENOMEM; + goto fail_iput; + } + btrfs_init_delayed_root(fs_info->delayed_root); sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); @@ -1760,7 +1768,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) { err = -EINVAL; - goto fail_iput; + goto fail_alloc; } memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); @@ -1772,7 +1780,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, disk_super = &fs_info->super_copy; if (!btrfs_super_root(disk_super)) - goto fail_iput; + goto fail_alloc; /* check FS state, whether FS is broken. */ fs_info->fs_state |= btrfs_super_flags(disk_super); @@ -1788,7 +1796,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; - goto fail_iput; + goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super) & @@ -1798,7 +1806,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, "unsupported optional features (%Lx).\n", (unsigned long long)features); err = -EINVAL; - goto fail_iput; + goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super); @@ -1814,7 +1822,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, "unsupported option features (%Lx).\n", (unsigned long long)features); err = -EINVAL; - goto fail_iput; + goto fail_alloc; } btrfs_init_workers(&fs_info->generic_worker, @@ -1861,6 +1869,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", 1, &fs_info->generic_worker); + btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", + fs_info->thread_pool_size, + &fs_info->generic_worker); /* * endios are largely parallel and should have a very @@ -1882,6 +1893,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); btrfs_start_workers(&fs_info->endio_write_workers, 1); btrfs_start_workers(&fs_info->endio_freespace_worker, 1); + btrfs_start_workers(&fs_info->delayed_workers, 1); fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, @@ -2138,6 +2150,9 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); + btrfs_stop_workers(&fs_info->delayed_workers); +fail_alloc: + kfree(fs_info->delayed_root); fail_iput: invalidate_inode_pages2(fs_info->btree_inode->i_mapping); iput(fs_info->btree_inode); @@ -2578,6 +2593,7 @@ int close_ctree(struct btrfs_root *root) del_fs_roots(fs_info); iput(fs_info->btree_inode); + kfree(fs_info->delayed_root); btrfs_stop_workers(&fs_info->generic_worker); btrfs_stop_workers(&fs_info->fixup_workers); @@ -2589,6 +2605,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); + btrfs_stop_workers(&fs_info->delayed_workers); btrfs_close_devices(fs_info->fs_devices); 
btrfs_mapping_tree_free(&fs_info->mapping_tree); @@ -2662,6 +2679,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) u64 num_dirty; unsigned long thresh = 32 * 1024 * 1024; + if (current->flags & PF_MEMALLOC) + return; + + btrfs_balance_delayed_items(root); + + num_dirty = root->fs_info->dirty_metadata_bytes; + + if (num_dirty > thresh) { + balance_dirty_pages_ratelimited_nr( + root->fs_info->btree_inode->i_mapping, 1); + } + return; +} + +void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) +{ + /* + * looks as though older kernels can get into trouble with + * this code, they end up stuck in balance_dirty_pages forever + */ + u64 num_dirty; + unsigned long thresh = 32 * 1024 * 1024; + if (current->flags & PF_MEMALLOC) return; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 07b20dc2fd95..aca35af37dbc 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -71,6 +71,7 @@ int btrfs_insert_dev_radix(struct btrfs_root *root, u64 block_start, u64 num_blocks); void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); +void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); void btrfs_mark_buffer_dirty(struct extent_buffer *buf); void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ee6bd55e16c..7b0433866f36 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3973,12 +3973,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info) WARN_ON(fs_info->chunk_block_rsv.reserved > 0); } -static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) -{ - return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * - 3 * num_items; -} - int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, int num_items) @@ -3989,7 +3983,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, if (num_items == 0 || root->fs_info->chunk_root == root) return 0; - num_bytes = calc_trans_metadata_size(root, num_items); + num_bytes = btrfs_calc_trans_metadata_size(root, num_items); ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, num_bytes); if (!ret) { @@ -4028,14 +4022,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, * If all of the metadata space is used, we can commit * transaction and use space it freed. */ - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } void btrfs_orphan_release_metadata(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); } @@ -4049,7 +4043,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, * two for root back/forward refs, two for directory entries * and one for root of the snapshot. 
*/ - u64 num_bytes = calc_trans_metadata_size(root, 5); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); dst_rsv->space_info = src_rsv->space_info; return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } @@ -4078,7 +4072,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) if (nr_extents > reserved_extents) { nr_extents -= reserved_extents; - to_reserve = calc_trans_metadata_size(root, nr_extents); + to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); } else { nr_extents = 0; to_reserve = 0; @@ -4132,7 +4126,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes); if (nr_extents > 0) - to_free += calc_trans_metadata_size(root, nr_extents); + to_free += btrfs_calc_trans_metadata_size(root, nr_extents); btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, to_free); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..3470f67c6258 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2647,11 +2647,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; int ret; + /* + * If root is tree root, it means this inode is used to + * store free space information. And these inodes are updated + * when committing the transaction, so updating them mustn't be + * delayed, or a deadlock will occur. + */ + if (likely(root != root->fs_info->tree_root)) { + ret = btrfs_delayed_update_inode(trans, root, inode); + if (!ret) + btrfs_set_inode_last_trans(trans, inode); + return ret; + } + path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; - ret = btrfs_lookup_inode(trans, root, path, - &BTRFS_I(inode)->location, 1); + ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, + 1); if (ret) { if (ret > 0) ret = -ENOENT; @@ -2661,7 +2676,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, btrfs_unlock_up_safe(path, 1); leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_inode_item); + struct btrfs_inode_item); fill_inode_item(trans, leaf, inode_item, inode); btrfs_mark_buffer_dirty(leaf); @@ -2672,7 +2687,6 @@ failed: return ret; } - /* * unlink helper that gets used here in inode.c and in the tree logging * recovery code. It remove a link in a directory with a given name, and @@ -2724,18 +2738,9 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - if (IS_ERR(di)) { - ret = PTR_ERR(di); - goto err; - } - if (!di) { - ret = -ENOENT; + ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); + if (ret) goto err; - } - ret = btrfs_delete_one_dir_name(trans, root, path, di); - btrfs_release_path(root, path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, dir->i_ino); @@ -2924,6 +2929,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, index = btrfs_inode_ref_index(path->nodes[0], ref); btrfs_release_path(root, path); + /* + * This is a commit root search; if we can look up the inode item and + * other related items in the commit root, it means the transaction of + * dir/file creation has been committed, and the dir index item that we + * delay to insert has also been inserted into the commit root. So + * we needn't worry about the delayed insertion of the dir index item + * here.
+ */ di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { @@ -3029,24 +3042,16 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); index = key.offset; } + btrfs_release_path(root, path); - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); - - leaf = path->nodes[0]; - btrfs_dir_item_key_to_cpu(leaf, di, &key); - WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); - ret = btrfs_delete_one_dir_name(trans, root, path, di); + ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); BUG_ON(ret); - btrfs_release_path(root, path); btrfs_i_size_write(dir, dir->i_size - name_len * 2); dir->i_mtime = dir->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); - btrfs_free_path(path); return 0; } @@ -3306,6 +3311,15 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, if (root->ref_cows || root == root->fs_info->tree_root) btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); + /* + * This function is also used to drop the items in the log tree before + * we relog the inode, so if root != BTRFS_I(inode)->root, it means + * it is used to drop the logged items. So we shouldn't kill the delayed + * items. + */ + if (min_type == 0 && root == BTRFS_I(inode)->root) + btrfs_kill_delayed_inode_items(inode); + path = btrfs_alloc_path(); BUG_ON(!path); path->reada = -1; @@ -4208,7 +4222,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, return d_splice_alias(inode, dentry); } -static unsigned char btrfs_filetype_table[] = { +unsigned char btrfs_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; @@ -4222,6 +4236,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, struct btrfs_key key; struct btrfs_key found_key; struct btrfs_path *path; + struct list_head ins_list; + struct list_head del_list; int ret; struct extent_buffer *leaf; int slot; @@ -4234,6 +4250,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, char tmp_name[32]; char *name_ptr; int name_len; + int is_curr = 0; /* filp->f_pos points to the current index? */ /* FIXME, use a real flag for deciding about the key type */ if (root->fs_info->tree_root == root) @@ -4258,8 +4275,16 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, filp->f_pos = 2; } path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; path->reada = 2; + if (key_type == BTRFS_DIR_INDEX_KEY) { + INIT_LIST_HEAD(&ins_list); + INIT_LIST_HEAD(&del_list); + btrfs_get_delayed_items(inode, &ins_list, &del_list); + } + btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; key.objectid = inode->i_ino; @@ -4289,8 +4314,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, break; if (found_key.offset < filp->f_pos) goto next; + if (key_type == BTRFS_DIR_INDEX_KEY && + btrfs_should_delete_dir_index(&del_list, + found_key.offset)) + goto next; filp->f_pos = found_key.offset; + is_curr = 1; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); di_cur = 0; @@ -4345,6 +4375,15 @@ next: path->slots[0]++; } + if (key_type == BTRFS_DIR_INDEX_KEY) { + if (is_curr) + filp->f_pos++; + ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, + &ins_list); + if (ret) + goto nopos; + } + /* Reached end of directory/root. Bump pos past the last item.
*/ if (key_type == BTRFS_DIR_INDEX_KEY) /* @@ -4357,6 +4396,8 @@ next: nopos: ret = 0; err: + if (key_type == BTRFS_DIR_INDEX_KEY) + btrfs_put_delayed_items(&ins_list, &del_list); btrfs_free_path(path); return ret; } @@ -4434,6 +4475,8 @@ void btrfs_dirty_inode(struct inode *inode) } } btrfs_end_transaction(trans, root); + if (BTRFS_I(inode)->delayed_node) + btrfs_balance_delayed_items(root); } /* @@ -4502,9 +4545,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) int ret = 0; if (BTRFS_I(dir)->index_cnt == (u64)-1) { - ret = btrfs_set_inode_index_count(dir); - if (ret) - return ret; + ret = btrfs_inode_delayed_dir_index_count(dir); + if (ret) { + ret = btrfs_set_inode_index_count(dir); + if (ret) + return ret; + } } *index = BTRFS_I(dir)->index_cnt; @@ -4671,7 +4717,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, if (ret == 0) { ret = btrfs_insert_dir_item(trans, root, name, name_len, - parent_inode->i_ino, &key, + parent_inode, &key, btrfs_inode_type(inode), index); BUG_ON(ret); @@ -6784,6 +6830,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->dummy_inode = 0; ei->force_compress = BTRFS_COMPRESS_NONE; + ei->delayed_node = NULL; + inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree, GFP_NOFS); extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); @@ -6874,6 +6922,7 @@ void btrfs_destroy_inode(struct inode *inode) inode_tree_del(inode); btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); free: + btrfs_remove_delayed_node(inode); call_rcu(&inode->i_rcu, btrfs_i_callback); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2616f7ed4799..df59401af742 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -422,7 +422,7 @@ static noinline int create_subvol(struct btrfs_root *root, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir->i_ino, &key, + name, namelen, dir, &key, BTRFS_FT_DIR, index); if (ret) goto fail; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..cc5a2a8a5acb 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -40,6 +40,7 @@ #include #include #include "compat.h" +#include "delayed-inode.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -1206,10 +1207,14 @@ static int __init init_btrfs_fs(void) if (err) goto free_extent_io; - err = btrfs_interface_init(); + err = btrfs_delayed_inode_init(); if (err) goto free_extent_map; + err = btrfs_interface_init(); + if (err) + goto free_delayed_inode; + err = register_filesystem(&btrfs_fs_type); if (err) goto unregister_ioctl; @@ -1219,6 +1224,8 @@ static int __init init_btrfs_fs(void) unregister_ioctl: btrfs_interface_exit(); +free_delayed_inode: + btrfs_delayed_inode_exit(); free_extent_map: extent_map_exit(); free_extent_io: @@ -1235,6 +1242,7 @@ free_sysfs: static void __exit exit_btrfs_fs(void) { btrfs_destroy_cachep(); + btrfs_delayed_inode_exit(); extent_map_exit(); extent_io_exit(); btrfs_interface_exit(); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..b83ed5e64a32 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -487,19 +487,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0, 1); + int ret; + + ret = __btrfs_end_transaction(trans, root, 0, 1); + if (ret) + return ret; + return 0; } int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return 
__btrfs_end_transaction(trans, root, 1, 1); + int ret; + + ret = __btrfs_end_transaction(trans, root, 1, 1); + if (ret) + return ret; + return 0; } int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0, 0); + int ret; + + ret = __btrfs_end_transaction(trans, root, 0, 0); + if (ret) + return ret; + return 0; +} + +int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + return __btrfs_end_transaction(trans, root, 1, 1); } /* @@ -967,7 +988,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode->i_ino, &key, + parent_inode, &key, BTRFS_FT_DIR, index); BUG_ON(ret); @@ -1037,6 +1058,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, int ret; list_for_each_entry(pending, head, list) { + /* + * We must deal with the delayed items before creating + * snapshots, or we will create a snapshot with inconsistent + * information. + */ + ret = btrfs_run_delayed_items(trans, fs_info->fs_root); + BUG_ON(ret); + ret = create_pending_snapshot(trans, fs_info, pending); BUG_ON(ret); } @@ -1290,6 +1319,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, BUG_ON(ret); } + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + /* * rename don't use btrfs_join_transaction, so, once we * set the transaction to blocked above, we aren't going @@ -1316,6 +1348,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); BUG_ON(ret); @@ -1432,6 +1467,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) root = list_entry(list.next, struct btrfs_root, root_list); list_del(&root->root_list); + btrfs_kill_all_delayed_nodes(root); + if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV) btrfs_drop_snapshot(root, NULL, 0); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e441acc6c584..cb928c6c42e6 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -115,6 +115,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, int wait_for_unblock); int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, + struct btrfs_root *root); int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); void btrfs_throttle(struct btrfs_root *root); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba4..ae0b72856bfb 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2773,6 +2773,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.type = (u8)-1; max_key.offset = (u64)-1; + ret = btrfs_commit_inode_delayed_items(trans, inode); + if (ret) { + btrfs_free_path(path); + btrfs_free_path(dst_path); + return ret; + } + mutex_lock(&BTRFS_I(inode)->log_mutex); /* -- cgit v1.2.3 From 0d0ca30f180906224be6279788f2b202cfd959d8 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sun, 22 May 2011 07:11:22 -0400 Subject: Btrfs: update the delayed inode code to use the btrfs_ino helper.
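For context, btrfs_ino() maps an inode to the objectid that keys its items in the btrfs trees, instead of relying on the VFS i_ino field. A minimal sketch of the helper, paraphrased from the btrfs_inode.h of this era (the exact guard condition is an assumption):

	static inline u64 btrfs_ino(struct inode *inode)
	{
		u64 ino = BTRFS_I(inode)->location.objectid;

		/* special inodes (e.g. the btree inode) keep their VFS inode
		 * number; the threshold used here is an assumption */
		if (ino <= BTRFS_FIRST_FREE_OBJECTID)
			ino = inode->i_ino;
		return ino;
	}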
Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 11 ++++++----- fs/btrfs/dir-item.c | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 95485318f001..c25405f69360 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -88,6 +88,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( struct btrfs_delayed_node *node; struct btrfs_inode *btrfs_inode = BTRFS_I(inode); struct btrfs_root *root = btrfs_inode->root; + u64 ino = btrfs_ino(inode); int ret; again: @@ -98,7 +99,7 @@ again: } spin_lock(&root->inode_lock); - node = radix_tree_lookup(&root->delayed_nodes_tree, inode->i_ino); + node = radix_tree_lookup(&root->delayed_nodes_tree, ino); if (node) { if (btrfs_inode->delayed_node) { spin_unlock(&root->inode_lock); @@ -115,7 +116,7 @@ again: node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); if (!node) return ERR_PTR(-ENOMEM); - btrfs_init_delayed_node(node, root, inode->i_ino); + btrfs_init_delayed_node(node, root, ino); atomic_inc(&node->refs); /* cached in the btrfs inode */ atomic_inc(&node->refs); /* can be accessed */ @@ -127,7 +128,7 @@ again: } spin_lock(&root->inode_lock); - ret = radix_tree_insert(&root->delayed_nodes_tree, inode->i_ino, node); + ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node); if (ret == -EEXIST) { kmem_cache_free(delayed_node_cache, node); spin_unlock(&root->inode_lock); @@ -1274,7 +1275,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, */ BUG_ON(ret); - delayed_item->key.objectid = dir->i_ino; + delayed_item->key.objectid = btrfs_ino(dir); btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY); delayed_item->key.offset = index; @@ -1337,7 +1338,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, if (IS_ERR(node)) return PTR_ERR(node); - item_key.objectid = dir->i_ino; + item_key.objectid = btrfs_ino(dir); btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); item_key.offset = index; diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index f53fb3847c96..e757202a014e 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -138,7 +138,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root struct btrfs_disk_key disk_key; u32 data_size; - key.objectid = dir->i_ino; + key.objectid = btrfs_ino(dir); btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); -- cgit v1.2.3 From 8e531cdfeb75269c6c5aae33651cca39707848da Mon Sep 17 00:00:00 2001 From: liubo Date: Fri, 6 May 2011 10:36:09 +0800 Subject: Btrfs: do not flush csum items of unchanged file data during treelog The current code relogs the entire inode every time during fsync log, and it is much better suited to small files than to large ones. During my performance test, the fsync performance of large files sucks, and we can ascribe this to the tremendous amount of csum infos of the large ones, because we have to flush all of these csum infos into log trees even when there is only _one_ change in the whole file data. Apparently, to optimize fsync, we need to create a filter to skip the unnecessary csum ones, that is, the corresponding file data remains unchanged before this fsync. Here I have some test results to show, I use sysbench to do "random write + fsync".
===
sysbench --test=fileio --num-threads=1 --file-num=2 --file-block-size=4K --file-total-size=8G --file-test-mode=rndwr --file-io-mode=sync --file-extra-flags= [prepare, run]
===

Sysbench args:
- Number of threads: 1
- Extra file open flags: 0
- 2 files, 4Gb each
- Block size 4Kb
- Number of random requests for random IO: 10000
- Read/Write ratio for combined random IO test: 1.50
- Periodic FSYNC enabled, calling fsync() each 100 requests.
- Calling fsync() at the end of test, Enabled.
- Using synchronous I/O mode
- Doing random write test

Sysbench results:
===
Operations performed: 0 Read, 10000 Write, 200 Other = 10200 Total
Read 0b Written 39.062Mb Total transferred 39.062Mb
===
a) without patch: (*SPEED*: 451.01Kb/sec) 112.75 Requests/sec executed
b) with patch: (*SPEED*: 4.7533Mb/sec) 1216.84 Requests/sec executed

PS: I've made a _sub transid_ patch, but it does not perform as effectively as this one, and I'm wondering where the problem is and trying to improve it more.

Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index b4c191d6c774..0f5537e60bb4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2667,6 +2667,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, extent = btrfs_item_ptr(src, start_slot + i, struct btrfs_file_extent_item); + if (btrfs_file_extent_generation(src, extent) < trans->transid) + continue; + found_type = btrfs_file_extent_type(src, extent); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { -- cgit v1.2.3 From 0956c798ef8dbe0fc215870eb68bd2d8e789f86a Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 18 May 2011 00:11:22 +0000 Subject: BTRFS: Remove unused node_lock Commit 240f62c8756 replaced the node_lock with rcu_read_lock, but forgot to remove the actual lock in the data structure. Remove it here. Signed-off-by: Andi Kleen Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 3 --- fs/btrfs/disk-io.c | 1 - 2 files changed, 4 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..f290b98e2fe6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1088,9 +1088,6 @@ struct btrfs_fs_info { struct btrfs_root { struct extent_buffer *node; - /* the node lock is held while changing the node pointer */ - spinlock_t node_lock; - struct extent_buffer *commit_root; struct btrfs_root *log_root; struct btrfs_root *reloc_root; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..64b289690f9d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1064,7 +1064,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, INIT_LIST_HEAD(&root->dirty_list); INIT_LIST_HEAD(&root->orphan_list); INIT_LIST_HEAD(&root->root_list); - spin_lock_init(&root->node_lock); spin_lock_init(&root->orphan_lock); spin_lock_init(&root->inode_lock); spin_lock_init(&root->accounting_lock); -- cgit v1.2.3 From e2156867159ae7b3bc38ef1c26ea0ee30a895ef8 Mon Sep 17 00:00:00 2001 From: Hugo Mills Date: Sat, 14 May 2011 17:43:41 +0000 Subject: btrfs: Ensure the tree search ioctl returns the right number of records Btrfs's tree search ioctl has a field to indicate that no more than a given number of records should be returned. The ioctl doesn't honour this, as the tested value is not incremented until the end of the copy_to_sk function.
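In outline, the bug is a limit check against a counter that is only updated after the loop. A before/after sketch, distilled from the diff below:

	/* before: the per-call 'found' count is folded in only at the end */
	found++;
	if (*num_found >= sk->nr_items)		/* never sees this call's records */
		break;
	...
	*num_found += found;			/* too late for the check above */

	/* after: bump the caller-visible counter as each key is copied */
	(*num_found)++;
	if (*num_found >= sk->nr_items)
		break;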
This patch removes an unnecessary local variable, and updates the num_found counter as each key is found in the tree. Signed-off-by: Hugo Mills Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2616f7ed4799..ce773fb736a1 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1279,7 +1279,6 @@ static noinline int copy_to_sk(struct btrfs_root *root, int nritems; int i; int slot; - int found = 0; int ret = 0; leaf = path->nodes[0]; @@ -1326,7 +1325,7 @@ static noinline int copy_to_sk(struct btrfs_root *root, item_off, item_len); *sk_offset += item_len; } - found++; + (*num_found)++; if (*num_found >= sk->nr_items) break; @@ -1345,7 +1344,6 @@ advance_key: } else ret = 1; overflow: - *num_found += found; return ret; } -- cgit v1.2.3 From 0f3b708c11914b684d17fed975eed19db902a8de Mon Sep 17 00:00:00 2001 From: Jamey Sharp Date: Thu, 5 May 2011 19:03:46 +0000 Subject: btrfs: Delete unused version.sh script. In 2008, commit b4f6c45dfbf84f47c21f73f6370ad1292b0627fd dropped the use of fs/btrfs/version.sh, but left the script behind. Kill it. Commit by Jamey Sharp and Josh Triplett. Signed-off-by: Jamey Sharp Signed-off-by: Josh Triplett Cc: Chris Mason Signed-off-by: Chris Mason --- fs/btrfs/version.sh | 43 ------------------------------------------- 1 file changed, 43 deletions(-) delete mode 100644 fs/btrfs/version.sh diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh deleted file mode 100644 index 1ca1952fd917..000000000000 --- a/fs/btrfs/version.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# -# determine-version -- report a useful version for releases -# -# Copyright 2008, Aron Griffis -# Copyright 2008, Oracle -# Released under the GNU GPLv2 -v="v0.16" -which git &> /dev/null -if [ $? == 0 ]; then - git branch >& /dev/null - if [ $? == 0 ]; then - if head=`git rev-parse --verify HEAD 2>/dev/null`; then - if tag=`git describe --tags 2>/dev/null`; then - v="$tag" - fi - # Are there uncommitted changes? - git update-index --refresh --unmerged > /dev/null - if git diff-index --name-only HEAD | \ - grep -v "^scripts/package" \ - | read dummy; then - v="$v"-dirty - fi - fi - fi -fi - -echo "#ifndef __BUILD_VERSION" > .build-version.h -echo "#define __BUILD_VERSION" >> .build-version.h -echo "#define BTRFS_BUILD_VERSION \"Btrfs $v\"" >> .build-version.h -echo "#endif" >> .build-version.h - -diff -q version.h .build-version.h >& /dev/null - -if [ $? == 0 ]; then - rm .build-version.h - exit 0 -fi - -mv .build-version.h version.h -- cgit v1.2.3 From c4f675cd40d955d539180506c09515c90169b15b Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Fri, 20 May 2011 20:20:30 +0000 Subject: btrfs: don't spin in shrink_delalloc if there is nothing to free Observed as a large delay when a --mixed filesystem fills up.

Test example:
1. Create a tiny --mixed FS:
   $ dd if=/dev/zero of=2G.img seek=$((2048 * 1024 * 1024 - 1)) count=1 bs=1
   $ mkfs.btrfs --mixed 2G.img
   $ mount -oloop 2G.img /mnt/ut/
2. Try to fill it up:
   $ dd if=/dev/urandom of=10M.file bs=10240 count=1024
   $ seq 1 256 | while read file_no; do echo $file_no; time cp 10M.file ${file_no}.copy; done

Up to '200.copy' it goes fast, but once the disk fills up, _every_ -ENOSPC takes 3 seconds to pop up (and in user-mode Linux it's even more: 30-60 seconds; the time may depend on the kernel's timer resolution). No IO, no CPU load, just rescheduling. Some debugging revealed busy spinning in shrink_delalloc.
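The fix is a single early exit at the top of shrink_delalloc(), shown in the diff below: if there is no delalloc data queued at all, looping and rescheduling cannot reclaim anything, so give up immediately:

	/* nothing to shrink - nothing to reclaim */
	if (root->fs_info->delalloc_bytes == 0)
		return 0;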
Signed-off-by: Sergei Trofimovich Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ee6bd55e16c..9f5fdd37451d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3425,6 +3425,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, if (reserved == 0) return 0; + /* nothing to shrink - nothing to reclaim */ + if (root->fs_info->delalloc_bytes == 0) + return 0; + max_reclaim = min(reserved, to_reclaim); while (loops < 1024) { -- cgit v1.2.3 From 9694b3fcbb0f5dd498fdf53c82f22fcc37989152 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Fri, 20 May 2011 20:20:31 +0000 Subject: btrfs: typo: 'btrfS' -> 'btrfs' Signed-off-by: Sergei Trofimovich Signed-off-by: Chris Mason --- fs/btrfs/dir-item.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index c62f02f6ae69..dec93485d539 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -452,7 +452,7 @@ int verify_dir_item(struct btrfs_root *root, namelen = XATTR_NAME_MAX; if (btrfs_dir_name_len(leaf, dir_item) > namelen) { - printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n", + printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n", (unsigned)btrfs_dir_data_len(leaf, dir_item)); return 1; } -- cgit v1.2.3 From 27160b6b5a1744b6eaa8416e2b901ec937b1eee0 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Fri, 20 May 2011 20:20:32 +0000 Subject: btrfs: fix typo 'testeing' -> 'testing' Signed-off-by: Sergei Trofimovich Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..72650ceb9829 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1310,7 +1310,7 @@ static int btrfs_set_bit_hook(struct inode *inode, /* * set_bit and clear bit hooks normally require _irqsave/restore - * but in this case, we are only testeing for the DELALLOC + * but in this case, we are only testing for the DELALLOC * bit, which is only set or cleared with irqs on */ if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { @@ -1344,7 +1344,7 @@ static int btrfs_clear_bit_hook(struct inode *inode, { /* * set_bit and clear bit hooks normally require _irqsave/restore - * but in this case, we are only testeing for the DELALLOC + * but in this case, we are only testing for the DELALLOC * bit, which is only set or cleared with irqs on */ if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { -- cgit v1.2.3 From b0b802d7e34b0b4a78f911c3a8aad88aa91fd7ab Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 19 May 2011 07:03:42 +0000 Subject: Btrfs: return error code to caller when btrfs_previous_item fails The error code is returned instead of calling BUG_ON when btrfs_previous_item returns the error. 
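The conversion follows the usual goto-out pattern; a sketch of the shape (the concrete hunk in btrfs_free_dev_extent() is in the diff below):

	ret = btrfs_previous_item(root, path, key.objectid,
				  BTRFS_DEV_EXTENT_KEY);
	if (ret)		/* was: BUG_ON(ret); */
		goto out;	/* hand the error back instead of crashing */
	...
out:
	btrfs_free_path(path);
	return ret;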
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c7367ae5a3e6..e40cdd5b4669 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -949,14 +949,14 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, BTRFS_DEV_EXTENT_KEY); - BUG_ON(ret); + if (ret) + goto out; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); BUG_ON(found_key.offset > start || found_key.offset + btrfs_dev_extent_length(leaf, extent) < start); - ret = 0; } else if (ret == 0) { leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], @@ -969,6 +969,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, root, path); BUG_ON(ret); +out: btrfs_free_path(path); return ret; } -- cgit v1.2.3 From 65a246c5ffe3b487a001de025816326939e63362 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 19 May 2011 04:37:44 +0000 Subject: Btrfs: return error code to caller when btrfs_del_item fails The error code is returned instead of calling BUG_ON when btrfs_del_item returns the error. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/file-item.c | 10 ++++++---- fs/btrfs/root-tree.c | 6 +++++- fs/btrfs/tree-log.c | 10 +++++++--- fs/btrfs/volumes.c | 4 +--- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..6e7556aa02e8 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -551,10 +551,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) { if (path->slots[0] == 0) - goto out; + break; path->slots[0]--; } else if (ret < 0) { - goto out; + break; } leaf = path->nodes[0]; @@ -579,7 +579,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, /* delete the entire item, it is inside our range */ if (key.offset >= bytenr && csum_end <= end_byte) { ret = btrfs_del_item(trans, root, path); - BUG_ON(ret); + if (ret) + goto out; if (key.offset == bytenr) break; } else if (key.offset < bytenr && csum_end > end_byte) { @@ -633,9 +634,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, } btrfs_release_path(root, path); } + ret = 0; out: btrfs_free_path(path); - return 0; + return ret; } int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6928bff62daa..2cf5f5142159 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -385,7 +385,10 @@ again: *sequence = btrfs_root_ref_sequence(leaf, ref); ret = btrfs_del_item(trans, tree_root, path); - BUG_ON(ret); + if (ret) { + err = ret; + goto out; + } } else err = -ENOENT; @@ -397,6 +400,7 @@ again: goto again; } +out: btrfs_free_path(path); return err; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba4..cf2baeb70462 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1050,7 +1050,8 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, break; ret = btrfs_del_item(trans, root, path); - BUG_ON(ret); + if (ret) + goto out; btrfs_release_path(root, path); inode = read_one_inode(root, key.offset); @@ -1068,8 +1069,10 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, */ 
key.offset = (u64)-1; } + ret = 0; +out: btrfs_release_path(root, path); - return 0; + return ret; } @@ -2587,7 +2590,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, break; ret = btrfs_del_item(trans, log, path); - BUG_ON(ret); + if (ret) + break; btrfs_release_path(log, path); } btrfs_release_path(log, path); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e40cdd5b4669..deca1a0326ad 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -967,7 +967,6 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, if (device->bytes_used > 0) device->bytes_used -= btrfs_dev_extent_length(leaf, extent); ret = btrfs_del_item(trans, root, path); - BUG_ON(ret); out: btrfs_free_path(path); @@ -1770,10 +1769,9 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_del_item(trans, root, path); - BUG_ON(ret); btrfs_free_path(path); - return 0; + return ret; } static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 -- cgit v1.2.3 From 1cd307990d6e2b4965620e339a92e0d7ae853e13 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 19 May 2011 05:19:08 +0000 Subject: Btrfs: BUG_ON is deleted from the caller of btrfs_truncate_item & btrfs_extend_item Currently, btrfs_truncate_item and btrfs_extend_item returns only 0. So, the check by BUG_ON in the caller is unnecessary. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 8 ++------ fs/btrfs/dir-item.c | 1 - fs/btrfs/extent-tree.c | 3 --- fs/btrfs/file-item.c | 3 --- fs/btrfs/inode-item.c | 2 -- fs/btrfs/inode.c | 1 - fs/btrfs/tree-log.c | 1 - 7 files changed, 2 insertions(+), 17 deletions(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..6f1a59cc41ff 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3216,7 +3216,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, struct btrfs_path *path, u32 new_size, int from_end) { - int ret = 0; int slot; struct extent_buffer *leaf; struct btrfs_item *item; @@ -3314,12 +3313,11 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, btrfs_set_item_size(leaf, item, new_size); btrfs_mark_buffer_dirty(leaf); - ret = 0; if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } - return ret; + return 0; } /* @@ -3329,7 +3327,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u32 data_size) { - int ret = 0; int slot; struct extent_buffer *leaf; struct btrfs_item *item; @@ -3394,12 +3391,11 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, btrfs_set_item_size(leaf, item, old_size + data_size); btrfs_mark_buffer_dirty(leaf); - ret = 0; if (btrfs_leaf_free_space(root, leaf) < 0) { btrfs_print_leaf(root, leaf); BUG(); } - return ret; + return 0; } /* diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index dec93485d539..dd421c48c353 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -50,7 +50,6 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle if (di) return ERR_PTR(-EEXIST); ret = btrfs_extend_item(trans, root, path, data_size); - WARN_ON(ret > 0); } if (ret < 0) return ERR_PTR(ret); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9f5fdd37451d..103e141afeb3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -947,7 +947,6 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_extend_item(trans, root, path, new_size); - BUG_ON(ret); leaf = 
path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); @@ -1555,7 +1554,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans, size = btrfs_extent_inline_ref_size(type); ret = btrfs_extend_item(trans, root, path, size); - BUG_ON(ret); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(leaf, ei); @@ -1684,7 +1682,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans, end - ptr - size); item_size -= size; ret = btrfs_truncate_item(trans, root, path, item_size, 1); - BUG_ON(ret); } btrfs_mark_buffer_dirty(leaf); return 0; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 6e7556aa02e8..fb9b02667e75 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -495,7 +495,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, u32 new_size = (bytenr - key->offset) >> blocksize_bits; new_size *= csum_size; ret = btrfs_truncate_item(trans, root, path, new_size, 1); - BUG_ON(ret); } else if (key->offset >= bytenr && csum_end > end_byte && end_byte > key->offset) { /* @@ -508,7 +507,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, new_size *= csum_size; ret = btrfs_truncate_item(trans, root, path, new_size, 0); - BUG_ON(ret); key->offset = end_byte; ret = btrfs_set_item_key_safe(trans, root, path, key); @@ -763,7 +761,6 @@ again: goto insert; ret = btrfs_extend_item(trans, root, path, diff); - BUG_ON(ret); goto csum; } diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 64f1150bb48d..baa74f3db691 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -130,7 +130,6 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, item_size - (ptr + sub_item_len - item_start)); ret = btrfs_truncate_item(trans, root, path, item_size - sub_item_len, 1); - BUG_ON(ret); out: btrfs_free_path(path); return ret; @@ -167,7 +166,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); ret = btrfs_extend_item(trans, root, path, ins_len); - BUG_ON(ret); ref = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_ref); ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 72650ceb9829..e9e2b4778279 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3428,7 +3428,6 @@ search_again: btrfs_file_extent_calc_inline_size(size); ret = btrfs_truncate_item(trans, root, path, size, 1); - BUG_ON(ret); } else if (root->ref_cows) { inode_sub_bytes(inode, item_end + 1 - found_key.offset); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cf2baeb70462..0350147106d5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -382,7 +382,6 @@ insert: } else if (found_size < item_size) { ret = btrfs_extend_item(trans, root, path, item_size - found_size); - BUG_ON(ret); } } else if (ret) { return ret; -- cgit v1.2.3 From c00e9493f1412621c8665a707d63e32b0768f572 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 28 Apr 2011 09:10:23 +0000 Subject: Btrfs: return error to caller if read_one_inode() fails When read_one_inode() fails, error code is returned to caller instead of BUG_ON(). 
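Each call site now bails out with -EIO instead of crashing, releasing whatever it holds first; the exact cleanup varies per caller. A representative sketch, matching the drop_one_dir_item() hunk below:

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		kfree(name);	/* other callers iput(dir) or just return */
		return -EIO;
	}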
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 0350147106d5..46b7b57650ab 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -677,7 +677,10 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); inode = read_one_inode(root, location.objectid); - BUG_ON(!inode); + if (!inode) { + kfree(name); + return -EIO; + } ret = link_to_fixup_dir(trans, root, path, location.objectid); BUG_ON(ret); @@ -816,7 +819,10 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, return -ENOENT; inode = read_one_inode(root, key->objectid); - BUG_ON(!inode); + if (!inode) { + iput(dir); + return -EIO; + } ref_ptr = btrfs_item_ptr_offset(eb, slot); ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); @@ -1054,7 +1060,8 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); inode = read_one_inode(root, key.offset); - BUG_ON(!inode); + if (!inode) + return -EIO; ret = fixup_inode_link_count(trans, root, inode); BUG_ON(ret); @@ -1090,7 +1097,8 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, struct inode *inode; inode = read_one_inode(root, objectid); - BUG_ON(!inode); + if (!inode) + return -EIO; key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); @@ -1177,7 +1185,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, int ret; dir = read_one_inode(root, key->objectid); - BUG_ON(!dir); + if (!dir) + return -EIO; name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); @@ -1433,7 +1442,10 @@ again: btrfs_release_path(root, path); btrfs_release_path(log, log_path); inode = read_one_inode(root, location.objectid); - BUG_ON(!inode); + if (!inode) { + kfree(name); + return -EIO; + } ret = link_to_fixup_dir(trans, root, path, location.objectid); -- cgit v1.2.3 From 37daa4f968e9470ae9f30e246a5781717c598271 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 28 Apr 2011 09:18:21 +0000 Subject: Btrfs: check return value of btrfs_inc_extent_ref() If return value of btrfs_inc_extent_ref() is not 0, BUG() is called. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 46b7b57650ab..c2d887566400 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -589,6 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, ins.objectid, ins.offset, 0, root->root_key.objectid, key->objectid, offset); + BUG_ON(ret); } else { /* * insert the extent pointer in the extent -- cgit v1.2.3 From b083916638eee513be501f53b42a4be0b9851db0 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 14 May 2011 07:10:51 +0000 Subject: fs/btrfs: Add missing btrfs_free_path Btrfs_alloc_path should be matched with btrfs_free_path in error-handling code. A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @r exists@ local idexpression struct btrfs_path * x; expression ra,rb; position p1,p2; @@ x = btrfs_alloc_path@p1(...) ... when != btrfs_free_path(x,...) when != if (...) { ... btrfs_free_path(x,...) ...} when != x = ra if(...) { ... when != x = rb when forall when != btrfs_free_path(x,...) 
\(return <+...x...+>; \| return@p2...; \) } @script:python@ p1 << r.p1; p2 << r.p2; @@ cocci.print_main("alloc",p1) cocci.print_secs("return",p2) // Signed-off-by: Julia Lawall Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 1 + fs/btrfs/super.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e9e2b4778279..80fcd5177731 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7314,6 +7314,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, datasize); if (err) { drop_inode = 1; + btrfs_free_path(path); goto out_unlock; } leaf = path->nodes[0]; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..46d7eed7e965 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -506,8 +506,10 @@ static struct dentry *get_default_root(struct super_block *sb, */ dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); - if (IS_ERR(di)) + if (IS_ERR(di)) { + btrfs_free_path(path); return ERR_CAST(di); + } if (!di) { /* * Ok the default dir item isn't there. This is weird since -- cgit v1.2.3 From 8233767a227ac5843f1023b88c7272a7b5058f5f Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 06:44:57 +0000 Subject: Btrfs: allocate extent state and check the result properly The current code doesn't allocate extent_state and check the result properly:

- in set_extent_bit, it doesn't allocate an extent_state if the path is not allowed to wait
- in clear_extent_bit, it doesn't check the result after allocating atomically; we trigger BUG_ON() if the allocation fails
- if allocation fails, we trigger BUG_ON instead of returning -ENOMEM, since the return value of clear_extent_bit() is ignored by many callers

Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31b..9ccea86dd015 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -439,6 +439,15 @@ static int clear_state_bit(struct extent_io_tree *tree, return ret; } +static struct extent_state * +alloc_extent_state_atomic(struct extent_state *prealloc) +{ + if (!prealloc) + prealloc = alloc_extent_state(GFP_ATOMIC); + + return prealloc; +} + /* * clear some bits on a range in the tree.
This may require splitting * or inserting elements in the tree, so the gfp mask is used to @@ -476,8 +485,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); - if (!prealloc) - return -ENOMEM; + BUG_ON(!prealloc); } spin_lock(&tree->lock); @@ -529,8 +537,8 @@ hit_next: */ if (state->start < start) { - if (!prealloc) - prealloc = alloc_extent_state(GFP_ATOMIC); + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = split_state(tree, state, prealloc, start); BUG_ON(err == -EEXIST); prealloc = NULL; @@ -551,8 +559,8 @@ hit_next: * on the first half */ if (state->start <= end && state->end > end) { - if (!prealloc) - prealloc = alloc_extent_state(GFP_ATOMIC); + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); if (wake) @@ -725,8 +733,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); - if (!prealloc) - return -ENOMEM; + BUG_ON(!prealloc); } spin_lock(&tree->lock); @@ -743,6 +750,8 @@ again: */ node = tree_search(tree, start); if (!node) { + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = insert_state(tree, prealloc, start, end, &bits); prealloc = NULL; BUG_ON(err == -EEXIST); @@ -811,6 +820,9 @@ hit_next: err = -EEXIST; goto out; } + + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = split_state(tree, state, prealloc, start); BUG_ON(err == -EEXIST); prealloc = NULL; @@ -841,6 +853,9 @@ hit_next: this_end = end; else this_end = last_start - 1; + + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = insert_state(tree, prealloc, start, this_end, &bits); BUG_ON(err == -EEXIST); @@ -865,6 +880,9 @@ hit_next: err = -EEXIST; goto out; } + + prealloc = alloc_extent_state_atomic(prealloc); + BUG_ON(!prealloc); err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); -- cgit v1.2.3 From c7f895a2b2d1a002810d52e7b6653c9dc2fd0b0b Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 06:45:49 +0000 Subject: Btrfs: fix unsafe usage of merge_state merge_state can free the current state if it can be merged with the next node, but in set_extent_bit(), after merge_state, we still use the current extent to get the next node and cache it into cached_state Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9ccea86dd015..ebfff5b44752 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -780,20 +780,18 @@ hit_next: if (err) goto out; + next_node = rb_next(node); cache_state(state, cached_state); merge_state(tree, state); if (last_end == (u64)-1) goto out; start = last_end + 1; - if (start < end && prealloc && !need_resched()) { - next_node = rb_next(node); - if (next_node) { - state = rb_entry(next_node, struct extent_state, - rb_node); - if (state->start == start) - goto hit_next; - } + if (next_node && start < end && prealloc && !need_resched()) { + state = rb_entry(next_node, struct extent_state, + rb_node); + if (state->start == start) + goto hit_next; } goto search_again; } @@ -856,14 +854,22 @@ hit_next: prealloc = alloc_extent_state_atomic(prealloc); BUG_ON(!prealloc); + + /* + * Avoid to free 
'prealloc' if it can be merged with + * the later extent. + */ + atomic_inc(&prealloc->refs); err = insert_state(tree, prealloc, start, this_end, &bits); BUG_ON(err == -EEXIST); if (err) { + free_extent_state(prealloc); prealloc = NULL; goto out; } cache_state(prealloc, cached_state); + free_extent_state(prealloc); prealloc = NULL; start = this_end + 1; goto search_again; -- cgit v1.2.3 From 4f6c9328c6c0d8558907f16579a9d47815abef80 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:06:40 +0000 Subject: Btrfs: fix bh leak on __btrfs_open_devices path 'bh' is never released if no error is detected. Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index deca1a0326ad..290100fc47be 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -597,6 +597,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, list_add(&device->dev_alloc_list, &fs_devices->alloc_list); } + brelse(bh); continue; error_brelse: -- cgit v1.2.3 From c9513edb0079f97749c2ac00c887a22c4ba44792 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:07:30 +0000 Subject: Btrfs: fix the race between reading and updating devices On the btrfs_congested_fn and __unplug_io_fn paths, we should hold device_list_mutex to keep the remove/add device paths from updating fs_devices->devices underneath us. On the __btrfs_close_devices and btrfs_prepare_sprout paths, the devices in fs_devices->devices are updated, so we should hold the mutex to keep the reader side from reaching them. Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 ++ fs/btrfs/volumes.c | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 64b289690f9d..4e53a4fc467f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1410,6 +1410,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) struct btrfs_device *device; struct backing_dev_info *bdi; + mutex_lock(&info->fs_devices->device_list_mutex); list_for_each_entry(device, &info->fs_devices->devices, dev_list) { if (!device->bdev) continue; bdi = blk_get_backing_dev_info(device->bdev); @@ -1419,6 +1420,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) break; } } + mutex_unlock(&info->fs_devices->device_list_mutex); return ret; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 290100fc47be..43c4f09e441c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -481,6 +481,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) if (--fs_devices->opened > 0) return 0; + mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { blkdev_put(device->bdev, device->mode); @@ -495,6 +496,8 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) device->writeable = 0; device->in_fs_metadata = 0; } + mutex_unlock(&fs_devices->device_list_mutex); + WARN_ON(fs_devices->open_devices); WARN_ON(fs_devices->rw_devices); fs_devices->opened = 0; @@ -1415,7 +1418,11 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, INIT_LIST_HEAD(&seed_devices->devices); INIT_LIST_HEAD(&seed_devices->alloc_list); mutex_init(&seed_devices->device_list_mutex); + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_splice_init(&fs_devices->devices, &seed_devices->devices); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); +
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); list_for_each_entry(device, &seed_devices->devices, dev_list) { device->fs_devices = seed_devices; -- cgit v1.2.3 From 0c1daee085cff1395d1eba4ad6faff7810a594d8 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:08:16 +0000 Subject: Btrfs: fix the race between remove dev and alloc chunk On the remove device path, we update device->dev_alloc_list without holding the chunk lock. Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 43c4f09e441c..ee197ec28547 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1291,7 +1291,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) } if (device->writeable) { + lock_chunks(root); list_del_init(&device->dev_alloc_list); + unlock_chunks(root); root->fs_info->fs_devices->rw_devices--; } @@ -1345,7 +1347,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) } fs_devices->seed = device->fs_devices->seed; device->fs_devices->seed = NULL; + lock_chunks(root); __btrfs_close_devices(device->fs_devices); + unlock_chunks(root); free_fs_devices(device->fs_devices); } @@ -1377,8 +1381,10 @@ out: return ret; error_undo: if (device->writeable) { + lock_chunks(root); list_add(&device->dev_alloc_list, &root->fs_info->fs_devices->alloc_list); + unlock_chunks(root); root->fs_info->fs_devices->rw_devices++; } goto error_brelse; -- cgit v1.2.3 From 46224705656633466ca7dc71d81b3c0abc76cae4 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:08:47 +0000 Subject: Btrfs: drop unnecessary device lock Drop device_list_mutex for the reader side on the clone_fs_devices and btrfs_rm_device paths, since fs_info->volume_mutex already ensures the device list is not updated there. btrfs_close_extra_devices runs on the initialization path, where we cannot add or remove devices yet, so we can simply drop the mutex there as well, like the other initialization-path functions do (add_missing_dev, __find_device, __btrfs_open_devices ...). Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ee197ec28547..0b5ca2737268 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -406,7 +406,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) fs_devices->latest_trans = orig->latest_trans; memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid)); - mutex_lock(&orig->device_list_mutex); + /* We have held the volume lock, it is safe to get the devices. */ list_for_each_entry(orig_dev, &orig->devices, dev_list) { device = kzalloc(sizeof(*device), GFP_NOFS); if (!device) @@ -429,10 +429,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) device->fs_devices = fs_devices; fs_devices->num_devices++; } - mutex_unlock(&orig->device_list_mutex); return fs_devices; error: - mutex_unlock(&orig->device_list_mutex); free_fs_devices(fs_devices); return ERR_PTR(-ENOMEM); } @@ -443,7 +441,7 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) mutex_lock(&uuid_mutex); again: - mutex_lock(&fs_devices->device_list_mutex); + /* This is the initialized path, it is safe to release the devices.
*/ list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { if (device->in_fs_metadata) continue; @@ -463,7 +461,6 @@ again: kfree(device->name); kfree(device); } - mutex_unlock(&fs_devices->device_list_mutex); if (fs_devices->seed) { fs_devices = fs_devices->seed; @@ -1242,14 +1239,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) device = NULL; devices = &root->fs_info->fs_devices->devices; - mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + /* + * It is safe to read the devices since the volume_mutex + * is held. + */ list_for_each_entry(tmp, devices, dev_list) { if (tmp->in_fs_metadata && !tmp->bdev) { device = tmp; break; } } - mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); bdev = NULL; bh = NULL; disk_super = NULL; -- cgit v1.2.3 From 1f78160ce1b1b8e657e2248118c4d91f881763f0 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:09:16 +0000 Subject: Btrfs: using rcu lock in the reader side of devices list fs_devices->devices is only updated on remove and add device paths, so we can use rcu to protect it in the reader side Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 14 ++++----- fs/btrfs/ioctl.c | 7 +++-- fs/btrfs/volumes.c | 85 +++++++++++++++++++++++++++++++++++++----------------- fs/btrfs/volumes.h | 2 ++ 4 files changed, 72 insertions(+), 36 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4e53a4fc467f..deba3d9c8853 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1410,8 +1410,8 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) struct btrfs_device *device; struct backing_dev_info *bdi; - mutex_lock(&info->fs_devices->device_list_mutex); - list_for_each_entry(device, &info->fs_devices->devices, dev_list) { + rcu_read_lock(); + list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { if (!device->bdev) continue; bdi = blk_get_backing_dev_info(device->bdev); @@ -1420,7 +1420,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) break; } } - mutex_unlock(&info->fs_devices->device_list_mutex); + rcu_read_unlock(); return ret; } @@ -2332,9 +2332,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) sb = &root->fs_info->super_for_commit; dev_item = &sb->dev_item; - mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + rcu_read_lock(); head = &root->fs_info->fs_devices->devices; - list_for_each_entry(dev, head, dev_list) { + list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) { total_errors++; continue; @@ -2367,7 +2367,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) } total_errors = 0; - list_for_each_entry(dev, head, dev_list) { + list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) continue; if (!dev->in_fs_metadata || !dev->writeable) @@ -2377,7 +2377,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) if (ret) total_errors++; } - mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + rcu_read_unlock(); if (total_errors > max_errors) { printk(KERN_ERR "btrfs: %d errors while writing supers\n", total_errors); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ce773fb736a1..0de71feb8e1c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -281,8 +281,9 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - mutex_lock(&fs_info->fs_devices->device_list_mutex); - list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) 
{ + rcu_read_lock(); + list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, + dev_list) { if (!device->bdev) continue; q = bdev_get_queue(device->bdev); @@ -292,7 +293,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) minlen); } } - mutex_unlock(&fs_info->fs_devices->device_list_mutex); + rcu_read_unlock(); if (!num_devices) return -EOPNOTSUPP; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0b5ca2737268..e7844f8a347a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -363,7 +363,7 @@ static noinline int device_list_add(const char *path, INIT_LIST_HEAD(&device->dev_alloc_list); mutex_lock(&fs_devices->device_list_mutex); - list_add(&device->dev_list, &fs_devices->devices); + list_add_rcu(&device->dev_list, &fs_devices->devices); mutex_unlock(&fs_devices->device_list_mutex); device->fs_devices = fs_devices; @@ -471,6 +471,29 @@ again: return 0; } +static void __free_device(struct work_struct *work) +{ + struct btrfs_device *device; + + device = container_of(work, struct btrfs_device, rcu_work); + + if (device->bdev) + blkdev_put(device->bdev, device->mode); + + kfree(device->name); + kfree(device); +} + +static void free_device(struct rcu_head *head) +{ + struct btrfs_device *device; + + device = container_of(head, struct btrfs_device, rcu); + + INIT_WORK(&device->rcu_work, __free_device); + schedule_work(&device->rcu_work); +} + static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; @@ -480,18 +503,27 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { - if (device->bdev) { - blkdev_put(device->bdev, device->mode); + struct btrfs_device *new_device; + + if (device->bdev) fs_devices->open_devices--; - } + if (device->writeable) { list_del_init(&device->dev_alloc_list); fs_devices->rw_devices--; } - device->bdev = NULL; - device->writeable = 0; - device->in_fs_metadata = 0; + new_device = kmalloc(sizeof(*new_device), GFP_NOFS); + BUG_ON(!new_device); + memcpy(new_device, device, sizeof(*new_device)); + new_device->name = kstrdup(device->name, GFP_NOFS); + BUG_ON(!new_device->name); + new_device->bdev = NULL; + new_device->writeable = 0; + new_device->in_fs_metadata = 0; + list_replace_rcu(&device->dev_list, &new_device->dev_list); + + call_rcu(&device->rcu, free_device); } mutex_unlock(&fs_devices->device_list_mutex); @@ -1204,11 +1236,13 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) struct block_device *bdev; struct buffer_head *bh = NULL; struct btrfs_super_block *disk_super; + struct btrfs_fs_devices *cur_devices; u64 all_avail; u64 devid; u64 num_devices; u8 *dev_uuid; int ret = 0; + bool clear_super = false; mutex_lock(&uuid_mutex); mutex_lock(&root->fs_info->volume_mutex); @@ -1294,6 +1328,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) list_del_init(&device->dev_alloc_list); unlock_chunks(root); root->fs_info->fs_devices->rw_devices--; + clear_super = true; } ret = btrfs_shrink_device(device, 0); @@ -1304,16 +1339,15 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (ret) goto error_undo; - device->in_fs_metadata = 0; - /* * the device list mutex makes sure that we don't change * the device list while someone else is writing out all * the device supers. 
*/ + + cur_devices = device->fs_devices; mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_del_init(&device->dev_list); - mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + list_del_rcu(&device->dev_list); device->fs_devices->num_devices--; @@ -1327,36 +1361,36 @@ if (device->bdev == root->fs_info->fs_devices->latest_bdev) root->fs_info->fs_devices->latest_bdev = next_device->bdev; - if (device->bdev) { - blkdev_put(device->bdev, device->mode); - device->bdev = NULL; + if (device->bdev) device->fs_devices->open_devices--; - } + + call_rcu(&device->rcu, free_device); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); - if (device->fs_devices->open_devices == 0) { + if (cur_devices->open_devices == 0) { struct btrfs_fs_devices *fs_devices; fs_devices = root->fs_info->fs_devices; while (fs_devices) { - if (fs_devices->seed == device->fs_devices) + if (fs_devices->seed == cur_devices) break; fs_devices = fs_devices->seed; } - fs_devices->seed = device->fs_devices->seed; - device->fs_devices->seed = NULL; + fs_devices->seed = cur_devices->seed; + cur_devices->seed = NULL; lock_chunks(root); - __btrfs_close_devices(device->fs_devices); + __btrfs_close_devices(cur_devices); unlock_chunks(root); - free_fs_devices(device->fs_devices); + free_fs_devices(cur_devices); } /* * at this point, the device is zero sized. We want to * remove it from the devices list and zero out the old super */ - if (device->writeable) { + if (clear_super) { /* make sure this device isn't detected as part of * the FS anymore */ @@ -1365,8 +1399,6 @@ sync_dirty_buffer(bh); } - kfree(device->name); - kfree(device); ret = 0; error_brelse: @@ -1425,7 +1457,8 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, mutex_init(&seed_devices->device_list_mutex); mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_splice_init(&fs_devices->devices, &seed_devices->devices); + list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, + synchronize_rcu); mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); @@ -1624,7 +1657,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) * half setup */ mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_add(&device->dev_list, &root->fs_info->fs_devices->devices); + list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); list_add(&device->dev_alloc_list, &root->fs_info->fs_devices->alloc_list); root->fs_info->fs_devices->num_devices++; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..f1b2e4f53fc2 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -86,6 +86,8 @@ struct btrfs_device { u8 uuid[BTRFS_UUID_SIZE]; struct btrfs_work work; + struct rcu_head rcu; + struct work_struct rcu_work; }; struct btrfs_fs_devices { -- cgit v1.2.3 From 4cb5300bc839b8a943eb19c9f27f25470e22d0ca Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 24 May 2011 15:35:30 -0400 Subject: Btrfs: add mount -o auto_defrag This will detect small random writes into files and queue them up for an auto defrag process. It isn't well suited to database workloads yet, but works for smaller files such as rpm, sqlite or bdb databases.
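The trigger heuristic is deliberately simple, as the inode.c hunks below show: when a write lands inside the existing i_size and is small, the inode is remembered for a later defrag pass. In outline:

	/* if this is a small write inside eof, kick off a defrag pass */
	if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
		btrfs_add_inode_defrag(trans, inode);

Queued inodes live in an rbtree keyed by inode number and are drained by btrfs_run_defrag_inodes() from the cleaner kthread once the filesystem is mounted with the new auto defrag option.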
Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 1 + fs/btrfs/ctree.h | 45 ++++- fs/btrfs/disk-io.c | 12 ++ fs/btrfs/file.c | 257 ++++++++++++++++++++++++++++ fs/btrfs/inode.c | 12 ++ fs/btrfs/ioctl.c | 448 ++++++++++++++++++++++++++++++++++++++----------- fs/btrfs/ioctl.h | 31 ---- fs/btrfs/super.c | 7 +- 8 files changed, 678 insertions(+), 135 deletions(-) diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index d0b0e43a6a8b..93b1aa932014 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -153,6 +153,7 @@ struct btrfs_inode { unsigned ordered_data_close:1; unsigned orphan_meta_reserved:1; unsigned dummy_inode:1; + unsigned in_defrag:1; /* * always compress this one file diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 026fc47b42cf..332323e19dd1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1074,6 +1074,11 @@ struct btrfs_fs_info { /* all metadata allocations go through this cluster */ struct btrfs_free_cluster meta_alloc_cluster; + /* auto defrag inodes go here */ + spinlock_t defrag_inodes_lock; + struct rb_root defrag_inodes; + atomic_t defrag_running; + spinlock_t ref_cache_lock; u64 total_ref_cache_size; @@ -1205,6 +1210,38 @@ struct btrfs_root { struct super_block anon_super; }; +struct btrfs_ioctl_defrag_range_args { + /* start of the defrag operation */ + __u64 start; + + /* number of bytes to defrag, use (u64)-1 to say all */ + __u64 len; + + /* + * flags for the operation, which can include turning + * on compression for this one defrag + */ + __u64 flags; + + /* + * any extent bigger than this will be considered + * already defragged. Use 0 to take the kernel default + * Use 1 to say every single extent must be rewritten + */ + __u32 extent_thresh; + + /* + * which compression method to use if turning on compression + * for this defrag operation. If unspecified, zlib will + * be used + */ + __u32 compress_type; + + /* spare for later */ + __u32 unused[4]; +}; + + /* * inode items have the data typically returned from stat and store other * info about object characteristics. 
There is one for every file and dir in @@ -1302,6 +1339,7 @@ struct btrfs_root { #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) +#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -2528,8 +2566,13 @@ extern const struct dentry_operations btrfs_dentry_operations; long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); void btrfs_update_iflags(struct inode *inode); void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); - +int btrfs_defrag_file(struct inode *inode, struct file *file, + struct btrfs_ioctl_defrag_range_args *range, + u64 newer_than, unsigned long max_pages); /* file.c */ +int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, + struct inode *inode); +int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); int btrfs_sync_file(struct file *file, int datasync); int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 16d335b342a2..b2588a552658 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1475,6 +1475,7 @@ static int cleaner_kthread(void *arg) btrfs_run_delayed_iputs(root); btrfs_clean_old_snapshots(root); mutex_unlock(&root->fs_info->cleaner_mutex); + btrfs_run_defrag_inodes(root->fs_info); } if (freezing(current)) { @@ -1616,6 +1617,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, spin_lock_init(&fs_info->ref_cache_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); + spin_lock_init(&fs_info->defrag_inodes_lock); init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; @@ -1638,9 +1640,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); + atomic_set(&fs_info->defrag_running, 0); fs_info->sb = sb; fs_info->max_inline = 8192 * 1024; fs_info->metadata_ratio = 0; + fs_info->defrag_inodes = RB_ROOT; fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); @@ -2501,6 +2505,14 @@ int close_ctree(struct btrfs_root *root) smp_mb(); btrfs_scrub_cancel(root); + + /* wait for any defraggers to finish */ + wait_event(fs_info->transaction_wait, + (atomic_read(&fs_info->defrag_running) == 0)); + + /* clear out the rbtree of defraggable inodes */ + btrfs_run_defrag_inodes(root->fs_info); + btrfs_put_block_group_cache(fs_info); /* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 58ddc4442159..c6a22d783c35 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -40,6 +40,263 @@ #include "locking.h" #include "compat.h" +/* + * when auto defrag is enabled we + * queue up these defrag structs to remember which + * inodes need defragging passes + */ +struct inode_defrag { + struct rb_node rb_node; + /* objectid */ + u64 ino; + /* + * transid where the defrag was added, we search for + * extents newer than this + */ + u64 transid; + + /* root objectid */ + u64 root; + + /* last offset we were able to defrag */ + u64 last_offset; + + /* if we've wrapped around back to zero once already */ + int cycled; +}; + +/* pop a record for an inode into the defrag tree. 
The lock + * must be held already + * + * If you're inserting a record for an older transid than an + * existing record, the transid already in the tree is lowered + * + * If an existing record is found the defrag item you + * pass in is freed + */ +static int __btrfs_add_inode_defrag(struct inode *inode, + struct inode_defrag *defrag) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct inode_defrag *entry; + struct rb_node **p; + struct rb_node *parent = NULL; + + p = &root->fs_info->defrag_inodes.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct inode_defrag, rb_node); + + if (defrag->ino < entry->ino) + p = &parent->rb_left; + else if (defrag->ino > entry->ino) + p = &parent->rb_right; + else { + /* if we're reinserting an entry for + * an old defrag run, make sure to + * lower the transid of our existing record + */ + if (defrag->transid < entry->transid) + entry->transid = defrag->transid; + if (defrag->last_offset > entry->last_offset) + entry->last_offset = defrag->last_offset; + goto exists; + } + } + BTRFS_I(inode)->in_defrag = 1; + rb_link_node(&defrag->rb_node, parent, p); + rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes); + return 0; + +exists: + kfree(defrag); + return 0; + +} + +/* + * insert a defrag record for this inode if auto defrag is + * enabled + */ +int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct inode_defrag *defrag; + int ret = 0; + u64 transid; + + if (!btrfs_test_opt(root, AUTO_DEFRAG)) + return 0; + + if (root->fs_info->closing) + return 0; + + if (BTRFS_I(inode)->in_defrag) + return 0; + + if (trans) + transid = trans->transid; + else + transid = BTRFS_I(inode)->root->last_trans; + + defrag = kzalloc(sizeof(*defrag), GFP_NOFS); + if (!defrag) + return -ENOMEM; + + defrag->ino = inode->i_ino; + defrag->transid = transid; + defrag->root = root->root_key.objectid; + + spin_lock(&root->fs_info->defrag_inodes_lock); + if (!BTRFS_I(inode)->in_defrag) + ret = __btrfs_add_inode_defrag(inode, defrag); + spin_unlock(&root->fs_info->defrag_inodes_lock); + return ret; +} + +/* + * must be called with the defrag_inodes lock held + */ +struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino, + struct rb_node **next) +{ + struct inode_defrag *entry = NULL; + struct rb_node *p; + struct rb_node *parent = NULL; + + p = info->defrag_inodes.rb_node; + while (p) { + parent = p; + entry = rb_entry(parent, struct inode_defrag, rb_node); + + if (ino < entry->ino) + p = parent->rb_left; + else if (ino > entry->ino) + p = parent->rb_right; + else + return entry; + } + + if (next) { + while (parent && ino > entry->ino) { + parent = rb_next(parent); + entry = rb_entry(parent, struct inode_defrag, rb_node); + } + *next = parent; + } + return NULL; +} + +/* + * run through the list of inodes in the FS that need + * defragging + */ +int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) +{ + struct inode_defrag *defrag; + struct btrfs_root *inode_root; + struct inode *inode; + struct rb_node *n; + struct btrfs_key key; + struct btrfs_ioctl_defrag_range_args range; + u64 first_ino = 0; + int num_defrag; + int defrag_batch = 1024; + + memset(&range, 0, sizeof(range)); + range.len = (u64)-1; + + atomic_inc(&fs_info->defrag_running); + spin_lock(&fs_info->defrag_inodes_lock); + while(1) { + n = NULL; + + /* find an inode to defrag */ + defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n); + if (!defrag) { + if (n) + 
defrag = rb_entry(n, struct inode_defrag, rb_node); + else if (first_ino) { + first_ino = 0; + continue; + } else { + break; + } + } + + /* remove it from the rbtree */ + first_ino = defrag->ino + 1; + rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); + + if (fs_info->closing) + goto next_free; + + spin_unlock(&fs_info->defrag_inodes_lock); + + /* get the inode */ + key.objectid = defrag->root; + btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); + key.offset = (u64)-1; + inode_root = btrfs_read_fs_root_no_name(fs_info, &key); + if (IS_ERR(inode_root)) + goto next; + + key.objectid = defrag->ino; + btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); + key.offset = 0; + + inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); + if (IS_ERR(inode)) + goto next; + + /* do a chunk of defrag */ + BTRFS_I(inode)->in_defrag = 0; + range.start = defrag->last_offset; + num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid, + defrag_batch); + /* + * if we filled the whole defrag batch, there + * must be more work to do. Queue this defrag + * again + */ + if (num_defrag == defrag_batch) { + defrag->last_offset = range.start; + __btrfs_add_inode_defrag(inode, defrag); + /* + * we don't want to kfree defrag, we added it back to + * the rbtree + */ + defrag = NULL; + } else if (defrag->last_offset && !defrag->cycled) { + /* + * we didn't fill our defrag batch, but + * we didn't start at zero. Make sure we loop + * around to the start of the file. + */ + defrag->last_offset = 0; + defrag->cycled = 1; + __btrfs_add_inode_defrag(inode, defrag); + defrag = NULL; + } + + iput(inode); +next: + spin_lock(&fs_info->defrag_inodes_lock); +next_free: + kfree(defrag); + } + spin_unlock(&fs_info->defrag_inodes_lock); + + atomic_dec(&fs_info->defrag_running); + + /* + * during unmount, we use the transaction_wait queue to + * wait for the defragger to stop + */ + wake_up(&fs_info->transaction_wait); + return 0; +} /* simple helper to fault in pages and copy. This should go away * and be replaced with calls into generic code. 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d378f8b70ef7..bb51bb1fa44f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -342,6 +342,10 @@ static noinline int compress_file_range(struct inode *inode, int will_compress; int compress_type = root->fs_info->compress_type; + /* if this is a small write inside eof, kick off a defrag */ + if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024) + btrfs_add_inode_defrag(NULL, inode); + actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; @@ -799,6 +803,10 @@ static noinline int cow_file_range(struct inode *inode, disk_num_bytes = num_bytes; ret = 0; + /* if this is a small write inside eof, kick off defrag */ + if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024) + btrfs_add_inode_defrag(trans, inode); + if (start == 0) { /* lets try to make an inline extent */ ret = cow_file_range_inline(trans, root, inode, @@ -5371,6 +5379,9 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, if (IS_ERR(trans)) return ERR_CAST(trans); + if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024) + btrfs_add_inode_defrag(trans, inode); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; alloc_hint = get_extent_allocation_hint(inode, start, len); @@ -6682,6 +6693,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->ordered_data_close = 0; ei->orphan_meta_reserved = 0; ei->dummy_inode = 0; + ei->in_defrag = 0; ei->force_compress = BTRFS_COMPRESS_NONE; ei->delayed_node = NULL; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index c4f17e4e2c9c..85e818ce00c5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -656,6 +656,106 @@ out_unlock: return error; } +/* + * When we're defragging a range, we don't want to kick it off again + * if it is really just waiting for delalloc to send it down. + * If we find a nice big extent or delalloc range for the bytes in the + * file you want to defrag, we return 0 to let you know to skip this + * part of the file + */ +static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh) +{ + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map *em = NULL; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + u64 end; + + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); + read_unlock(&em_tree->lock); + + if (em) { + end = extent_map_end(em); + free_extent_map(em); + if (end - offset > thresh) + return 0; + } + /* if we already have a nice delalloc here, just stop */ + thresh /= 2; + end = count_range_bits(io_tree, &offset, offset + thresh, + thresh, EXTENT_DELALLOC, 1); + if (end >= thresh) + return 0; + return 1; +} + +/* + * helper function to walk through a file and find extents + * newer than a specific transid, and smaller than thresh.
+ * + * This is used by the defragging code to find new and small + * extents + */ +static int find_new_extents(struct btrfs_root *root, + struct inode *inode, u64 newer_than, + u64 *off, int thresh) +{ + struct btrfs_path *path; + struct btrfs_key min_key; + struct btrfs_key max_key; + struct extent_buffer *leaf; + struct btrfs_file_extent_item *extent; + int type; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + min_key.objectid = inode->i_ino; + min_key.type = BTRFS_EXTENT_DATA_KEY; + min_key.offset = *off; + + max_key.objectid = inode->i_ino; + max_key.type = (u8)-1; + max_key.offset = (u64)-1; + + path->keep_locks = 1; + + while(1) { + ret = btrfs_search_forward(root, &min_key, &max_key, + path, 0, newer_than); + if (ret != 0) + goto none; + if (min_key.objectid != inode->i_ino) + goto none; + if (min_key.type != BTRFS_EXTENT_DATA_KEY) + goto none; + + leaf = path->nodes[0]; + extent = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + + type = btrfs_file_extent_type(leaf, extent); + if (type == BTRFS_FILE_EXTENT_REG && + btrfs_file_extent_num_bytes(leaf, extent) < thresh && + check_defrag_in_cache(inode, min_key.offset, thresh)) { + *off = min_key.offset; + btrfs_free_path(path); + return 0; + } + + if (min_key.offset == (u64)-1) + goto none; + + min_key.offset++; + btrfs_release_path(path); + } +none: + btrfs_free_path(path); + return -ENOENT; +} + static int should_defrag_range(struct inode *inode, u64 start, u64 len, int thresh, u64 *last_len, u64 *skip, u64 *defrag_end) @@ -665,10 +765,6 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 1; - - if (thresh == 0) - thresh = 256 * 1024; - /* * make sure that once we start defragging an extent, we keep on * defragging it @@ -727,27 +823,176 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, return ret; } -static int btrfs_defrag_file(struct file *file, - struct btrfs_ioctl_defrag_range_args *range) +/* + * it doesn't do much good to defrag one or two pages + * at a time. This pulls in a nice chunk of pages + * to COW and defrag. + * + * It also makes sure the delalloc code has enough + * dirty data to avoid making new small extents as part + * of the defrag + * + * It's a good idea to start RA on this range + * before calling this.
+ */ +static int cluster_pages_for_defrag(struct inode *inode, + struct page **pages, + unsigned long start_index, + int num_pages) { - struct inode *inode = fdentry(file)->d_inode; - struct btrfs_root *root = BTRFS_I(inode)->root; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + unsigned long file_end; + u64 isize = i_size_read(inode); + u64 page_start; + u64 page_end; + int ret; + int i; + int i_done; struct btrfs_ordered_extent *ordered; - struct page *page; + struct extent_state *cached_state = NULL; + + if (isize == 0) + return 0; + file_end = (isize - 1) >> PAGE_CACHE_SHIFT; + + ret = btrfs_delalloc_reserve_space(inode, + num_pages << PAGE_CACHE_SHIFT); + if (ret) + return ret; +again: + ret = 0; + i_done = 0; + + /* step one, lock all the pages */ + for (i = 0; i < num_pages; i++) { + struct page *page; + page = grab_cache_page(inode->i_mapping, + start_index + i); + if (!page) + break; + + if (!PageUptodate(page)) { + btrfs_readpage(NULL, page); + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + page_cache_release(page); + ret = -EIO; + break; + } + } + isize = i_size_read(inode); + file_end = (isize - 1) >> PAGE_CACHE_SHIFT; + if (!isize || page->index > file_end || + page->mapping != inode->i_mapping) { + /* whoops, we blew past eof, skip this page */ + unlock_page(page); + page_cache_release(page); + break; + } + pages[i] = page; + i_done++; + } + if (!i_done || ret) + goto out; + + if (!(inode->i_sb->s_flags & MS_ACTIVE)) + goto out; + + /* + * so now we have a nice long stream of locked + * and up to date pages, lets wait on them + */ + for (i = 0; i < i_done; i++) + wait_on_page_writeback(pages[i]); + + page_start = page_offset(pages[0]); + page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; + + lock_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, 0, &cached_state, + GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1); + if (ordered && + ordered->file_offset + ordered->len > page_start && + ordered->file_offset < page_end) { + btrfs_put_ordered_extent(ordered); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, + &cached_state, GFP_NOFS); + for (i = 0; i < i_done; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + btrfs_wait_ordered_range(inode, page_start, + page_end - page_start); + goto again; + } + if (ordered) + btrfs_put_ordered_extent(ordered); + + clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, + page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, + GFP_NOFS); + + if (i_done != num_pages) { + atomic_inc(&BTRFS_I(inode)->outstanding_extents); + btrfs_delalloc_release_space(inode, + (num_pages - i_done) << PAGE_CACHE_SHIFT); + } + + + btrfs_set_extent_delalloc(inode, page_start, page_end - 1, + &cached_state); + + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, &cached_state, + GFP_NOFS); + + for (i = 0; i < i_done; i++) { + clear_page_dirty_for_io(pages[i]); + ClearPageChecked(pages[i]); + set_page_extent_mapped(pages[i]); + set_page_dirty(pages[i]); + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + return i_done; +out: + for (i = 0; i < i_done; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT); + return ret; + +} + +int btrfs_defrag_file(struct inode *inode, struct file *file, + struct btrfs_ioctl_defrag_range_args *range, + u64 newer_than, unsigned long 
max_to_defrag) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *disk_super; + struct file_ra_state *ra = NULL; unsigned long last_index; - unsigned long ra_pages = root->fs_info->bdi.ra_pages; - unsigned long total_read = 0; u64 features; - u64 page_start; - u64 page_end; u64 last_len = 0; u64 skip = 0; u64 defrag_end = 0; + u64 newer_off = range->start; + int newer_left = 0; unsigned long i; int ret; + int defrag_count = 0; int compress_type = BTRFS_COMPRESS_ZLIB; + int extent_thresh = range->extent_thresh; + int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT; + u64 new_align = ~((u64)128 * 1024 - 1); + struct page **pages = NULL; + + if (extent_thresh == 0) + extent_thresh = 256 * 1024; if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { if (range->compress_type > BTRFS_COMPRESS_TYPES) @@ -759,6 +1004,27 @@ static int btrfs_defrag_file(struct file *file, if (inode->i_size == 0) return 0; + /* + * if we were not given a file, allocate a readahead + * context + */ + if (!file) { + ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; + file_ra_state_init(ra, inode->i_mapping); + } else { + ra = &file->f_ra; + } + + pages = kmalloc(sizeof(struct page *) * newer_cluster, + GFP_NOFS); + if (!pages) { + ret = -ENOMEM; + goto out_ra; + } + + /* find the last page to defrag */ if (range->start + range->len > range->start) { last_index = min_t(u64, inode->i_size - 1, range->start + range->len - 1) >> PAGE_CACHE_SHIFT; @@ -766,11 +1032,37 @@ static int btrfs_defrag_file(struct file *file, last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; } - i = range->start >> PAGE_CACHE_SHIFT; - while (i <= last_index) { - if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, + if (newer_than) { + ret = find_new_extents(root, inode, newer_than, + &newer_off, 64 * 1024); + if (!ret) { + range->start = newer_off; + /* + * we always align our defrag to help keep + * the extents in the file evenly spaced + */ + i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + newer_left = newer_cluster; + } else + goto out_ra; + } else { + i = range->start >> PAGE_CACHE_SHIFT; + } + if (!max_to_defrag) + max_to_defrag = last_index - 1; + + while (i <= last_index && defrag_count < max_to_defrag) { + /* + * make sure we stop running if someone unmounts + * the FS + */ + if (!(inode->i_sb->s_flags & MS_ACTIVE)) + break; + + if (!newer_than && + !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, - range->extent_thresh, + extent_thresh, &last_len, &skip, &defrag_end)) { unsigned long next; @@ -782,92 +1074,39 @@ static int btrfs_defrag_file(struct file *file, i = max(i + 1, next); continue; } - - if (total_read % ra_pages == 0) { - btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, - min(last_index, i + ra_pages - 1)); - } - total_read++; - mutex_lock(&inode->i_mutex); if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) BTRFS_I(inode)->force_compress = compress_type; - ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); - if (ret) - goto err_unlock; -again: - if (inode->i_size == 0 || - i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { - ret = 0; - goto err_reservations; - } + btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster); - page = grab_cache_page(inode->i_mapping, i); - if (!page) { - ret = -ENOMEM; - goto err_reservations; - } - - if (!PageUptodate(page)) { - btrfs_readpage(NULL, page); - lock_page(page); - if (!PageUptodate(page)) { - unlock_page(page); - page_cache_release(page); - ret = -EIO; - goto err_reservations; - } - } - - if 
(page->mapping != inode->i_mapping) { - unlock_page(page); - page_cache_release(page); - goto again; - } - - wait_on_page_writeback(page); + ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster); + if (ret < 0) + goto out_ra; - if (PageDirty(page)) { - btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); - goto loop_unlock; - } + defrag_count += ret; + balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret); + i += ret; - page_start = (u64)page->index << PAGE_CACHE_SHIFT; - page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(io_tree, page_start, page_end, GFP_NOFS); + if (newer_than) { + if (newer_off == (u64)-1) + break; - ordered = btrfs_lookup_ordered_extent(inode, page_start); - if (ordered) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - goto again; + newer_off = max(newer_off + 1, + (u64)i << PAGE_CACHE_SHIFT); + + ret = find_new_extents(root, inode, + newer_than, &newer_off, + 64 * 1024); + if (!ret) { + range->start = newer_off; + i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + newer_left = newer_cluster; + } else { + break; + } + } else { + i++; } - set_page_extent_mapped(page); - - /* - * this makes sure page_mkwrite is called on the - * page if it is dirtied again later - */ - clear_page_dirty_for_io(page); - clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, - page_end, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, GFP_NOFS); - - btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); - ClearPageChecked(page); - set_page_dirty(page); - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - -loop_unlock: - unlock_page(page); - page_cache_release(page); - mutex_unlock(&inode->i_mutex); - - balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); - i++; } if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) @@ -899,12 +1138,14 @@ loop_unlock: btrfs_set_super_incompat_flags(disk_super, features); } - return 0; + if (!file) + kfree(ra); + return defrag_count; -err_reservations: - btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); -err_unlock: - mutex_unlock(&inode->i_mutex); +out_ra: + if (!file) + kfree(ra); + kfree(pages); return ret; } @@ -1756,7 +1997,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) /* the rest are all set to zero by kzalloc */ range->len = (u64)-1; } - ret = btrfs_defrag_file(file, range); + ret = btrfs_defrag_file(fdentry(file)->d_inode, file, + range, 0, 0); + if (ret > 0) + ret = 0; kfree(range); break; default: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index e5e0ee2cad4e..ad1ea789fcb4 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -181,37 +181,6 @@ struct btrfs_ioctl_clone_range_args { #define BTRFS_DEFRAG_RANGE_COMPRESS 1 #define BTRFS_DEFRAG_RANGE_START_IO 2 -struct btrfs_ioctl_defrag_range_args { - /* start of the defrag operation */ - __u64 start; - - /* number of bytes to defrag, use (u64)-1 to say all */ - __u64 len; - - /* - * flags for the operation, which can include turning - * on compression for this one defrag - */ - __u64 flags; - - /* - * any extent bigger than this will be considered - * already defragged. Use 0 to take the kernel default - * Use 1 to say every single extent must be rewritten - */ - __u32 extent_thresh; - - /* - * which compression method to use if turning on compression - * for this defrag operation. 
If unspecified, zlib will - be used */ - - __u32 compress_type; - - /* spare for later */ - __u32 unused[4]; -}; - struct btrfs_ioctl_space_info { __u64 flags; __u64 total_bytes; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index cd0c7cd2c8fb..28e3cb2607ff 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -160,7 +160,7 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_subvolrootid, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, }; static match_table_t tokens = { @@ -191,6 +191,7 @@ static match_table_t tokens = { {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, {Opt_enospc_debug, "enospc_debug"}, {Opt_subvolrootid, "subvolrootid=%d"}, + {Opt_defrag, "autodefrag"}, {Opt_err, NULL}, }; @@ -369,6 +370,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_enospc_debug: btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); break; + case Opt_defrag: + printk(KERN_INFO "btrfs: enabling auto defrag"); + btrfs_set_opt(info->mount_opt, AUTO_DEFRAG); + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); -- cgit v1.2.3 From c309df07868baa8b05d2a70637096465746fdbb5 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 26 May 2011 17:43:59 -0400 Subject: Btrfs: return -ENOMEM in clear_extent_bit The btrfs releasepage function depends on ENOMEM coming back when it is called in atomic context. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a90c4a12556b..0e0fe0f6ec75 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -485,7 +485,8 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); - BUG_ON(!prealloc); + if (!prealloc) + return -ENOMEM; } spin_lock(&tree->lock); -- cgit v1.2.3 From 00d01bc17cc2807292303961519d9c005794eb1d Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Wed, 25 May 2011 12:22:50 +0000 Subject: btrfs scrub: don't coalesce pages that are logically discontiguous scrub_page collects several pages into one bio as long as they are physically contiguous. As we only save one logical address for the whole bio, don't collect pages that are physically contiguous but logically discontiguous. Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 87a2f1273136..6dfed0c27ac3 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -631,7 +631,8 @@ again: if (sbio->count == 0) { sbio->physical = physical; sbio->logical = logical; - } else if (sbio->physical + sbio->count * PAGE_SIZE != physical) { + } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || + sbio->logical + sbio->count * PAGE_SIZE != logical) { scrub_submit(sdev); goto again; } -- cgit v1.2.3 From a47d6b70e280401d553e7cac6f5750870de1ad21 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 26 May 2011 06:38:30 +0000 Subject: Btrfs: setup free ino caching in a more asynchronous way For a filesystem that has lots of files in it, the first time we mount it with free ino caching support, it can take quite a long time to set up the caching before we can create new files.
Here we fill the cache with [highest_ino, BTRFS_LAST_FREE_OBJECTID] before we start the caching thread to search through the extent tree. Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/inode-map.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 000970512624..3262cd17a12f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -60,12 +60,12 @@ again: while (1) { smp_mb(); - if (fs_info->closing > 1) + if (fs_info->closing) goto out; leaf = path->nodes[0]; slot = path->slots[0]; - if (path->slots[0] >= btrfs_header_nritems(leaf)) { + if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; @@ -100,7 +100,7 @@ again: if (key.type != BTRFS_INODE_ITEM_KEY) goto next; - if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) + if (key.objectid >= root->highest_objectid) break; if (last != (u64)-1 && last + 1 != key.objectid) { @@ -114,9 +114,9 @@ next: path->slots[0]++; } - if (last < BTRFS_LAST_FREE_OBJECTID - 1) { + if (last < root->highest_objectid - 1) { __btrfs_add_free_space(ctl, last + 1, - BTRFS_LAST_FREE_OBJECTID - last - 1); + root->highest_objectid - last - 1); } spin_lock(&root->cache_lock); @@ -136,8 +136,10 @@ out: static void start_caching(struct btrfs_root *root) { + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; struct task_struct *tsk; int ret; + u64 objectid; spin_lock(&root->cache_lock); if (root->cached != BTRFS_CACHE_NO) { @@ -156,6 +158,19 @@ static void start_caching(struct btrfs_root *root) return; } + /* + * It can be quite time-consuming to fill the cache by searching + * through the extent tree, and this can keep ino allocation path + * waiting. Therefore at start we quickly find out the highest + * inode number and we know we can use inode numbers which fall in + * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID]. + */ + ret = btrfs_find_free_objectid(root, &objectid); + if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) { + __btrfs_add_free_space(ctl, objectid, + BTRFS_LAST_FREE_OBJECTID - objectid + 1); + } + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", root->root_key.objectid); BUG_ON(IS_ERR(tsk)); @@ -209,7 +224,8 @@ again: start_caching(root); - if (objectid <= root->cache_progress) + if (objectid <= root->cache_progress || + objectid > root->highest_objectid) __btrfs_add_free_space(ctl, objectid, 1); else __btrfs_add_free_space(pinned, objectid, 1); -- cgit v1.2.3 From 174ba50915b08dcfd07c8b5fb795b46a165fa09a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 27 May 2011 10:03:58 -0400 Subject: Btrfs: use the device_list_mutex during write_dev_supers write_dev_supers was changed to use RCU to protect the list of devices, but it was then sleeping while it actually wrote the supers. This fixes it to just use the mutex, since we really don't need any concurrency in write_dev_supers anyway.
Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b2588a552658..98b6a71decba 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2310,7 +2310,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) sb = &root->fs_info->super_for_commit; dev_item = &sb->dev_item; - rcu_read_lock(); + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); head = &root->fs_info->fs_devices->devices; list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) { @@ -2355,7 +2355,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) if (ret) total_errors++; } - rcu_read_unlock(); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (total_errors > max_errors) { printk(KERN_ERR "btrfs: %d errors while writing supers\n", total_errors); -- cgit v1.2.3
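The rule behind this last fix is general: an RCU read-side critical section must not sleep, while submitting and waiting on superblock I/O clearly can, so the iteration has to move under a sleeping lock. Below is a small userspace analogue of the fixed shape, offered as a sketch only; the device struct, the list, and write_super() are invented for illustration, and a pthreads mutex stands in for the kernel mutex.

/*
 * Illustrative analogue of write_dev_supers after the fix: iterate
 * the device list under a mutex because the loop body may block,
 * which would be illegal inside rcu_read_lock()/rcu_read_unlock().
 */
#include <pthread.h>
#include <stdio.h>

struct device {
	const char *name;
	struct device *next;
};

static struct device sdb = { "sdb", NULL };
static struct device sda = { "sda", &sdb };
static struct device *device_list = &sda;
static pthread_mutex_t device_list_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for a superblock write; may block on I/O */
static void write_super(struct device *dev)
{
	printf("writing super to %s\n", dev->name);
}

static void write_all_supers(void)
{
	struct device *dev;

	/* a mutex may be held across blocking calls; an RCU read lock may not */
	pthread_mutex_lock(&device_list_mutex);
	for (dev = device_list; dev; dev = dev->next)
		write_super(dev);
	pthread_mutex_unlock(&device_list_mutex);
}

int main(void)
{
	write_all_supers();
	return 0;
}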