author    Ingo Molnar <mingo@elte.hu>  2010-12-08 20:15:26 +0100
committer Ingo Molnar <mingo@elte.hu>  2010-12-08 20:15:29 +0100
commit    8e9255e6a2141e050d51bc4d96dbef494a87d653 (patch)
tree      f190b142830153eaab05555a93c4f71a144ba3d4 /fs
parent    5091faa449ee0b7d73bc296a93bca9540fc51d0a (diff)
parent    6313e3c21743cc88bb5bd8aa72948ee1e83937b6 (diff)
Merge branch 'linus' into sched/core
Merge reason: we want to queue up dependent cleanup

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs')
-rw-r--r--  fs/autofs4/root.c | 12
-rw-r--r--  fs/btrfs/compression.c | 15
-rw-r--r--  fs/btrfs/ctree.h | 6
-rw-r--r--  fs/btrfs/disk-io.c | 38
-rw-r--r--  fs/btrfs/export.c | 76
-rw-r--r--  fs/btrfs/extent-tree.c | 2
-rw-r--r--  fs/btrfs/extent_io.c | 77
-rw-r--r--  fs/btrfs/extent_io.h | 3
-rw-r--r--  fs/btrfs/file.c | 7
-rw-r--r--  fs/btrfs/inode.c | 294
-rw-r--r--  fs/btrfs/ioctl.c | 31
-rw-r--r--  fs/btrfs/ordered-data.c | 67
-rw-r--r--  fs/btrfs/ordered-data.h | 3
-rw-r--r--  fs/btrfs/super.c | 41
-rw-r--r--  fs/btrfs/transaction.c | 5
-rw-r--r--  fs/btrfs/tree-log.c | 21
-rw-r--r--  fs/cifs/Kconfig | 8
-rw-r--r--  fs/cifs/cifsacl.c | 48
-rw-r--r--  fs/cifs/cifsfs.c | 2
-rw-r--r--  fs/cifs/cifsproto.h | 6
-rw-r--r--  fs/cifs/connect.c | 5
-rw-r--r--  fs/cifs/dns_resolve.c | 2
-rw-r--r--  fs/cifs/file.c | 4
-rw-r--r--  fs/cifs/fscache.c | 12
-rw-r--r--  fs/cifs/inode.c | 36
-rw-r--r--  fs/cifs/readdir.c | 29
-rw-r--r--  fs/cifs/xattr.c | 55
-rw-r--r--  fs/compat.c | 28
-rw-r--r--  fs/exec.c | 36
-rw-r--r--  fs/fuse/file.c | 10
-rw-r--r--  fs/gfs2/quota.c | 15
-rw-r--r--  fs/ioprio.c | 31
-rw-r--r--  fs/nfs/dir.c | 68
-rw-r--r--  fs/nfs/direct.c | 2
-rw-r--r--  fs/nfs/internal.h | 9
-rw-r--r--  fs/nfs/nfs2xdr.c | 4
-rw-r--r--  fs/nfs/nfs3xdr.c | 4
-rw-r--r--  fs/nfs/nfs4xdr.c | 6
-rw-r--r--  fs/nilfs2/dat.c | 2
-rw-r--r--  fs/nilfs2/ioctl.c | 4
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 14
-rw-r--r--  fs/ocfs2/dcache.c | 1
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 2
-rw-r--r--  fs/ocfs2/ocfs2.h | 6
-rw-r--r--  fs/ocfs2/stack_user.c | 2
-rw-r--r--  fs/pipe.c | 14
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 3
-rw-r--r--  fs/reiserfs/ioctl.c | 7
-rw-r--r--  fs/reiserfs/xattr_acl.c | 6
-rw-r--r--  fs/splice.c | 24
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 94
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 35
-rw-r--r--  fs/xfs/xfs_bmap.c | 85
-rw-r--r--  fs/xfs/xfs_bmap.h | 5
-rw-r--r--  fs/xfs/xfs_dfrag.c | 13
-rw-r--r--  fs/xfs/xfs_error.c | 3
-rw-r--r--  fs/xfs/xfs_error.h | 5
-rw-r--r--  fs/xfs/xfs_inode_item.c | 31
59 files changed, 1062 insertions(+), 414 deletions(-)
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index d5c1401f0031..d34896cfb19f 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -980,19 +980,11 @@ static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
}
}
-static DEFINE_MUTEX(autofs4_ioctl_mutex);
-
static long autofs4_root_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
- long ret;
struct inode *inode = filp->f_dentry->d_inode;
-
- mutex_lock(&autofs4_ioctl_mutex);
- ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
- mutex_unlock(&autofs4_ioctl_mutex);
-
- return ret;
+ return autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
}
#ifdef CONFIG_COMPAT
@@ -1002,13 +994,11 @@ static long autofs4_root_compat_ioctl(struct file *filp,
struct inode *inode = filp->f_path.dentry->d_inode;
int ret;
- mutex_lock(&autofs4_ioctl_mutex);
if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
else
ret = autofs4_root_ioctl_unlocked(inode, filp, cmd,
(unsigned long)compat_ptr(arg));
- mutex_unlock(&autofs4_ioctl_mutex);
return ret;
}
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 7845d1f7d1d9..b50bc4bd5c56 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -91,23 +91,10 @@ static inline int compressed_bio_size(struct btrfs_root *root,
static struct bio *compressed_bio_alloc(struct block_device *bdev,
u64 first_byte, gfp_t gfp_flags)
{
- struct bio *bio;
int nr_vecs;
nr_vecs = bio_get_nr_vecs(bdev);
- bio = bio_alloc(gfp_flags, nr_vecs);
-
- if (bio == NULL && (current->flags & PF_MEMALLOC)) {
- while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(gfp_flags, nr_vecs);
- }
-
- if (bio) {
- bio->bi_size = 0;
- bio->bi_bdev = bdev;
- bio->bi_sector = first_byte >> 9;
- }
- return bio;
+ return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
}
static int check_compressed_csum(struct inode *inode,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8db9234f6b41..af52f6d7a4d8 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -808,9 +808,9 @@ struct btrfs_block_group_cache {
int extents_thresh;
int free_extents;
int total_bitmaps;
- int ro:1;
- int dirty:1;
- int iref:1;
+ unsigned int ro:1;
+ unsigned int dirty:1;
+ unsigned int iref:1;
int disk_cache_state;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index fb827d0d7181..c547cca26a26 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -28,6 +28,7 @@
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
+#include <linux/migrate.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -355,6 +356,8 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
btrfs_header_generation(eb));
BUG_ON(ret);
+ WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
+
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
WARN_ON(1);
@@ -693,6 +696,29 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
__btree_submit_bio_done);
}
+static int btree_migratepage(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ /*
+ * we can't safely write a btree page from here,
+ * we haven't done the locking hook
+ */
+ if (PageDirty(page))
+ return -EAGAIN;
+ /*
+ * Buffers may be managed in a filesystem specific way.
+ * We must have no buffers or drop them.
+ */
+ if (page_has_private(page) &&
+ !try_to_release_page(page, GFP_KERNEL))
+ return -EAGAIN;
+#ifdef CONFIG_MIGRATION
+ return migrate_page(mapping, newpage, page);
+#else
+ return -ENOSYS;
+#endif
+}
+
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
struct extent_io_tree *tree;
@@ -707,8 +733,7 @@ static int btree_writepage(struct page *page, struct writeback_control *wbc)
}
redirty_page_for_writepage(wbc, page);
- eb = btrfs_find_tree_block(root, page_offset(page),
- PAGE_CACHE_SIZE);
+ eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
WARN_ON(!eb);
was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
@@ -799,6 +824,9 @@ static const struct address_space_operations btree_aops = {
.releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage,
.sync_page = block_sync_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = btree_migratepage,
+#endif
};
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
@@ -1538,10 +1566,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
GFP_NOFS);
struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
- struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
- GFP_NOFS);
- struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
- GFP_NOFS);
+ struct btrfs_root *tree_root = btrfs_sb(sb);
+ struct btrfs_fs_info *fs_info = tree_root->fs_info;
struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 951ef09b82f4..6f0444473594 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -232,9 +232,85 @@ fail:
return ERR_PTR(ret);
}
+static int btrfs_get_name(struct dentry *parent, char *name,
+ struct dentry *child)
+{
+ struct inode *inode = child->d_inode;
+ struct inode *dir = parent->d_inode;
+ struct btrfs_path *path;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
+ struct btrfs_inode_ref *iref;
+ struct btrfs_root_ref *rref;
+ struct extent_buffer *leaf;
+ unsigned long name_ptr;
+ struct btrfs_key key;
+ int name_len;
+ int ret;
+
+ if (!dir || !inode)
+ return -EINVAL;
+
+ if (!S_ISDIR(dir->i_mode))
+ return -EINVAL;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->leave_spinning = 1;
+
+ if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ key.objectid = BTRFS_I(inode)->root->root_key.objectid;
+ key.type = BTRFS_ROOT_BACKREF_KEY;
+ key.offset = (u64)-1;
+ root = root->fs_info->tree_root;
+ } else {
+ key.objectid = inode->i_ino;
+ key.offset = dir->i_ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ }
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ btrfs_free_path(path);
+ return ret;
+ } else if (ret > 0) {
+ if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ path->slots[0]--;
+ } else {
+ btrfs_free_path(path);
+ return -ENOENT;
+ }
+ }
+ leaf = path->nodes[0];
+
+ if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+ rref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_root_ref);
+ name_ptr = (unsigned long)(rref + 1);
+ name_len = btrfs_root_ref_name_len(leaf, rref);
+ } else {
+ iref = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_inode_ref);
+ name_ptr = (unsigned long)(iref + 1);
+ name_len = btrfs_inode_ref_name_len(leaf, iref);
+ }
+
+ read_extent_buffer(leaf, name, name_ptr, name_len);
+ btrfs_free_path(path);
+
+ /*
+ * have to add the null termination to make sure that reconnect_path
+ * gets the right len for strlen
+ */
+ name[name_len] = '\0';
+
+ return 0;
+}
+
const struct export_operations btrfs_export_ops = {
.encode_fh = btrfs_encode_fh,
.fh_to_dentry = btrfs_fh_to_dentry,
.fh_to_parent = btrfs_fh_to_parent,
.get_parent = btrfs_get_parent,
+ .get_name = btrfs_get_name,
};
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0c097f3aec41..bcd59c7dfb57 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3412,7 +3412,7 @@ again:
* our reservation.
*/
if (unused <= space_info->total_bytes) {
- unused -= space_info->total_bytes;
+ unused = space_info->total_bytes - unused;
if (unused >= num_bytes) {
if (!reserved)
space_info->bytes_reserved += orig_bytes;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eac10e3260a9..3e86b9f36507 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1828,9 +1828,9 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
bio_put(bio);
}
-static struct bio *
-extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
- gfp_t gfp_flags)
+struct bio *
+btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
+ gfp_t gfp_flags)
{
struct bio *bio;
@@ -1919,7 +1919,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
else
nr = bio_get_nr_vecs(bdev);
- bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+ bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
bio_add_page(bio, page, page_size, offset);
bio->bi_end_io = end_io_func;
@@ -2901,21 +2901,53 @@ out:
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent)
{
- int ret;
+ int ret = 0;
u64 off = start;
u64 max = start + len;
u32 flags = 0;
+ u32 found_type;
+ u64 last;
u64 disko = 0;
+ struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
+ struct btrfs_path *path;
+ struct btrfs_file_extent_item *item;
int end = 0;
u64 em_start = 0, em_len = 0;
unsigned long emflags;
- ret = 0;
+ int hole = 0;
if (len == 0)
return -EINVAL;
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ path->leave_spinning = 1;
+
+ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
+ path, inode->i_ino, -1, 0);
+ if (ret < 0) {
+ btrfs_free_path(path);
+ return ret;
+ }
+ WARN_ON(!ret);
+ path->slots[0]--;
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+ found_type = btrfs_key_type(&found_key);
+
+ /* No extents, just return */
+ if (found_key.objectid != inode->i_ino ||
+ found_type != BTRFS_EXTENT_DATA_KEY) {
+ btrfs_free_path(path);
+ return 0;
+ }
+ last = found_key.offset;
+ btrfs_free_path(path);
+
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
&cached_state, GFP_NOFS);
em = get_extent(inode, NULL, 0, off, max - off, 0);
@@ -2925,11 +2957,18 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
ret = PTR_ERR(em);
goto out;
}
+
while (!end) {
+ hole = 0;
off = em->start + em->len;
if (off >= max)
end = 1;
+ if (em->block_start == EXTENT_MAP_HOLE) {
+ hole = 1;
+ goto next;
+ }
+
em_start = em->start;
em_len = em->len;
@@ -2939,8 +2978,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (em->block_start == EXTENT_MAP_LAST_BYTE) {
end = 1;
flags |= FIEMAP_EXTENT_LAST;
- } else if (em->block_start == EXTENT_MAP_HOLE) {
- flags |= FIEMAP_EXTENT_UNWRITTEN;
} else if (em->block_start == EXTENT_MAP_INLINE) {
flags |= (FIEMAP_EXTENT_DATA_INLINE |
FIEMAP_EXTENT_NOT_ALIGNED);
@@ -2953,10 +2990,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
flags |= FIEMAP_EXTENT_ENCODED;
+next:
emflags = em->flags;
free_extent_map(em);
em = NULL;
-
if (!end) {
em = get_extent(inode, NULL, 0, off, max - off, 0);
if (!em)
@@ -2967,15 +3004,23 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
emflags = em->flags;
}
+
if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
flags |= FIEMAP_EXTENT_LAST;
end = 1;
}
- ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
- em_len, flags);
- if (ret)
- goto out_free;
+ if (em_start == last) {
+ flags |= FIEMAP_EXTENT_LAST;
+ end = 1;
+ }
+
+ if (!hole) {
+ ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+ em_len, flags);
+ if (ret)
+ goto out_free;
+ }
}
out_free:
free_extent_map(em);
@@ -3836,8 +3881,10 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
spin_lock(&tree->buffer_lock);
eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
- if (!eb)
- goto out;
+ if (!eb) {
+ spin_unlock(&tree->buffer_lock);
+ return ret;
+ }
if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
ret = 0;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 1c6d4f342ef7..4183c8178f01 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -310,4 +310,7 @@ int extent_clear_unlock_delalloc(struct inode *inode,
struct extent_io_tree *tree,
u64 start, u64 end, struct page *locked_page,
unsigned long op);
+struct bio *
+btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
+ gfp_t gfp_flags);
#endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e354c33df082..c1faded5fca0 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1047,8 +1047,14 @@ out:
if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ num_written = PTR_ERR(trans);
+ goto done;
+ }
+ mutex_lock(&inode->i_mutex);
ret = btrfs_log_dentry_safe(trans, root,
file->f_dentry);
+ mutex_unlock(&inode->i_mutex);
if (ret == 0) {
ret = btrfs_sync_log(trans, root);
if (ret == 0)
@@ -1067,6 +1073,7 @@ out:
(start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
}
}
+done:
current->backing_dev_info = NULL;
return num_written ? num_written : err;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 558cac2dfa54..8039390bd6a6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4501,6 +4501,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
BTRFS_I(inode)->index_cnt = 2;
BTRFS_I(inode)->root = root;
BTRFS_I(inode)->generation = trans->transid;
+ inode->i_generation = BTRFS_I(inode)->generation;
btrfs_set_inode_space_info(root, inode);
if (mode & S_IFDIR)
@@ -4622,12 +4623,12 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
}
static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
- struct dentry *dentry, struct inode *inode,
- int backref, u64 index)
+ struct inode *dir, struct dentry *dentry,
+ struct inode *inode, int backref, u64 index)
{
- int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
- inode, dentry->d_name.name,
- dentry->d_name.len, backref, index);
+ int err = btrfs_add_link(trans, dir, inode,
+ dentry->d_name.name, dentry->d_name.len,
+ backref, index);
if (!err) {
d_instantiate(dentry, inode);
return 0;
@@ -4668,8 +4669,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
btrfs_set_trans_block_group(trans, dir);
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len,
- dentry->d_parent->d_inode->i_ino, objectid,
+ dentry->d_name.len, dir->i_ino, objectid,
BTRFS_I(dir)->block_group, mode, &index);
err = PTR_ERR(inode);
if (IS_ERR(inode))
@@ -4682,7 +4682,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
}
btrfs_set_trans_block_group(trans, inode);
- err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+ err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
@@ -4730,10 +4730,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
btrfs_set_trans_block_group(trans, dir);
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len,
- dentry->d_parent->d_inode->i_ino,
- objectid, BTRFS_I(dir)->block_group, mode,
- &index);
+ dentry->d_name.len, dir->i_ino, objectid,
+ BTRFS_I(dir)->block_group, mode, &index);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_unlock;
@@ -4745,7 +4743,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
}
btrfs_set_trans_block_group(trans, inode);
- err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+ err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
@@ -4787,6 +4785,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
return -EPERM;
btrfs_inc_nlink(inode);
+ inode->i_ctime = CURRENT_TIME;
err = btrfs_set_inode_index(dir, &index);
if (err)
@@ -4805,15 +4804,17 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
btrfs_set_trans_block_group(trans, dir);
ihold(inode);
- err = btrfs_add_nondir(trans, dentry, inode, 1, index);
+ err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
if (err) {
drop_inode = 1;
} else {
+ struct dentry *parent = dget_parent(dentry);
btrfs_update_inode_block_group(trans, dir);
err = btrfs_update_inode(trans, root, inode);
BUG_ON(err);
- btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
+ btrfs_log_new_name(trans, inode, NULL, parent);
+ dput(parent);
}
nr = trans->blocks_used;
@@ -4853,8 +4854,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
btrfs_set_trans_block_group(trans, dir);
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len,
- dentry->d_parent->d_inode->i_ino, objectid,
+ dentry->d_name.len, dir->i_ino, objectid,
BTRFS_I(dir)->block_group, S_IFDIR | mode,
&index);
if (IS_ERR(inode)) {
@@ -4877,9 +4877,8 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (err)
goto out_fail;
- err = btrfs_add_link(trans, dentry->d_parent->d_inode,
- inode, dentry->d_name.name,
- dentry->d_name.len, 0, index);
+ err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
+ dentry->d_name.len, 0, index);
if (err)
goto out_fail;
@@ -5535,13 +5534,21 @@ struct btrfs_dio_private {
u64 bytes;
u32 *csums;
void *private;
+
+ /* number of bios pending for this dio */
+ atomic_t pending_bios;
+
+ /* IO errors */
+ int errors;
+
+ struct bio *orig_bio;
};
static void btrfs_endio_direct_read(struct bio *bio, int err)
{
+ struct btrfs_dio_private *dip = bio->bi_private;
struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
struct bio_vec *bvec = bio->bi_io_vec;
- struct btrfs_dio_private *dip = bio->bi_private;
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start;
@@ -5595,15 +5602,18 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
struct btrfs_trans_handle *trans;
struct btrfs_ordered_extent *ordered = NULL;
struct extent_state *cached_state = NULL;
+ u64 ordered_offset = dip->logical_offset;
+ u64 ordered_bytes = dip->bytes;
int ret;
if (err)
goto out_done;
-
- ret = btrfs_dec_test_ordered_pending(inode, &ordered,
- dip->logical_offset, dip->bytes);
+again:
+ ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
+ &ordered_offset,
+ ordered_bytes);
if (!ret)
- goto out_done;
+ goto out_test;
BUG_ON(!ordered);
@@ -5663,8 +5673,20 @@ out_unlock:
out:
btrfs_delalloc_release_metadata(inode, ordered->len);
btrfs_end_transaction(trans, root);
+ ordered_offset = ordered->file_offset + ordered->len;
btrfs_put_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered);
+
+out_test:
+ /*
+ * our bio might span multiple ordered extents. If we haven't
+ * completed the accounting for the whole dio, go back and try again
+ */
+ if (ordered_offset < dip->logical_offset + dip->bytes) {
+ ordered_bytes = dip->logical_offset + dip->bytes -
+ ordered_offset;
+ goto again;
+ }
out_done:
bio->bi_private = dip->private;
@@ -5684,6 +5706,176 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
return 0;
}
+static void btrfs_end_dio_bio(struct bio *bio, int err)
+{
+ struct btrfs_dio_private *dip = bio->bi_private;
+
+ if (err) {
+ printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
+ "disk_bytenr %lu len %u err no %d\n",
+ dip->inode->i_ino, bio->bi_rw, bio->bi_sector,
+ bio->bi_size, err);
+ dip->errors = 1;
+
+ /*
+ * before atomic variable goto zero, we must make sure
+ * dip->errors is perceived to be set.
+ */
+ smp_mb__before_atomic_dec();
+ }
+
+ /* if there are more bios still pending for this dio, just exit */
+ if (!atomic_dec_and_test(&dip->pending_bios))
+ goto out;
+
+ if (dip->errors)
+ bio_io_error(dip->orig_bio);
+ else {
+ set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
+ bio_endio(dip->orig_bio, 0);
+ }
+out:
+ bio_put(bio);
+}
+
+static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
+ u64 first_sector, gfp_t gfp_flags)
+{
+ int nr_vecs = bio_get_nr_vecs(bdev);
+ return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
+}
+
+static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
+ int rw, u64 file_offset, int skip_sum,
+ u32 *csums)
+{
+ int write = rw & REQ_WRITE;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+
+ bio_get(bio);
+ ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+ if (ret)
+ goto err;
+
+ if (write && !skip_sum) {
+ ret = btrfs_wq_submit_bio(root->fs_info,
+ inode, rw, bio, 0, 0,
+ file_offset,
+ __btrfs_submit_bio_start_direct_io,
+ __btrfs_submit_bio_done);
+ goto err;
+ } else if (!skip_sum)
+ btrfs_lookup_bio_sums_dio(root, inode, bio,
+ file_offset, csums);
+
+ ret = btrfs_map_bio(root, rw, bio, 0, 1);
+err:
+ bio_put(bio);
+ return ret;
+}
+
+static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
+ int skip_sum)
+{
+ struct inode *inode = dip->inode;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+ struct bio *bio;
+ struct bio *orig_bio = dip->orig_bio;
+ struct bio_vec *bvec = orig_bio->bi_io_vec;
+ u64 start_sector = orig_bio->bi_sector;
+ u64 file_offset = dip->logical_offset;
+ u64 submit_len = 0;
+ u64 map_length;
+ int nr_pages = 0;
+ u32 *csums = dip->csums;
+ int ret = 0;
+
+ bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
+ if (!bio)
+ return -ENOMEM;
+ bio->bi_private = dip;
+ bio->bi_end_io = btrfs_end_dio_bio;
+ atomic_inc(&dip->pending_bios);
+
+ map_length = orig_bio->bi_size;
+ ret = btrfs_map_block(map_tree, READ, start_sector << 9,
+ &map_length, NULL, 0);
+ if (ret) {
+ bio_put(bio);
+ return -EIO;
+ }
+
+ while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
+ if (unlikely(map_length < submit_len + bvec->bv_len ||
+ bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset) < bvec->bv_len)) {
+ /*
+ * inc the count before we submit the bio so
+ * we know the end IO handler won't happen before
+ * we inc the count. Otherwise, the dip might get freed
+ * before we're done setting it up
+ */
+ atomic_inc(&dip->pending_bios);
+ ret = __btrfs_submit_dio_bio(bio, inode, rw,
+ file_offset, skip_sum,
+ csums);
+ if (ret) {
+ bio_put(bio);
+ atomic_dec(&dip->pending_bios);
+ goto out_err;
+ }
+
+ if (!skip_sum)
+ csums = csums + nr_pages;
+ start_sector += submit_len >> 9;
+ file_offset += submit_len;
+
+ submit_len = 0;
+ nr_pages = 0;
+
+ bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
+ start_sector, GFP_NOFS);
+ if (!bio)
+ goto out_err;
+ bio->bi_private = dip;
+ bio->bi_end_io = btrfs_end_dio_bio;
+
+ map_length = orig_bio->bi_size;
+ ret = btrfs_map_block(map_tree, READ, start_sector << 9,
+ &map_length, NULL, 0);
+ if (ret) {
+ bio_put(bio);
+ goto out_err;
+ }
+ } else {
+ submit_len += bvec->bv_len;
+ nr_pages ++;
+ bvec++;
+ }
+ }
+
+ ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
+ csums);
+ if (!ret)
+ return 0;
+
+ bio_put(bio);
+out_err:
+ dip->errors = 1;
+ /*
+ * before atomic variable goto zero, we must
+ * make sure dip->errors is perceived to be set.
+ */
+ smp_mb__before_atomic_dec();
+ if (atomic_dec_and_test(&dip->pending_bios))
+ bio_io_error(dip->orig_bio);
+
+ /* bio_end_io() will handle error, so we needn't return it */
+ return 0;
+}
+
static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset)
{
@@ -5723,36 +5915,18 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
dip->disk_bytenr = (u64)bio->bi_sector << 9;
bio->bi_private = dip;
+ dip->errors = 0;
+ dip->orig_bio = bio;
+ atomic_set(&dip->pending_bios, 0);
if (write)
bio->bi_end_io = btrfs_endio_direct_write;
else
bio->bi_end_io = btrfs_endio_direct_read;
- ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
- if (ret)
- goto out_err;
-
- if (write && !skip_sum) {
- ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, rw, bio, 0, 0,
- dip->logical_offset,
- __btrfs_submit_bio_start_direct_io,
- __btrfs_submit_bio_done);
- if (ret)
- goto out_err;
+ ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
+ if (!ret)
return;
- } else if (!skip_sum)
- btrfs_lookup_bio_sums_dio(root, inode, bio,
- dip->logical_offset, dip->csums);
-
- ret = btrfs_map_bio(root, rw, bio, 0, 1);
- if (ret)
- goto out_err;
- return;
-out_err:
- kfree(dip->csums);
- kfree(dip);
free_ordered:
/*
* If this is a write, we need to clean up the reserved space and kill
@@ -6607,8 +6781,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
BUG_ON(ret);
if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
- btrfs_log_new_name(trans, old_inode, old_dir,
- new_dentry->d_parent);
+ struct dentry *parent = dget_parent(new_dentry);
+ btrfs_log_new_name(trans, old_inode, old_dir, parent);
+ dput(parent);
btrfs_end_log_trans(root);
}
out_fail:
@@ -6758,8 +6933,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
btrfs_set_trans_block_group(trans, dir);
inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
- dentry->d_name.len,
- dentry->d_parent->d_inode->i_ino, objectid,
+ dentry->d_name.len, dir->i_ino, objectid,
BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
&index);
err = PTR_ERR(inode);
@@ -6773,7 +6947,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
}
btrfs_set_trans_block_group(trans, inode);
- err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+ err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
@@ -6844,6 +7018,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
+ u64 i_size;
int ret = 0;
bool own_trans = true;
@@ -6885,11 +7060,11 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
(actual_len > inode->i_size) &&
(cur_offset > inode->i_size)) {
if (cur_offset > actual_len)
- i_size_write(inode, actual_len);
+ i_size = actual_len;
else
- i_size_write(inode, cur_offset);
- i_size_write(inode, cur_offset);
- btrfs_ordered_update_i_size(inode, cur_offset, NULL);
+ i_size = cur_offset;
+ i_size_write(inode, i_size);
+ btrfs_ordered_update_i_size(inode, i_size, NULL);
}
ret = btrfs_update_inode(trans, root, inode);
@@ -6943,6 +7118,10 @@ static long btrfs_fallocate(struct inode *inode, int mode,
btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
mutex_lock(&inode->i_mutex);
+ ret = inode_newsize_ok(inode, alloc_end);
+ if (ret)
+ goto out;
+
if (alloc_start > inode->i_size) {
ret = btrfs_cont_expand(inode, alloc_start);
if (ret)
@@ -7139,6 +7318,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
+ .getattr = btrfs_getattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 463d91b4dd3a..f1c9bb4079ed 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -233,7 +233,8 @@ static noinline int create_subvol(struct btrfs_root *root,
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
struct btrfs_root *new_root;
- struct inode *dir = dentry->d_parent->d_inode;
+ struct dentry *parent = dget_parent(dentry);
+ struct inode *dir;
int ret;
int err;
u64 objectid;
@@ -242,8 +243,13 @@ static noinline int create_subvol(struct btrfs_root *root,
ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
0, &objectid);
- if (ret)
+ if (ret) {
+ dput(parent);
return ret;
+ }
+
+ dir = parent->d_inode;
+
/*
* 1 - inode item
* 2 - refs
@@ -251,8 +257,10 @@ static noinline int create_subvol(struct btrfs_root *root,
* 2 - dir items
*/
trans = btrfs_start_transaction(root, 6);
- if (IS_ERR(trans))
+ if (IS_ERR(trans)) {
+ dput(parent);
return PTR_ERR(trans);
+ }
leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
0, objectid, NULL, 0, 0, 0);
@@ -339,6 +347,7 @@ static noinline int create_subvol(struct btrfs_root *root,
d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
fail:
+ dput(parent);
if (async_transid) {
*async_transid = trans->transid;
err = btrfs_commit_transaction_async(trans, root, 1);
@@ -354,6 +363,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
char *name, int namelen, u64 *async_transid)
{
struct inode *inode;
+ struct dentry *parent;
struct btrfs_pending_snapshot *pending_snapshot;
struct btrfs_trans_handle *trans;
int ret;
@@ -396,7 +406,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
btrfs_orphan_cleanup(pending_snapshot->snap);
- inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
+ parent = dget_parent(dentry);
+ inode = btrfs_lookup_dentry(parent->d_inode, dentry);
+ dput(parent);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto fail;
@@ -1669,12 +1681,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
olen = len = src->i_size - off;
/* if we extend to eof, continue to block boundary */
if (off + len == src->i_size)
- len = ((src->i_size + bs-1) & ~(bs-1))
- - off;
+ len = ALIGN(src->i_size, bs) - off;
/* verify the end result is block aligned */
- if ((off & (bs-1)) ||
- ((off + len) & (bs-1)))
+ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+ !IS_ALIGNED(destoff, bs))
goto out_unlock;
/* do any pending delalloc/csum calc on src, one way or
@@ -1874,8 +1885,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
* but shouldn't round up the file size
*/
endoff = new_key.offset + datal;
- if (endoff > off+olen)
- endoff = off+olen;
+ if (endoff > destoff+olen)
+ endoff = destoff+olen;
if (endoff > inode->i_size)
btrfs_i_size_write(inode, endoff);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index f4621f6deca1..ae7737e352c9 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -250,6 +250,73 @@ int btrfs_add_ordered_sum(struct inode *inode,
/*
* this is used to account for finished IO across a given range
+ * of the file. The IO may span ordered extents. If
+ * a given ordered_extent is completely done, 1 is returned, otherwise
+ * 0.
+ *
+ * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
+ * to make sure this function only returns 1 once for a given ordered extent.
+ *
+ * file_offset is updated to one byte past the range that is recorded as
+ * complete. This allows you to walk forward in the file.
+ */
+int btrfs_dec_test_first_ordered_pending(struct inode *inode,
+ struct btrfs_ordered_extent **cached,
+ u64 *file_offset, u64 io_size)
+{
+ struct btrfs_ordered_inode_tree *tree;
+ struct rb_node *node;
+ struct btrfs_ordered_extent *entry = NULL;
+ int ret;
+ u64 dec_end;
+ u64 dec_start;
+ u64 to_dec;
+
+ tree = &BTRFS_I(inode)->ordered_tree;
+ spin_lock(&tree->lock);
+ node = tree_search(tree, *file_offset);
+ if (!node) {
+ ret = 1;
+ goto out;
+ }
+
+ entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+ if (!offset_in_entry(entry, *file_offset)) {
+ ret = 1;
+ goto out;
+ }
+
+ dec_start = max(*file_offset, entry->file_offset);
+ dec_end = min(*file_offset + io_size, entry->file_offset +
+ entry->len);
+ *file_offset = dec_end;
+ if (dec_start > dec_end) {
+ printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
+ (unsigned long long)dec_start,
+ (unsigned long long)dec_end);
+ }
+ to_dec = dec_end - dec_start;
+ if (to_dec > entry->bytes_left) {
+ printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
+ (unsigned long long)entry->bytes_left,
+ (unsigned long long)to_dec);
+ }
+ entry->bytes_left -= to_dec;
+ if (entry->bytes_left == 0)
+ ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+ else
+ ret = 1;
+out:
+ if (!ret && cached && entry) {
+ *cached = entry;
+ atomic_inc(&entry->refs);
+ }
+ spin_unlock(&tree->lock);
+ return ret == 0;
+}
+
+/*
+ * this is used to account for finished IO across a given range
* of the file. The IO should not span ordered extents. If
* a given ordered_extent is completely done, 1 is returned, otherwise
* 0.
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 8ac365492a3f..61dca83119dd 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -141,6 +141,9 @@ int btrfs_remove_ordered_extent(struct inode *inode,
int btrfs_dec_test_ordered_pending(struct inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size);
+int btrfs_dec_test_first_ordered_pending(struct inode *inode,
+ struct btrfs_ordered_extent **cached,
+ u64 *file_offset, u64 io_size);
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len, int type);
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8299a25ffc8f..dbb51ea7a13c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -244,6 +244,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_space_cache:
printk(KERN_INFO "btrfs: enabling disk space caching\n");
btrfs_set_opt(info->mount_opt, SPACE_CACHE);
+ break;
case Opt_clear_cache:
printk(KERN_INFO "btrfs: force clearing of disk cache\n");
btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
@@ -562,12 +563,26 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
static int btrfs_test_super(struct super_block *s, void *data)
{
- struct btrfs_fs_devices *test_fs_devices = data;
+ struct btrfs_root *test_root = data;
struct btrfs_root *root = btrfs_sb(s);
- return root->fs_info->fs_devices == test_fs_devices;
+ /*
+ * If this super block is going away, return false as it
+ * can't match as an existing super block.
+ */
+ if (!atomic_read(&s->s_active))
+ return 0;
+ return root->fs_info->fs_devices == test_root->fs_info->fs_devices;
+}
+
+static int btrfs_set_super(struct super_block *s, void *data)
+{
+ s->s_fs_info = data;
+
+ return set_anon_super(s, data);
}
+
/*
* Find a superblock for the given device / mount point.
*
@@ -581,6 +596,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
struct super_block *s;
struct dentry *root;
struct btrfs_fs_devices *fs_devices = NULL;
+ struct btrfs_root *tree_root = NULL;
+ struct btrfs_fs_info *fs_info = NULL;
fmode_t mode = FMODE_READ;
char *subvol_name = NULL;
u64 subvol_objectid = 0;
@@ -608,8 +625,24 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
goto error_close_devices;
}
+ /*
+ * Setup a dummy root and fs_info for test/set super. This is because
+ * we don't actually fill this stuff out until open_ctree, but we need
+ * it for searching for existing supers, so this lets us do that and
+ * then open_ctree will properly initialize everything later.
+ */
+ fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
+ tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+ if (!fs_info || !tree_root) {
+ error = -ENOMEM;
+ goto error_close_devices;
+ }
+ fs_info->tree_root = tree_root;
+ fs_info->fs_devices = fs_devices;
+ tree_root->fs_info = fs_info;
+
bdev = fs_devices->latest_bdev;
- s = sget(fs_type, btrfs_test_super, set_anon_super, fs_devices);
+ s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root);
if (IS_ERR(s))
goto error_s;
@@ -675,6 +708,8 @@ error_s:
error = PTR_ERR(s);
error_close_devices:
btrfs_close_devices(fs_devices);
+ kfree(fs_info);
+ kfree(tree_root);
error_free_subvol_name:
kfree(subvol_name);
return ERR_PTR(error);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 1fffbc017bdf..f50e931fc217 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -902,6 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_root *root = pending->root;
struct btrfs_root *parent_root;
struct inode *parent_inode;
+ struct dentry *parent;
struct dentry *dentry;
struct extent_buffer *tmp;
struct extent_buffer *old;
@@ -941,7 +942,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
trans->block_rsv = &pending->block_rsv;
dentry = pending->dentry;
- parent_inode = dentry->d_parent->d_inode;
+ parent = dget_parent(dentry);
+ parent_inode = parent->d_inode;
parent_root = BTRFS_I(parent_inode)->root;
record_root_in_trans(trans, parent_root);
@@ -989,6 +991,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
parent_inode->i_ino, index,
dentry->d_name.name, dentry->d_name.len);
BUG_ON(ret);
+ dput(parent);
key.offset = (u64)-1;
pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a29f19384a27..054744ac5719 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2869,6 +2869,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
{
int ret = 0;
struct btrfs_root *root;
+ struct dentry *old_parent = NULL;
/*
* for regular files, if its inode is already on disk, we don't
@@ -2910,10 +2911,13 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
if (IS_ROOT(parent))
break;
- parent = parent->d_parent;
+ parent = dget_parent(parent);
+ dput(old_parent);
+ old_parent = parent;
inode = parent->d_inode;
}
+ dput(old_parent);
out:
return ret;
}
@@ -2945,6 +2949,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
{
int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb;
+ struct dentry *old_parent = NULL;
int ret = 0;
u64 last_committed = root->fs_info->last_trans_committed;
@@ -3016,10 +3021,13 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
if (IS_ROOT(parent))
break;
- parent = parent->d_parent;
+ parent = dget_parent(parent);
+ dput(old_parent);
+ old_parent = parent;
}
ret = 0;
end_trans:
+ dput(old_parent);
if (ret < 0) {
BUG_ON(ret != -ENOSPC);
root->fs_info->last_trans_log_full_commit = trans->transid;
@@ -3039,8 +3047,13 @@ end_no_trans:
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct dentry *dentry)
{
- return btrfs_log_inode_parent(trans, root, dentry->d_inode,
- dentry->d_parent, 0);
+ struct dentry *parent = dget_parent(dentry);
+ int ret;
+
+ ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
+ dput(parent);
+
+ return ret;
}
/*
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 0ed213970ced..ee45648b0d1a 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -4,6 +4,7 @@ config CIFS
select NLS
select CRYPTO
select CRYPTO_MD5
+ select CRYPTO_HMAC
select CRYPTO_ARC4
help
This is the client VFS module for the Common Internet File System
@@ -143,6 +144,13 @@ config CIFS_FSCACHE
to be cached locally on disk through the general filesystem cache
manager. If unsure, say N.
+config CIFS_ACL
+ bool "Provide CIFS ACL support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && CIFS_XATTR
+ help
+ Allows to fetch CIFS/NTFS ACL from the server. The DACL blob
+ is handed over to the application/caller.
+
config CIFS_EXPERIMENTAL
bool "CIFS Experimental Features (EXPERIMENTAL)"
depends on CIFS && EXPERIMENTAL
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index c9b4792ae825..c6ebea088ac7 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -560,7 +560,7 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
- return NULL;
+ return ERR_CAST(tlink);
xid = GetXid();
rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
@@ -568,7 +568,9 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
cifs_put_tlink(tlink);
- cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
+ cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
+ if (rc)
+ return ERR_PTR(rc);
return pntsd;
}
@@ -583,7 +585,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
- return NULL;
+ return ERR_CAST(tlink);
tcon = tlink_tcon(tlink);
xid = GetXid();
@@ -591,23 +593,22 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
&fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc) {
- cERROR(1, "Unable to open file to get ACL");
- goto out;
+ if (!rc) {
+ rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
+ CIFSSMBClose(xid, tcon, fid);
}
- rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
- cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
-
- CIFSSMBClose(xid, tcon, fid);
- out:
cifs_put_tlink(tlink);
FreeXid(xid);
+
+ cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
+ if (rc)
+ return ERR_PTR(rc);
return pntsd;
}
/* Retrieve an ACL from the server */
-static struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
+struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
struct inode *inode, const char *path,
u32 *pacllen)
{
@@ -695,7 +696,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
}
/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
-void
+int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
struct inode *inode, const char *path, const __u16 *pfid)
{
@@ -711,17 +712,21 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
- if (pntsd)
+ if (IS_ERR(pntsd)) {
+ rc = PTR_ERR(pntsd);
+ cERROR(1, "%s: error %d getting sec desc", __func__, rc);
+ } else {
rc = parse_sec_desc(pntsd, acllen, fattr);
- if (rc)
- cFYI(1, "parse sec desc failed rc = %d", rc);
+ kfree(pntsd);
+ if (rc)
+ cERROR(1, "parse sec desc failed rc = %d", rc);
+ }
- kfree(pntsd);
- return;
+ return rc;
}
/* Convert mode bits to an ACL so we can update the ACL on the server */
-int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
+int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
{
int rc = 0;
__u32 secdesclen = 0;
@@ -736,7 +741,10 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
/* Add three ACEs for owner, group, everyone getting rid of
other ACEs as chmod disables ACEs and set the security descriptor */
- if (pntsd) {
+ if (IS_ERR(pntsd)) {
+ rc = PTR_ERR(pntsd);
+ cERROR(1, "%s: error %d getting sec desc", __func__, rc);
+ } else {
/* allocate memory for the smb header,
set security descriptor request security descriptor
parameters, and secuirty descriptor itself */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9c3789762ab7..76c8a906a63e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -458,6 +458,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
seq_printf(s, ",acl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
seq_printf(s, ",mfsymlinks");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
+ seq_printf(s, ",fsc");
seq_printf(s, ",rsize=%d", cifs_sb->rsize);
seq_printf(s, ",wsize=%d", cifs_sb->wsize);
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 7ed69b6b5fe6..db961dc4fd3d 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -130,10 +130,12 @@ extern int cifs_get_file_info_unix(struct file *filp);
extern int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *search_path,
struct super_block *sb, int xid);
-extern void cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
+extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr, struct inode *inode,
const char *path, const __u16 *pfid);
-extern int mode_to_acl(struct inode *inode, const char *path, __u64);
+extern int mode_to_cifs_acl(struct inode *inode, const char *path, __u64);
+extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
+ const char *, u32 *);
extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
const char *);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 251a17c03545..32fa4d9b5dbc 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1352,6 +1352,11 @@ cifs_parse_mount_options(char *options, const char *devname,
"supported. Instead set "
"/proc/fs/cifs/LookupCacheEnabled to 0\n");
} else if (strnicmp(data, "fsc", 3) == 0) {
+#ifndef CONFIG_CIFS_FSCACHE
+ cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE"
+ "kernel config option set");
+ return 1;
+#endif
vol->fsc = true;
} else if (strnicmp(data, "mfsymlinks", 10) == 0) {
vol->mfsymlinks = true;
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 0eb87026cad3..548f06230a6d 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -66,7 +66,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
/* Search for server name delimiter */
sep = memchr(hostname, '\\', len);
if (sep)
- len = sep - unc;
+ len = sep - hostname;
else
cFYI(1, "%s: probably server name is whole unc: %s",
__func__, unc);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 06c3e83fa387..b857ce5db775 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2271,8 +2271,10 @@ void cifs_oplock_break_get(struct cifsFileInfo *cfile)
void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
+ struct super_block *sb = cfile->dentry->d_sb;
+
cifsFileInfo_put(cfile);
- cifs_sb_deactive(cfile->dentry->d_sb);
+ cifs_sb_deactive(sb);
}
const struct address_space_operations cifs_addr_ops = {
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index a2ad94efcfe6..297a43d0ff7f 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -2,7 +2,7 @@
* fs/cifs/fscache.c - CIFS filesystem cache interface
*
* Copyright (c) 2010 Novell, Inc.
- * Author(s): Suresh Jayaraman (sjayaraman@suse.de>
+ * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
@@ -67,10 +67,12 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
if (cifsi->fscache)
return;
- cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
+ cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
&cifs_fscache_inode_object_def, cifsi);
- cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache,
+ cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache,
cifsi->fscache);
+ }
}
void cifs_fscache_release_inode_cookie(struct inode *inode)
@@ -101,10 +103,8 @@ void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
{
if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
cifs_fscache_disable_inode_cookie(inode);
- else {
+ else
cifs_fscache_enable_inode_cookie(inode);
- cFYI(1, "CIFS: fscache inode cookie set");
- }
}
void cifs_fscache_reset_inode_cookie(struct inode *inode)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ef3a55bf86b6..28cb6e735943 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -689,8 +689,13 @@ int cifs_get_inode_info(struct inode **pinode,
#ifdef CONFIG_CIFS_EXPERIMENTAL
/* fill in 0777 bits from ACL */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- cFYI(1, "Getting mode bits from ACL");
- cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid);
+ rc = cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path,
+ pfid);
+ if (rc) {
+ cFYI(1, "%s: Getting ACL failed with error: %d",
+ __func__, rc);
+ goto cgii_exit;
+ }
}
#endif
@@ -881,8 +886,10 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
xid, NULL);
- if (!inode)
- return ERR_PTR(rc);
+ if (!inode) {
+ inode = ERR_PTR(rc);
+ goto out;
+ }
#ifdef CONFIG_CIFS_FSCACHE
/* populate tcon->resource_id */
@@ -898,13 +905,11 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
inode->i_uid = cifs_sb->mnt_uid;
inode->i_gid = cifs_sb->mnt_gid;
} else if (rc) {
- kfree(full_path);
- _FreeXid(xid);
iget_failed(inode);
- return ERR_PTR(rc);
+ inode = ERR_PTR(rc);
}
-
+out:
kfree(full_path);
/* can not call macro FreeXid here since in a void func
* TODO: This is no longer true
@@ -1670,7 +1675,9 @@ cifs_inode_needs_reval(struct inode *inode)
return false;
}
-/* check invalid_mapping flag and zap the cache if it's set */
+/*
+ * Zap the cache. Called when invalid_mapping flag is set.
+ */
static void
cifs_invalidate_mapping(struct inode *inode)
{
@@ -2115,9 +2122,14 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
if (attrs->ia_valid & ATTR_MODE) {
rc = 0;
#ifdef CONFIG_CIFS_EXPERIMENTAL
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
- rc = mode_to_acl(inode, full_path, mode);
- else
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+ rc = mode_to_cifs_acl(inode, full_path, mode);
+ if (rc) {
+ cFYI(1, "%s: Setting ACL failed with error: %d",
+ __func__, rc);
+ goto cifs_setattr_exit;
+ }
+ } else
#endif
if (((mode & S_IWUGO) == 0) &&
(cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ef7bb7b50f58..32d300e8f20e 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -226,26 +226,29 @@ static int initiate_cifs_search(const int xid, struct file *file)
char *full_path = NULL;
struct cifsFileInfo *cifsFile;
struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- struct tcon_link *tlink;
+ struct tcon_link *tlink = NULL;
struct cifsTconInfo *pTcon;
- tlink = cifs_sb_tlink(cifs_sb);
- if (IS_ERR(tlink))
- return PTR_ERR(tlink);
- pTcon = tlink_tcon(tlink);
-
- if (file->private_data == NULL)
- file->private_data =
- kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
if (file->private_data == NULL) {
- rc = -ENOMEM;
- goto error_exit;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+
+ cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+ if (cifsFile == NULL) {
+ rc = -ENOMEM;
+ goto error_exit;
+ }
+ file->private_data = cifsFile;
+ cifsFile->tlink = cifs_get_tlink(tlink);
+ pTcon = tlink_tcon(tlink);
+ } else {
+ cifsFile = file->private_data;
+ pTcon = tlink_tcon(cifsFile->tlink);
}
- cifsFile = file->private_data;
cifsFile->invalidHandle = true;
cifsFile->srch_inf.endOfSearch = false;
- cifsFile->tlink = cifs_get_tlink(tlink);
full_path = build_path_from_dentry(file->f_path.dentry);
if (full_path == NULL) {
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a264b744bb41..eae2a1491608 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -30,10 +30,11 @@
#define MAX_EA_VALUE_SIZE 65535
#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
+#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
#define CIFS_XATTR_USER_PREFIX "user."
#define CIFS_XATTR_SYSTEM_PREFIX "system."
#define CIFS_XATTR_OS2_PREFIX "os2."
-#define CIFS_XATTR_SECURITY_PREFIX ".security"
+#define CIFS_XATTR_SECURITY_PREFIX "security."
#define CIFS_XATTR_TRUSTED_PREFIX "trusted."
#define XATTR_TRUSTED_PREFIX_LEN 8
#define XATTR_SECURITY_PREFIX_LEN 9
@@ -277,29 +278,8 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
-#ifdef CONFIG_CIFS_EXPERIMENTAL
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- __u16 fid;
- int oplock = 0;
- struct cifs_ntsd *pacl = NULL;
- __u32 buflen = 0;
- if (experimEnabled)
- rc = CIFSSMBOpen(xid, pTcon, full_path,
- FILE_OPEN, GENERIC_READ, 0, &fid,
- &oplock, NULL, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- /* else rc is EOPNOTSUPP from above */
-
- if (rc == 0) {
- rc = CIFSSMBGetCIFSACL(xid, pTcon, fid, &pacl,
- &buflen);
- CIFSSMBClose(xid, pTcon, fid);
- }
- }
-#endif /* EXPERIMENTAL */
#else
- cFYI(1, "query POSIX ACL not supported yet");
+ cFYI(1, "Query POSIX ACL not supported yet");
#endif /* CONFIG_CIFS_POSIX */
} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
@@ -311,8 +291,33 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
#else
- cFYI(1, "query POSIX default ACL not supported yet");
-#endif
+ cFYI(1, "Query POSIX default ACL not supported yet");
+#endif /* CONFIG_CIFS_POSIX */
+ } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
+ strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
+#ifdef CONFIG_CIFS_ACL
+ u32 acllen;
+ struct cifs_ntsd *pacl;
+
+ pacl = get_cifs_acl(cifs_sb, direntry->d_inode,
+ full_path, &acllen);
+ if (IS_ERR(pacl)) {
+ rc = PTR_ERR(pacl);
+ cERROR(1, "%s: error %zd getting sec desc",
+ __func__, rc);
+ } else {
+ if (ea_value) {
+ if (acllen > buf_size)
+ acllen = -ERANGE;
+ else
+ memcpy(ea_value, pacl, acllen);
+ }
+ rc = acllen;
+ kfree(pacl);
+ }
+#else
+ cFYI(1, "Query CIFS ACL not supported yet");
+#endif /* CONFIG_CIFS_ACL */
} else if (strncmp(ea_name,
CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
cFYI(1, "Trusted xattr namespace not supported yet");
diff --git a/fs/compat.c b/fs/compat.c
index c580c322fa6b..eb1740ac8c0a 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1350,6 +1350,10 @@ static int compat_count(compat_uptr_t __user *argv, int max)
argv++;
if (i++ >= max)
return -E2BIG;
+
+ if (fatal_signal_pending(current))
+ return -ERESTARTNOHAND;
+ cond_resched();
}
}
return i;
@@ -1391,6 +1395,12 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
while (len > 0) {
int offset, bytes_to_copy;
+ if (fatal_signal_pending(current)) {
+ ret = -ERESTARTNOHAND;
+ goto out;
+ }
+ cond_resched();
+
offset = pos % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
@@ -1407,18 +1417,8 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
struct page *page;
-#ifdef CONFIG_STACK_GROWSUP
- ret = expand_stack_downwards(bprm->vma, pos);
- if (ret < 0) {
- /* We've exceed the stack rlimit. */
- ret = -E2BIG;
- goto out;
- }
-#endif
- ret = get_user_pages(current, bprm->mm, pos,
- 1, 1, 1, &page, NULL);
- if (ret <= 0) {
- /* We've exceed the stack rlimit. */
+ page = get_arg_page(bprm, pos, 1);
+ if (!page) {
ret = -E2BIG;
goto out;
}
@@ -1539,8 +1539,10 @@ int compat_do_execve(char * filename,
return retval;
out:
- if (bprm->mm)
+ if (bprm->mm) {
+ acct_arg_size(bprm, 0);
mmput(bprm->mm);
+ }
out_file:
if (bprm->file) {
diff --git a/fs/exec.c b/fs/exec.c
index 99d33a1371e9..d68c378a3137 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -164,7 +164,26 @@ out:
#ifdef CONFIG_MMU
-static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
+{
+ struct mm_struct *mm = current->mm;
+ long diff = (long)(pages - bprm->vma_pages);
+
+ if (!mm || !diff)
+ return;
+
+ bprm->vma_pages = pages;
+
+#ifdef SPLIT_RSS_COUNTING
+ add_mm_counter(mm, MM_ANONPAGES, diff);
+#else
+ spin_lock(&mm->page_table_lock);
+ add_mm_counter(mm, MM_ANONPAGES, diff);
+ spin_unlock(&mm->page_table_lock);
+#endif
+}
+
+struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
@@ -186,6 +205,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
struct rlimit *rlim;
+ acct_arg_size(bprm, size / PAGE_SIZE);
+
/*
* We've historically supported up to 32 pages (ARG_MAX)
* of argument strings even with small stacks
@@ -276,7 +297,11 @@ static bool valid_arg_len(struct linux_binprm *bprm, long len)
#else
-static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
+{
+}
+
+struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
int write)
{
struct page *page;
@@ -1003,6 +1028,7 @@ int flush_old_exec(struct linux_binprm * bprm)
/*
* Release all of the old mmap stuff
*/
+ acct_arg_size(bprm, 0);
retval = exec_mmap(bprm->mm);
if (retval)
goto out;
@@ -1426,8 +1452,10 @@ int do_execve(const char * filename,
return retval;
out:
- if (bprm->mm)
- mmput (bprm->mm);
+ if (bprm->mm) {
+ acct_arg_size(bprm, 0);
+ mmput(bprm->mm);
+ }
out_file:
if (bprm->file) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c8224587123f..9242d294fe90 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -134,6 +134,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
void fuse_finish_open(struct inode *inode, struct file *file)
{
struct fuse_file *ff = file->private_data;
+ struct fuse_conn *fc = get_fuse_conn(inode);
if (ff->open_flags & FOPEN_DIRECT_IO)
file->f_op = &fuse_direct_io_file_operations;
@@ -141,6 +142,15 @@ void fuse_finish_open(struct inode *inode, struct file *file)
invalidate_inode_pages2(inode->i_mapping);
if (ff->open_flags & FOPEN_NONSEEKABLE)
nonseekable_open(inode, file);
+ if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fc->lock);
+ fi->attr_version = ++fc->attr_version;
+ i_size_write(inode, 0);
+ spin_unlock(&fc->lock);
+ fuse_invalidate_attr(inode);
+ }
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 58a9b9998b42..f606baf9ba72 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -631,6 +631,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
struct fs_disk_quota *fdq)
{
struct inode *inode = &ip->i_inode;
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
struct address_space *mapping = inode->i_mapping;
unsigned long index = loc >> PAGE_CACHE_SHIFT;
unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
@@ -658,11 +659,11 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
qd->qd_qb.qb_value = qp->qu_value;
if (fdq) {
if (fdq->d_fieldmask & FS_DQ_BSOFT) {
- qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
+ qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
qd->qd_qb.qb_warn = qp->qu_warn;
}
if (fdq->d_fieldmask & FS_DQ_BHARD) {
- qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
+ qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
qd->qd_qb.qb_limit = qp->qu_limit;
}
}
@@ -1497,9 +1498,9 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
fdq->d_version = FS_DQUOT_VERSION;
fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
fdq->d_id = id;
- fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
- fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
- fdq->d_bcount = be64_to_cpu(qlvb->qb_value);
+ fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
+ fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
+ fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
gfs2_glock_dq_uninit(&q_gh);
out:
@@ -1566,10 +1567,10 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
/* If nothing has changed, this is a no-op */
if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
- (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
+ ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
fdq->d_fieldmask ^= FS_DQ_BSOFT;
if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
- (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
+ ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
fdq->d_fieldmask ^= FS_DQ_BHARD;
if (fdq->d_fieldmask == 0)
goto out_i;
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 2f7d05c89922..7da2a06508e5 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -103,22 +103,15 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
}
ret = -ESRCH;
- /*
- * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
- * so we can't use rcu_read_lock(). See re-copy of ->ioprio
- * in copy_process().
- */
- read_lock(&tasklist_lock);
+ rcu_read_lock();
switch (which) {
case IOPRIO_WHO_PROCESS:
- rcu_read_lock();
if (!who)
p = current;
else
p = find_task_by_vpid(who);
if (p)
ret = set_task_ioprio(p, ioprio);
- rcu_read_unlock();
break;
case IOPRIO_WHO_PGRP:
if (!who)
@@ -141,12 +134,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
break;
do_each_thread(g, p) {
- int match;
-
- rcu_read_lock();
- match = __task_cred(p)->uid == who;
- rcu_read_unlock();
- if (!match)
+ if (__task_cred(p)->uid != who)
continue;
ret = set_task_ioprio(p, ioprio);
if (ret)
@@ -160,7 +148,7 @@ free_uid:
ret = -EINVAL;
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return ret;
}
@@ -204,17 +192,15 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
int ret = -ESRCH;
int tmpio;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
switch (which) {
case IOPRIO_WHO_PROCESS:
- rcu_read_lock();
if (!who)
p = current;
else
p = find_task_by_vpid(who);
if (p)
ret = get_task_ioprio(p);
- rcu_read_unlock();
break;
case IOPRIO_WHO_PGRP:
if (!who)
@@ -241,12 +227,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
break;
do_each_thread(g, p) {
- int match;
-
- rcu_read_lock();
- match = __task_cred(p)->uid == user->uid;
- rcu_read_unlock();
- if (!match)
+ if (__task_cred(p)->uid != user->uid)
continue;
tmpio = get_task_ioprio(p);
if (tmpio < 0)
@@ -264,6 +245,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
ret = -EINVAL;
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return ret;
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 662df2a5fad5..f0a384e2ae63 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -162,6 +162,7 @@ struct nfs_cache_array_entry {
u64 cookie;
u64 ino;
struct qstr string;
+ unsigned char d_type;
};
struct nfs_cache_array {
@@ -171,8 +172,6 @@ struct nfs_cache_array {
struct nfs_cache_array_entry array[0];
};
-#define MAX_READDIR_ARRAY ((PAGE_SIZE - sizeof(struct nfs_cache_array)) / sizeof(struct nfs_cache_array_entry))
-
typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
typedef struct {
struct file *file;
@@ -257,13 +256,17 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
if (IS_ERR(array))
return PTR_ERR(array);
+
+ cache_entry = &array->array[array->size];
+
+ /* Check that this entry lies within the page bounds */
ret = -ENOSPC;
- if (array->size >= MAX_READDIR_ARRAY)
+ if ((char *)&cache_entry[1] - (char *)page_address(page) > PAGE_SIZE)
goto out;
- cache_entry = &array->array[array->size];
cache_entry->cookie = entry->prev_cookie;
cache_entry->ino = entry->ino;
+ cache_entry->d_type = entry->d_type;
ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len);
if (ret)
goto out;
@@ -392,13 +395,9 @@ int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct x
static
int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
{
- struct nfs_inode *node;
if (dentry->d_inode == NULL)
goto different;
- node = NFS_I(dentry->d_inode);
- if (node->fh.size != entry->fh->size)
- goto different;
- if (strncmp(node->fh.data, entry->fh->data, node->fh.size) != 0)
+ if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0)
goto different;
return 1;
different:
@@ -466,8 +465,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
struct xdr_stream stream;
struct xdr_buf buf;
__be32 *ptr = xdr_page;
- int status;
struct nfs_cache_array *array;
+ unsigned int count = 0;
+ int status;
buf.head->iov_base = xdr_page;
buf.head->iov_len = buflen;
@@ -488,6 +488,8 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
break;
}
+ count++;
+
if (desc->plus == 1)
nfs_prime_dcache(desc->file->f_path.dentry, entry);
@@ -496,13 +498,14 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
break;
} while (!entry->eof);
- if (status == -EBADCOOKIE && entry->eof) {
+ if (count == 0 || (status == -EBADCOOKIE && entry->eof == 1)) {
array = nfs_readdir_get_array(page);
if (!IS_ERR(array)) {
array->eof_index = array->size;
status = 0;
nfs_readdir_release_array(page);
- }
+ } else
+ status = PTR_ERR(array);
}
return status;
}
@@ -696,21 +699,23 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
int i = 0;
int res = 0;
struct nfs_cache_array *array = NULL;
- unsigned int d_type = DT_UNKNOWN;
- struct dentry *dentry = NULL;
array = nfs_readdir_get_array(desc->page);
- if (IS_ERR(array))
- return PTR_ERR(array);
+ if (IS_ERR(array)) {
+ res = PTR_ERR(array);
+ goto out;
+ }
for (i = desc->cache_entry_index; i < array->size; i++) {
- d_type = DT_UNKNOWN;
+ struct nfs_cache_array_entry *ent;
- res = filldir(dirent, array->array[i].string.name,
- array->array[i].string.len, file->f_pos,
- nfs_compat_user_ino64(array->array[i].ino), d_type);
- if (res < 0)
+ ent = &array->array[i];
+ if (filldir(dirent, ent->string.name, ent->string.len,
+ file->f_pos, nfs_compat_user_ino64(ent->ino),
+ ent->d_type) < 0) {
+ desc->eof = 1;
break;
+ }
file->f_pos++;
desc->cache_entry_index = i;
if (i < (array->size-1))
@@ -722,9 +727,8 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
desc->eof = 1;
nfs_readdir_release_array(desc->page);
+out:
cache_page_release(desc);
- if (dentry != NULL)
- dput(dentry);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
(unsigned long long)*desc->dir_cookie, res);
return res;
@@ -759,13 +763,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
goto out;
}
- if (nfs_readdir_xdr_to_array(desc, page, inode) == -1) {
- status = -EIO;
- goto out_release;
- }
-
desc->page_index = 0;
desc->page = page;
+
+ status = nfs_readdir_xdr_to_array(desc, page, inode);
+ if (status < 0)
+ goto out_release;
+
status = nfs_do_filldir(desc, dirent, filldir);
out:
@@ -816,14 +820,14 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
res = readdir_search_pagecache(desc);
if (res == -EBADCOOKIE) {
+ res = 0;
/* This means either end of directory */
if (*desc->dir_cookie && desc->eof == 0) {
/* Or that the server has 'lost' a cookie */
res = uncached_readdir(desc, dirent, filldir);
- if (res >= 0)
+ if (res == 0)
continue;
}
- res = 0;
break;
}
if (res == -ETOOSMALL && desc->plus) {
@@ -838,10 +842,8 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
break;
res = nfs_do_filldir(desc, dirent, filldir);
- if (res < 0) {
- res = 0;
+ if (res < 0)
break;
- }
}
out:
nfs_unblock_sillyrename(dentry);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 84d3c8b90206..e6ace0d93c71 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -867,7 +867,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
goto out;
nfs_alloc_commit_data(dreq);
- if (dreq->commit_data == NULL || count < wsize)
+ if (dreq->commit_data == NULL || count <= wsize)
sync = NFS_FILE_SYNC;
dreq->inode = inode;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index db08ff3ff454..e6356b750b77 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -362,6 +362,15 @@ unsigned int nfs_page_length(struct page *page)
}
/*
+ * Convert a umode to a dirent->d_type
+ */
+static inline
+unsigned char nfs_umode_to_dtype(umode_t mode)
+{
+ return (mode >> 12) & 15;
+}
+
+/*
* Determine the number of pages in an array of length 'len' and
* with a base offset of 'base'
*/
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 2563f765c9b4..5914a1911c95 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -485,6 +485,8 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se
entry->prev_cookie = entry->cookie;
entry->cookie = ntohl(*p++);
+ entry->d_type = DT_UNKNOWN;
+
p = xdr_inline_peek(xdr, 8);
if (p != NULL)
entry->eof = !p[0] && p[1];
@@ -495,7 +497,7 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se
out_overflow:
print_overflow_msg(__func__, xdr);
- return ERR_PTR(-EIO);
+ return ERR_PTR(-EAGAIN);
}
/*
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 748dc91a4a14..f6cc60f06dac 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -622,11 +622,13 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s
entry->prev_cookie = entry->cookie;
p = xdr_decode_hyper(p, &entry->cookie);
+ entry->d_type = DT_UNKNOWN;
if (plus) {
entry->fattr->valid = 0;
p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
if (IS_ERR(p))
goto out_overflow_exit;
+ entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
/* In fact, a post_op_fh3: */
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
@@ -656,7 +658,7 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s
out_overflow:
print_overflow_msg(__func__, xdr);
out_overflow_exit:
- return ERR_PTR(-EIO);
+ return ERR_PTR(-EAGAIN);
}
/*
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index b7a204ff6fe1..9f1826b012e6 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -6208,6 +6208,10 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
entry->ino = entry->fattr->fileid;
+ entry->d_type = DT_UNKNOWN;
+ if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
+ entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
+
if (verify_attr_len(xdr, p, len) < 0)
goto out_overflow;
@@ -6221,7 +6225,7 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
out_overflow:
print_overflow_msg(__func__, xdr);
- return ERR_PTR(-EIO);
+ return ERR_PTR(-EAGAIN);
}
/*
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 49c844dab33a..59e5fe742f7b 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -335,7 +335,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
* the device at this point.
*
* To prevent nilfs_dat_translate() from returning the
- * uncommited block number, this makes a copy of the entry
+ * uncommitted block number, this makes a copy of the entry
* buffer and redirects nilfs_dat_translate() to the copy.
*/
if (!buffer_nilfs_redirected(entry_bh)) {
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 3e90f86d5bfe..e00d9457c256 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -349,8 +349,8 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb,
ino = vdesc->vd_ino;
cno = vdesc->vd_cno;
inode = nilfs_iget_for_gc(sb, ino, cno);
- if (unlikely(inode == NULL)) {
- ret = -ENOMEM;
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
goto failed;
}
do {
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 52c7557f3e25..9f26ac9be2a4 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1964,8 +1964,10 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
if (reg == NULL)
return ERR_PTR(-ENOMEM);
- if (strlen(name) > O2HB_MAX_REGION_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
+ if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) {
+ ret = -ENAMETOOLONG;
+ goto free;
+ }
spin_lock(&o2hb_live_lock);
reg->hr_region_num = 0;
@@ -1974,7 +1976,8 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
O2NM_MAX_REGIONS);
if (reg->hr_region_num >= O2NM_MAX_REGIONS) {
spin_unlock(&o2hb_live_lock);
- return ERR_PTR(-EFBIG);
+ ret = -EFBIG;
+ goto free;
}
set_bit(reg->hr_region_num, o2hb_region_bitmap);
}
@@ -1986,10 +1989,13 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
if (ret) {
config_item_put(&reg->hr_item);
- return ERR_PTR(ret);
+ goto free;
}
return &reg->hr_item;
+free:
+ kfree(reg);
+ return ERR_PTR(ret);
}
static void o2hb_heartbeat_group_drop_item(struct config_group *group,
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index edaded48e7e9..895532ac4d98 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -476,7 +476,6 @@ static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
out:
iput(inode);
- ocfs2_dentry_attach_gen(dentry);
}
/*
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 58a93b953735..cc2aaa96cfe5 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -959,7 +959,7 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
r += O2HB_MAX_REGION_NAME_LEN;
}
- local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL);
+ local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC);
if (!local) {
status = -ENOMEM;
goto bail;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 1efea3615589..70dd3b1798f1 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -159,9 +159,9 @@ struct ocfs2_lock_res {
char l_name[OCFS2_LOCK_ID_MAX_LEN];
unsigned int l_ro_holders;
unsigned int l_ex_holders;
- char l_level;
- char l_requested;
- char l_blocking;
+ signed char l_level;
+ signed char l_requested;
+ signed char l_blocking;
/* Data packed - type enum ocfs2_lock_type */
unsigned char l_type;
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 252e7c82f929..a5ebe421195f 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -190,7 +190,7 @@ static struct ocfs2_live_connection *ocfs2_connection_find(const char *name)
return c;
}
- return c;
+ return NULL;
}
/*
diff --git a/fs/pipe.c b/fs/pipe.c
index a8012a955720..04629f36e397 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1199,12 +1199,24 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
return ret;
}
+/*
+ * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
+ * location, so checking ->i_pipe is not enough to verify that this is a
+ * pipe.
+ */
+struct pipe_inode_info *get_pipe_info(struct file *file)
+{
+ struct inode *i = file->f_path.dentry->d_inode;
+
+ return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
+}
+
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe;
long ret;
- pipe = file->f_path.dentry->d_inode->i_pipe;
+ pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2fa0ce29b6dc..08cba2c3b612 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1650,7 +1650,7 @@ static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
if (!tmp)
return -ENOMEM;
- pathname = d_path_with_unreachable(path, tmp, PAGE_SIZE);
+ pathname = d_path(path, tmp, PAGE_SIZE);
len = PTR_ERR(pathname);
if (IS_ERR(pathname))
goto out;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index da6b01d70f01..c126c83b9a45 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -706,6 +706,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
* skip over unmapped regions.
*/
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
+#define PAGEMAP_WALK_MASK (PMD_MASK)
static ssize_t pagemap_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -776,7 +777,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
unsigned long end;
pm.pos = 0;
- end = start_vaddr + PAGEMAP_WALK_SIZE;
+ end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
/* overflow ? */
if (end < start_vaddr || end > end_vaddr)
end = end_vaddr;
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index bd9763e76bae..79265fdc317a 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -183,12 +183,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
return 0;
}
- /* we need to make sure nobody is changing the file size beneath
- ** us
- */
- reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
depth = reiserfs_write_lock_once(inode->i_sb);
+ /* we need to make sure nobody is changing the file size beneath us */
+ reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+
write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */
if (write_from == 0) {
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 536d697a8a28..90d2fcb67a31 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -472,7 +472,9 @@ int reiserfs_acl_chmod(struct inode *inode)
struct reiserfs_transaction_handle th;
size_t size = reiserfs_xattr_nblocks(inode,
reiserfs_acl_size(clone->a_count));
- reiserfs_write_lock(inode->i_sb);
+ int depth;
+
+ depth = reiserfs_write_lock_once(inode->i_sb);
error = journal_begin(&th, inode->i_sb, size * 2);
if (!error) {
int error2;
@@ -482,7 +484,7 @@ int reiserfs_acl_chmod(struct inode *inode)
if (error2)
error = error2;
}
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, depth);
}
posix_acl_release(clone);
return error;
diff --git a/fs/splice.c b/fs/splice.c
index 8f1dfaecc8f0..ce2f02579e35 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1311,18 +1311,6 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags);
-/*
- * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
- * location, so checking ->i_pipe is not enough to verify that this is a
- * pipe.
- */
-static inline struct pipe_inode_info *pipe_info(struct inode *inode)
-{
- if (S_ISFIFO(inode->i_mode))
- return inode->i_pipe;
-
- return NULL;
-}
/*
* Determine where to splice to/from.
@@ -1336,8 +1324,8 @@ static long do_splice(struct file *in, loff_t __user *off_in,
loff_t offset, *off;
long ret;
- ipipe = pipe_info(in->f_path.dentry->d_inode);
- opipe = pipe_info(out->f_path.dentry->d_inode);
+ ipipe = get_pipe_info(in);
+ opipe = get_pipe_info(out);
if (ipipe && opipe) {
if (off_in || off_out)
@@ -1555,7 +1543,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
int error;
long ret;
- pipe = pipe_info(file->f_path.dentry->d_inode);
+ pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
@@ -1642,7 +1630,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
};
long ret;
- pipe = pipe_info(file->f_path.dentry->d_inode);
+ pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
@@ -2022,8 +2010,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
static long do_tee(struct file *in, struct file *out, size_t len,
unsigned int flags)
{
- struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
- struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
+ struct pipe_inode_info *ipipe = get_pipe_info(in);
+ struct pipe_inode_info *opipe = get_pipe_info(out);
int ret = -EINVAL;
/*
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 7d287afccde5..691f61223ed6 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -934,7 +934,6 @@ xfs_aops_discard_page(
struct xfs_inode *ip = XFS_I(inode);
struct buffer_head *bh, *head;
loff_t offset = page_offset(page);
- ssize_t len = 1 << inode->i_blkbits;
if (!xfs_is_delayed_page(page, IO_DELAY))
goto out_invalidate;
@@ -949,58 +948,14 @@ xfs_aops_discard_page(
xfs_ilock(ip, XFS_ILOCK_EXCL);
bh = head = page_buffers(page);
do {
- int done;
- xfs_fileoff_t offset_fsb;
- xfs_bmbt_irec_t imap;
- int nimaps = 1;
int error;
- xfs_fsblock_t firstblock;
- xfs_bmap_free_t flist;
+ xfs_fileoff_t start_fsb;
if (!buffer_delay(bh))
goto next_buffer;
- offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
-
- /*
- * Map the range first and check that it is a delalloc extent
- * before trying to unmap the range. Otherwise we will be
- * trying to remove a real extent (which requires a
- * transaction) or a hole, which is probably a bad idea...
- */
- error = xfs_bmapi(NULL, ip, offset_fsb, 1,
- XFS_BMAPI_ENTIRE, NULL, 0, &imap,
- &nimaps, NULL);
-
- if (error) {
- /* something screwed, just bail */
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
- "page discard failed delalloc mapping lookup.");
- }
- break;
- }
- if (!nimaps) {
- /* nothing there */
- goto next_buffer;
- }
- if (imap.br_startblock != DELAYSTARTBLOCK) {
- /* been converted, ignore */
- goto next_buffer;
- }
- WARN_ON(imap.br_blockcount == 0);
-
- /*
- * Note: while we initialise the firstblock/flist pair, they
- * should never be used because blocks should never be
- * allocated or freed for a delalloc extent and hence we need
- * don't cancel or finish them after the xfs_bunmapi() call.
- */
- xfs_bmap_init(&flist, &firstblock);
- error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
- &flist, &done);
-
- ASSERT(!flist.xbf_count && !flist.xbf_first);
+ start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
if (error) {
/* something screwed, just bail */
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
@@ -1010,7 +965,7 @@ xfs_aops_discard_page(
break;
}
next_buffer:
- offset += len;
+ offset += 1 << inode->i_blkbits;
} while ((bh = bh->b_this_page) != head);
@@ -1505,11 +1460,42 @@ xfs_vm_write_failed(
struct inode *inode = mapping->host;
if (to > inode->i_size) {
- struct iattr ia = {
- .ia_valid = ATTR_SIZE | ATTR_FORCE,
- .ia_size = inode->i_size,
- };
- xfs_setattr(XFS_I(inode), &ia, XFS_ATTR_NOLOCK);
+ /*
+ * punch out the delalloc blocks we have already allocated. We
+ * don't call xfs_setattr() to do this as we may be in the
+ * middle of a multi-iovec write and so the vfs inode->i_size
+ * will not match the xfs ip->i_size and so it will zero too
+ * much. Hence we just truncate the page cache to zero what is
+ * necessary and punch the delalloc blocks directly.
+ */
+ struct xfs_inode *ip = XFS_I(inode);
+ xfs_fileoff_t start_fsb;
+ xfs_fileoff_t end_fsb;
+ int error;
+
+ truncate_pagecache(inode, to, inode->i_size);
+
+ /*
+ * Check if there are any blocks that are outside of i_size
+ * that need to be trimmed back.
+ */
+ start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
+ end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
+ if (end_fsb <= start_fsb)
+ return;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
+ end_fsb - start_fsb);
+ if (error) {
+ /* something screwed, just bail */
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+ "xfs_vm_write_failed: unable to clean up ino %lld",
+ ip->i_ino);
+ }
+ }
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index aa1d353def29..4c5deb6e9e31 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -488,29 +488,16 @@ found:
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);
- /* Attempt to get the semaphore without sleeping,
- * if this does not work then we need to drop the
- * spinlock and do a hard attempt on the semaphore.
- */
- if (down_trylock(&bp->b_sema)) {
+ if (xfs_buf_cond_lock(bp)) {
+ /* failed, so wait for the lock if requested. */
if (!(flags & XBF_TRYLOCK)) {
- /* wait for buffer ownership */
xfs_buf_lock(bp);
XFS_STATS_INC(xb_get_locked_waited);
} else {
- /* We asked for a trylock and failed, no need
- * to look at file offset and length here, we
- * know that this buffer at least overlaps our
- * buffer and is locked, therefore our buffer
- * either does not exist, or is this buffer.
- */
xfs_buf_rele(bp);
XFS_STATS_INC(xb_busy_locked);
return NULL;
}
- } else {
- /* trylock worked */
- XB_SET_OWNER(bp);
}
if (bp->b_flags & XBF_STALE) {
@@ -876,10 +863,18 @@ xfs_buf_rele(
*/
/*
- * Locks a buffer object, if it is not already locked.
- * Note that this in no way locks the underlying pages, so it is only
- * useful for synchronizing concurrent use of buffer objects, not for
- * synchronizing independent access to the underlying pages.
+ * Locks a buffer object, if it is not already locked. Note that this in
+ * no way locks the underlying pages, so it is only useful for
+ * synchronizing concurrent use of buffer objects, not for synchronizing
+ * independent access to the underlying pages.
+ *
+ * If we come across a stale, pinned, locked buffer, we know that we are
+ * being asked to lock a buffer that has been reallocated. Because it is
+ * pinned, we know that the log has not been pushed to disk and hence it
+ * will still be locked. Rather than continuing to have trylock attempts
+ * fail until someone else pushes the log, push it ourselves before
+ * returning. This means that the xfsaild will not get stuck trying
+ * to push on stale inode buffers.
*/
int
xfs_buf_cond_lock(
@@ -890,6 +885,8 @@ xfs_buf_cond_lock(
locked = down_trylock(&bp->b_sema) == 0;
if (locked)
XB_SET_OWNER(bp);
+ else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
+ xfs_log_force(bp->b_target->bt_mount, 0);
trace_xfs_buf_cond_lock(bp, _RET_IP_);
return locked ? 0 : -EBUSY;
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 8abd12e32e13..4111cd3966c7 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5471,8 +5471,13 @@ xfs_getbmap(
if (error)
goto out_unlock_iolock;
}
-
- ASSERT(ip->i_delayed_blks == 0);
+ /*
+ * even after flushing the inode, there can still be delalloc
+ * blocks on the inode beyond EOF due to speculative
+ * preallocation. These are not removed until the release
+ * function is called or the inode is inactivated. Hence we
+ * cannot assert here that ip->i_delayed_blks == 0.
+ */
}
lock = xfs_ilock_map_shared(ip);
@@ -6070,3 +6075,79 @@ xfs_bmap_disk_count_leaves(
*count += xfs_bmbt_disk_get_blockcount(frp);
}
}
+
+/*
+ * dead simple method of punching delayed allocation blocks from a range in
+ * the inode. Walks a block at a time so will be slow, but is only executed in
+ * rare error cases so the overhead is not critical. This will always punch out
+ * both the start and end blocks, even if the ranges only partially overlap
+ * them, so it is up to the caller to ensure that partial blocks are not
+ * passed in.
+ */
+int
+xfs_bmap_punch_delalloc_range(
+ struct xfs_inode *ip,
+ xfs_fileoff_t start_fsb,
+ xfs_fileoff_t length)
+{
+ xfs_fileoff_t remaining = length;
+ int error = 0;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ do {
+ int done;
+ xfs_bmbt_irec_t imap;
+ int nimaps = 1;
+ xfs_fsblock_t firstblock;
+ xfs_bmap_free_t flist;
+
+ /*
+ * Map the range first and check that it is a delalloc extent
+ * before trying to unmap the range. Otherwise we will be
+ * trying to remove a real extent (which requires a
+ * transaction) or a hole, which is probably a bad idea...
+ */
+ error = xfs_bmapi(NULL, ip, start_fsb, 1,
+ XFS_BMAPI_ENTIRE, NULL, 0, &imap,
+ &nimaps, NULL);
+
+ if (error) {
+ /* something screwed, just bail */
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+ "Failed delalloc mapping lookup ino %lld fsb %lld.",
+ ip->i_ino, start_fsb);
+ }
+ break;
+ }
+ if (!nimaps) {
+ /* nothing there */
+ goto next_block;
+ }
+ if (imap.br_startblock != DELAYSTARTBLOCK) {
+ /* been converted, ignore */
+ goto next_block;
+ }
+ WARN_ON(imap.br_blockcount == 0);
+
+ /*
+ * Note: while we initialise the firstblock/flist pair, they
+ * should never be used because blocks should never be
+ * allocated or freed for a delalloc extent and hence we don't
+ * need to cancel or finish them after the xfs_bunmapi() call.
+ */
+ xfs_bmap_init(&flist, &firstblock);
+ error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
+ &flist, &done);
+ if (error)
+ break;
+
+ ASSERT(!flist.xbf_count && !flist.xbf_first);
+next_block:
+ start_fsb++;
+ remaining--;
+ } while (remaining > 0);
+
+ return error;
+}
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 71ec9b6ecdfc..3651191daea1 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -394,6 +394,11 @@ xfs_bmap_count_blocks(
int whichfork,
int *count);
+int
+xfs_bmap_punch_delalloc_range(
+ struct xfs_inode *ip,
+ xfs_fileoff_t start_fsb,
+ xfs_fileoff_t length);
#endif /* __KERNEL__ */
#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 3b9582c60a22..e60490bc00a6 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -377,6 +377,19 @@ xfs_swap_extents(
ip->i_d.di_format = tip->i_d.di_format;
tip->i_d.di_format = tmp;
+ /*
+ * The extents in the source inode could still contain speculative
+ * preallocation beyond EOF (e.g. the file is open but not modified
+ * while defrag is in progress). In that case, we need to copy over the
+ * number of delalloc blocks the data fork in the source inode is
+ * tracking beyond EOF so that when the fork is truncated away when the
+ * temporary inode is unlinked we don't underrun the i_delayed_blks
+ * counter on that inode.
+ */
+ ASSERT(tip->i_delayed_blks == 0);
+ tip->i_delayed_blks = ip->i_delayed_blks;
+ ip->i_delayed_blks = 0;
+
ilf_fields = XFS_ILOG_CORE;
switch(ip->i_d.di_format) {
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index ed9990267661..c78cc6a3d87c 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -58,6 +58,7 @@ xfs_error_trap(int e)
int xfs_etest[XFS_NUM_INJECT_ERROR];
int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR];
+int xfs_error_test_active;
int
xfs_error_test(int error_tag, int *fsidp, char *expression,
@@ -108,6 +109,7 @@ xfs_errortag_add(int error_tag, xfs_mount_t *mp)
len = strlen(mp->m_fsname);
xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP);
strcpy(xfs_etest_fsname[i], mp->m_fsname);
+ xfs_error_test_active++;
return 0;
}
}
@@ -137,6 +139,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud)
xfs_etest_fsid[i] = 0LL;
kmem_free(xfs_etest_fsname[i]);
xfs_etest_fsname[i] = NULL;
+ xfs_error_test_active--;
}
}
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index c2c1a072bb82..f338847f80b8 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -127,13 +127,14 @@ extern void xfs_corruption_error(const char *tag, int level,
#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
#ifdef DEBUG
+extern int xfs_error_test_active;
extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
#define XFS_NUM_INJECT_ERROR 10
#define XFS_TEST_ERROR(expr, mp, tag, rf) \
- ((expr) || \
+ ((expr) || (xfs_error_test_active && \
xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
- (rf)))
+ (rf))))
extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index c7ac020705df..7c8d30c453c3 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -657,18 +657,37 @@ xfs_inode_item_unlock(
}
/*
- * This is called to find out where the oldest active copy of the
- * inode log item in the on disk log resides now that the last log
- * write of it completed at the given lsn. Since we always re-log
- * all dirty data in an inode, the latest copy in the on disk log
- * is the only one that matters. Therefore, simply return the
- * given lsn.
+ * This is called to find out where the oldest active copy of the inode log
+ * item in the on disk log resides now that the last log write of it completed
+ * at the given lsn. Since we always re-log all dirty data in an inode, the
+ * latest copy in the on disk log is the only one that matters. Therefore,
+ * simply return the given lsn.
+ *
+ * If the inode has been marked stale because the cluster is being freed, we
+ * don't want to (re-)insert this inode into the AIL. There is a race condition
+ * where the cluster buffer may be unpinned before the inode is inserted into
+ * the AIL during transaction committed processing. If the buffer is unpinned
+ * before the inode item has been committed and inserted, then it is possible
+ * for the buffer to be written and the IO to complete before the inode is inserted
+ * into the AIL. In that case, we'd be inserting a clean, stale inode into the
+ * AIL which will never get removed. It will, however, get reclaimed which
+ * triggers an assert in xfs_inode_free() complaining about freeing an inode
+ * still in the AIL.
+ *
+ * To avoid this, return a lower LSN than the one passed in so that the
+ * transaction committed code will not move the inode forward in the AIL but
+ * will still unpin it properly.
*/
STATIC xfs_lsn_t
xfs_inode_item_committed(
struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ struct xfs_inode *ip = iip->ili_inode;
+
+ if (xfs_iflags_test(ip, XFS_ISTALE))
+ return lsn - 1;
return lsn;
}