Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/v9fs.c | 30
-rw-r--r--  fs/9p/vfs_file.c | 1
-rw-r--r--  fs/9p/vfs_inode.c | 8
-rw-r--r--  fs/Kconfig.binfmt | 8
-rw-r--r--  fs/Makefile | 3
-rw-r--r--  fs/autofs4/root.c | 6
-rw-r--r--  fs/binfmt_aout.c | 54
-rw-r--r--  fs/binfmt_elf.c | 154
-rw-r--r--  fs/binfmt_elf_fdpic.c | 9
-rw-r--r--  fs/binfmt_flat.c | 2
-rw-r--r--  fs/bio-integrity.c | 44
-rw-r--r--  fs/bio.c | 231
-rw-r--r--  fs/block_dev.c | 105
-rw-r--r--  fs/btrfs/backref.c | 307
-rw-r--r--  fs/btrfs/backref.h | 14
-rw-r--r--  fs/btrfs/btrfs_inode.h | 15
-rw-r--r--  fs/btrfs/check-integrity.c | 16
-rw-r--r--  fs/btrfs/compression.c | 13
-rw-r--r--  fs/btrfs/ctree.c | 218
-rw-r--r--  fs/btrfs/ctree.h | 112
-rw-r--r--  fs/btrfs/delayed-inode.c | 6
-rw-r--r--  fs/btrfs/disk-io.c | 230
-rw-r--r--  fs/btrfs/disk-io.h | 2
-rw-r--r--  fs/btrfs/extent-tree.c | 376
-rw-r--r--  fs/btrfs/extent_io.c | 130
-rw-r--r--  fs/btrfs/extent_io.h | 23
-rw-r--r--  fs/btrfs/extent_map.c | 55
-rw-r--r--  fs/btrfs/extent_map.h | 8
-rw-r--r--  fs/btrfs/file-item.c | 5
-rw-r--r--  fs/btrfs/file.c | 449
-rw-r--r--  fs/btrfs/free-space-cache.c | 10
-rw-r--r--  fs/btrfs/hash.h | 10
-rw-r--r--  fs/btrfs/inode-item.c | 285
-rw-r--r--  fs/btrfs/inode.c | 393
-rw-r--r--  fs/btrfs/ioctl.c | 108
-rw-r--r--  fs/btrfs/ordered-data.c | 97
-rw-r--r--  fs/btrfs/ordered-data.h | 12
-rw-r--r--  fs/btrfs/qgroup.c | 57
-rw-r--r--  fs/btrfs/relocation.c | 11
-rw-r--r--  fs/btrfs/root-tree.c | 29
-rw-r--r--  fs/btrfs/scrub.c | 30
-rw-r--r--  fs/btrfs/send.c | 1065
-rw-r--r--  fs/btrfs/send.h | 1
-rw-r--r--  fs/btrfs/super.c | 74
-rw-r--r--  fs/btrfs/transaction.c | 283
-rw-r--r--  fs/btrfs/transaction.h | 20
-rw-r--r--  fs/btrfs/tree-log.c | 889
-rw-r--r--  fs/btrfs/ulist.c | 7
-rw-r--r--  fs/btrfs/ulist.h | 9
-rw-r--r--  fs/btrfs/volumes.c | 80
-rw-r--r--  fs/btrfs/zlib.c | 8
-rw-r--r--  fs/buffer.c | 13
-rw-r--r--  fs/ceph/addr.c | 21
-rw-r--r--  fs/ceph/caps.c | 2
-rw-r--r--  fs/ceph/export.c | 20
-rw-r--r--  fs/ceph/file.c | 4
-rw-r--r--  fs/ceph/ioctl.c | 8
-rw-r--r--  fs/ceph/mds_client.c | 3
-rw-r--r--  fs/ceph/super.c | 37
-rw-r--r--  fs/char_dev.c | 18
-rw-r--r--  fs/cifs/cifs_spnego.c | 6
-rw-r--r--  fs/cifs/cifs_unicode.c | 22
-rw-r--r--  fs/cifs/cifsacl.c | 8
-rw-r--r--  fs/cifs/connect.c | 9
-rw-r--r--  fs/cifs/file.c | 1
-rw-r--r--  fs/cifs/transport.c | 6
-rw-r--r--  fs/compat.c | 12
-rw-r--r--  fs/compat_binfmt_elf.c | 7
-rw-r--r--  fs/compat_ioctl.c | 2
-rw-r--r--  fs/coredump.c | 25
-rw-r--r--  fs/coredump.h | 6
-rw-r--r--  fs/eventpoll.c | 38
-rw-r--r--  fs/exec.c | 91
-rw-r--r--  fs/exofs/ore.c | 5
-rw-r--r--  fs/exofs/ore_raid.c | 2
-rw-r--r--  fs/exofs/super.c | 4
-rw-r--r--  fs/exofs/sys.c | 7
-rw-r--r--  fs/ext2/super.c | 4
-rw-r--r--  fs/ext3/namei.c | 40
-rw-r--r--  fs/ext3/namei.h | 19
-rw-r--r--  fs/ext3/super.c | 10
-rw-r--r--  fs/ext4/balloc.c | 8
-rw-r--r--  fs/ext4/bitmap.c | 6
-rw-r--r--  fs/ext4/ext4.h | 56
-rw-r--r--  fs/ext4/ext4_jbd2.c | 8
-rw-r--r--  fs/ext4/extents.c | 318
-rw-r--r--  fs/ext4/file.c | 8
-rw-r--r--  fs/ext4/fsync.c | 92
-rw-r--r--  fs/ext4/ialloc.c | 13
-rw-r--r--  fs/ext4/indirect.c | 18
-rw-r--r--  fs/ext4/inode.c | 83
-rw-r--r--  fs/ext4/ioctl.c | 22
-rw-r--r--  fs/ext4/mballoc.c | 143
-rw-r--r--  fs/ext4/mballoc.h | 5
-rw-r--r--  fs/ext4/move_extent.c | 520
-rw-r--r--  fs/ext4/namei.c | 105
-rw-r--r--  fs/ext4/page-io.c | 176
-rw-r--r--  fs/ext4/resize.c | 435
-rw-r--r--  fs/ext4/super.c | 103
-rw-r--r--  fs/fat/Makefile | 2
-rw-r--r--  fs/fat/cache.c | 10
-rw-r--r--  fs/fat/dir.c | 60
-rw-r--r--  fs/fat/fat.h | 94
-rw-r--r--  fs/fat/fatent.c | 13
-rw-r--r--  fs/fat/inode.c | 192
-rw-r--r--  fs/fat/namei_msdos.c | 33
-rw-r--r--  fs/fat/namei_vfat.c | 35
-rw-r--r--  fs/fat/nfs.c | 101
-rw-r--r--  fs/fcntl.c | 2
-rw-r--r--  fs/file.c | 3
-rw-r--r--  fs/file_table.c | 2
-rw-r--r--  fs/filesystems.c | 4
-rw-r--r--  fs/fs-writeback.c | 10
-rw-r--r--  fs/fuse/file.c | 1
-rw-r--r--  fs/gfs2/export.c | 4
-rw-r--r--  fs/gfs2/file.c | 2
-rw-r--r--  fs/hostfs/hostfs.h | 2
-rw-r--r--  fs/hostfs/hostfs_kern.c | 12
-rw-r--r--  fs/hostfs/hostfs_user.c | 1
-rw-r--r--  fs/hpfs/anode.c | 6
-rw-r--r--  fs/hpfs/dnode.c | 28
-rw-r--r--  fs/hpfs/super.c | 3
-rw-r--r--  fs/hppfs/hppfs.c | 4
-rw-r--r--  fs/hugetlbfs/inode.c | 11
-rw-r--r--  fs/inode.c | 2
-rw-r--r--  fs/internal.h | 4
-rw-r--r--  fs/isofs/export.c | 2
-rw-r--r--  fs/jbd2/commit.c | 40
-rw-r--r--  fs/jbd2/journal.c | 5
-rw-r--r--  fs/jbd2/recovery.c | 7
-rw-r--r--  fs/jbd2/transaction.c | 65
-rw-r--r--  fs/jffs2/readinode.c | 13
-rw-r--r--  fs/jffs2/super.c | 4
-rw-r--r--  fs/jffs2/wbuf.c | 8
-rw-r--r--  fs/jfs/jfs_discard.c | 16
-rw-r--r--  fs/lockd/clntxdr.c | 2
-rw-r--r--  fs/lockd/mon.c | 93
-rw-r--r--  fs/lockd/netns.h | 4
-rw-r--r--  fs/lockd/svc.c | 18
-rw-r--r--  fs/lockd/svcproc.c | 3
-rw-r--r--  fs/locks.c | 6
-rw-r--r--  fs/namei.c | 220
-rw-r--r--  fs/namespace.c | 16
-rw-r--r--  fs/nfs/Kconfig | 4
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 306
-rw-r--r--  fs/nfs/blocklayout/blocklayout.h | 2
-rw-r--r--  fs/nfs/blocklayout/blocklayoutdev.c | 25
-rw-r--r--  fs/nfs/blocklayout/extents.c | 3
-rw-r--r--  fs/nfs/callback.c | 337
-rw-r--r--  fs/nfs/callback.h | 3
-rw-r--r--  fs/nfs/callback_proc.c | 31
-rw-r--r--  fs/nfs/client.c | 23
-rw-r--r--  fs/nfs/dir.c | 16
-rw-r--r--  fs/nfs/direct.c | 32
-rw-r--r--  fs/nfs/file.c | 42
-rw-r--r--  fs/nfs/getroot.c | 2
-rw-r--r--  fs/nfs/idmap.c | 114
-rw-r--r--  fs/nfs/inode.c | 10
-rw-r--r--  fs/nfs/internal.h | 15
-rw-r--r--  fs/nfs/netns.h | 4
-rw-r--r--  fs/nfs/nfs4_fs.h | 19
-rw-r--r--  fs/nfs/nfs4client.c | 256
-rw-r--r--  fs/nfs/nfs4file.c | 29
-rw-r--r--  fs/nfs/nfs4filelayout.c | 62
-rw-r--r--  fs/nfs/nfs4filelayout.h | 17
-rw-r--r--  fs/nfs/nfs4filelayoutdev.c | 39
-rw-r--r--  fs/nfs/nfs4getroot.c | 1
-rw-r--r--  fs/nfs/nfs4namespace.c | 16
-rw-r--r--  fs/nfs/nfs4proc.c | 342
-rw-r--r--  fs/nfs/nfs4state.c | 228
-rw-r--r--  fs/nfs/nfs4sysctl.c | 1
-rw-r--r--  fs/nfs/nfs4xdr.c | 31
-rw-r--r--  fs/nfs/objlayout/objio_osd.c | 15
-rw-r--r--  fs/nfs/pagelist.c | 12
-rw-r--r--  fs/nfs/pnfs.c | 417
-rw-r--r--  fs/nfs/pnfs.h | 58
-rw-r--r--  fs/nfs/pnfs_dev.c | 27
-rw-r--r--  fs/nfs/super.c | 31
-rw-r--r--  fs/nfs/write.c | 11
-rw-r--r--  fs/nfsd/nfs2acl.c | 3
-rw-r--r--  fs/nfsd/nfs3proc.c | 2
-rw-r--r--  fs/nfsd/nfs4callback.c | 1
-rw-r--r--  fs/nfsd/nfs4idmap.c | 4
-rw-r--r--  fs/nfsd/nfs4proc.c | 6
-rw-r--r--  fs/nfsd/nfs4state.c | 351
-rw-r--r--  fs/nfsd/nfs4xdr.c | 2
-rw-r--r--  fs/nfsd/nfsctl.c | 84
-rw-r--r--  fs/nfsd/nfsd.h | 4
-rw-r--r--  fs/nfsd/nfssvc.c | 26
-rw-r--r--  fs/nfsd/state.h | 8
-rw-r--r--  fs/nfsd/vfs.c | 2
-rw-r--r--  fs/nilfs2/file.c | 3
-rw-r--r--  fs/ocfs2/mmap.c | 2
-rw-r--r--  fs/omfs/file.c | 5
-rw-r--r--  fs/open.c | 29
-rw-r--r--  fs/proc/base.c | 122
-rw-r--r--  fs/proc/generic.c | 15
-rw-r--r--  fs/proc/inode.c | 1
-rw-r--r--  fs/proc/internal.h | 6
-rw-r--r--  fs/proc/page.c | 8
-rw-r--r--  fs/proc/proc_sysctl.c | 8
-rw-r--r--  fs/proc/root.c | 2
-rw-r--r--  fs/proc/stat.c | 14
-rw-r--r--  fs/proc/task_mmu.c | 55
-rw-r--r--  fs/pstore/Kconfig | 1
-rw-r--r--  fs/pstore/ftrace.c | 96
-rw-r--r--  fs/pstore/internal.h | 6
-rw-r--r--  fs/pstore/platform.c | 9
-rw-r--r--  fs/pstore/ram.c | 28
-rw-r--r--  fs/quota/dquot.c | 2
-rw-r--r--  fs/quota/quota.c | 4
-rw-r--r--  fs/reiserfs/inode.c | 6
-rw-r--r--  fs/super.c | 25
-rw-r--r--  fs/sysfs/dir.c | 16
-rw-r--r--  fs/sysv/balloc.c | 18
-rw-r--r--  fs/sysv/ialloc.c | 14
-rw-r--r--  fs/sysv/inode.c | 4
-rw-r--r--  fs/sysv/super.c | 1
-rw-r--r--  fs/sysv/sysv.h | 1
-rw-r--r--  fs/ubifs/file.c | 1
-rw-r--r--  fs/ufs/balloc.c | 30
-rw-r--r--  fs/ufs/ialloc.c | 16
-rw-r--r--  fs/ufs/super.c | 21
-rw-r--r--  fs/ufs/ufs.h | 1
-rw-r--r--  fs/xattr.c | 10
-rw-r--r--  fs/xattr_acl.c | 2
-rw-r--r--  fs/xfs/xfs_export.c | 3
-rw-r--r--  fs/xfs/xfs_file.c | 2
228 files changed, 8967 insertions, 4538 deletions
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 392c5dac1981..d934f04e7736 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -184,10 +184,20 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
v9ses->afid = option;
break;
case Opt_uname:
- match_strlcpy(v9ses->uname, &args[0], PATH_MAX);
+ kfree(v9ses->uname);
+ v9ses->uname = match_strdup(&args[0]);
+ if (!v9ses->uname) {
+ ret = -ENOMEM;
+ goto free_and_return;
+ }
break;
case Opt_remotename:
- match_strlcpy(v9ses->aname, &args[0], PATH_MAX);
+ kfree(v9ses->aname);
+ v9ses->aname = match_strdup(&args[0]);
+ if (!v9ses->aname) {
+ ret = -ENOMEM;
+ goto free_and_return;
+ }
break;
case Opt_nodevmap:
v9ses->nodev = 1;
@@ -287,21 +297,21 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
struct p9_fid *fid;
int rc;
- v9ses->uname = __getname();
+ v9ses->uname = kstrdup(V9FS_DEFUSER, GFP_KERNEL);
if (!v9ses->uname)
return ERR_PTR(-ENOMEM);
- v9ses->aname = __getname();
+ v9ses->aname = kstrdup(V9FS_DEFANAME, GFP_KERNEL);
if (!v9ses->aname) {
- __putname(v9ses->uname);
+ kfree(v9ses->uname);
return ERR_PTR(-ENOMEM);
}
init_rwsem(&v9ses->rename_sem);
rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY);
if (rc) {
- __putname(v9ses->aname);
- __putname(v9ses->uname);
+ kfree(v9ses->aname);
+ kfree(v9ses->uname);
return ERR_PTR(rc);
}
@@ -309,8 +319,6 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
list_add(&v9ses->slist, &v9fs_sessionlist);
spin_unlock(&v9fs_sessionlist_lock);
- strcpy(v9ses->uname, V9FS_DEFUSER);
- strcpy(v9ses->aname, V9FS_DEFANAME);
v9ses->uid = ~0;
v9ses->dfltuid = V9FS_DEFUID;
v9ses->dfltgid = V9FS_DEFGID;
@@ -412,8 +420,8 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
kfree(v9ses->cachetag);
}
#endif
- __putname(v9ses->uname);
- __putname(v9ses->aname);
+ kfree(v9ses->uname);
+ kfree(v9ses->aname);
bdi_destroy(&v9ses->bdi);
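
The v9fs change above moves uname/aname from fixed PATH_MAX buffers (__getname()/__putname()) to heap strings, so every option-parsing case must free the previous value and bail out cleanly on allocation failure. A minimal sketch of that pattern, with a hypothetical session struct standing in for v9fs_session_info:

	#include <linux/parser.h>
	#include <linux/slab.h>

	struct demo_session {
		char *uname;	/* owned; kfree()d on update and at teardown */
	};

	static int demo_set_uname(struct demo_session *s, substring_t *arg)
	{
		kfree(s->uname);		/* kfree(NULL) is a no-op, so this is safe */
		s->uname = match_strdup(arg);	/* kmalloc-backed copy of the matched token */
		if (!s->uname)
			return -ENOMEM;		/* caller unwinds via its free_and_return path */
		return 0;
	}

Unlike match_strlcpy() into a name buffer, match_strdup() allocates exactly what the token needs, which is why the teardown path switches from __putname() to kfree().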
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index dd6f7ee1e312..c2483e97beee 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -738,6 +738,7 @@ v9fs_cached_file_write(struct file *filp, const char __user * data,
static const struct vm_operations_struct v9fs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = v9fs_vm_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index cbf9dbb1b2a2..890bed538f9b 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1276,12 +1276,12 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
}
/* copy extension buffer into buffer */
- strncpy(buffer, st->extension, buflen);
+ retval = min(strlen(st->extension)+1, (size_t)buflen);
+ memcpy(buffer, st->extension, retval);
- p9_debug(P9_DEBUG_VFS, "%s -> %s (%s)\n",
- dentry->d_name.name, st->extension, buffer);
+ p9_debug(P9_DEBUG_VFS, "%s -> %s (%.*s)\n",
+ dentry->d_name.name, st->extension, buflen, buffer);
- retval = strnlen(buffer, buflen);
done:
p9stat_free(st);
kfree(st);
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 022574202749..0efd1524b977 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -164,3 +164,11 @@ config BINFMT_MISC
You may say M here for module support and later load the module when
you have use for it; the module is called binfmt_misc. If you
don't know what to answer at this point, say Y.
+
+config COREDUMP
+ bool "Enable core dump support" if EXPERT
+ default y
+ help
+ This option enables support for performing core dumps. You almost
+ certainly want to say Y here. Not necessary on systems that never
+ need debugging or only ever run flawless code.
diff --git a/fs/Makefile b/fs/Makefile
index 8938f8250320..1d7af79288a0 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -11,7 +11,7 @@ obj-y := open.o read_write.o file_table.o super.o \
attr.o bad_inode.o file.o filesystems.o namespace.o \
seq_file.o xattr.o libfs.o fs-writeback.o \
pnode.o drop_caches.o splice.o sync.o utimes.o \
- stack.o fs_struct.o statfs.o coredump.o
+ stack.o fs_struct.o statfs.o
ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_FS_MBCACHE) += mbcache.o
obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
obj-$(CONFIG_NFS_COMMON) += nfs_common/
obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
+obj-$(CONFIG_COREDUMP) += coredump.o
obj-$(CONFIG_FHANDLE) += fhandle.o
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index e7396cfdb109..91b11650722e 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&sbi->fs_lock);
status = autofs4_mount_wait(dentry);
- if (status)
- return ERR_PTR(status);
spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_PENDING;
+ if (status) {
+ spin_unlock(&sbi->fs_lock);
+ return ERR_PTR(status);
+ }
}
done:
if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index d146e181d10d..0e7a6f81ae36 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -32,31 +32,8 @@
static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
static int load_aout_library(struct file*);
-static int aout_core_dump(struct coredump_params *cprm);
-
-static struct linux_binfmt aout_format = {
- .module = THIS_MODULE,
- .load_binary = load_aout_binary,
- .load_shlib = load_aout_library,
- .core_dump = aout_core_dump,
- .min_coredump = PAGE_SIZE
-};
-
-#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
-
-static int set_brk(unsigned long start, unsigned long end)
-{
- start = PAGE_ALIGN(start);
- end = PAGE_ALIGN(end);
- if (end > start) {
- unsigned long addr;
- addr = vm_brk(start, end - start);
- if (BAD_ADDR(addr))
- return addr;
- }
- return 0;
-}
+#ifdef CONFIG_COREDUMP
/*
* Routine writes a core dump image in the current directory.
* Currently only a stub-function.
@@ -66,7 +43,6 @@ static int set_brk(unsigned long start, unsigned long end)
* field, which also makes sure the core-dumps won't be recursive if the
* dumping of the process results in another error..
*/
-
static int aout_core_dump(struct coredump_params *cprm)
{
struct file *file = cprm->file;
@@ -89,7 +65,7 @@ static int aout_core_dump(struct coredump_params *cprm)
current->flags |= PF_DUMPCORE;
strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
dump.u_ar0 = offsetof(struct user, regs);
- dump.signal = cprm->signr;
+ dump.signal = cprm->siginfo->si_signo;
aout_dump_thread(cprm->regs, &dump);
/* If the size of the dump file exceeds the rlimit, then see what would happen
@@ -135,6 +111,32 @@ end_coredump:
set_fs(fs);
return has_dumped;
}
+#else
+#define aout_core_dump NULL
+#endif
+
+static struct linux_binfmt aout_format = {
+ .module = THIS_MODULE,
+ .load_binary = load_aout_binary,
+ .load_shlib = load_aout_library,
+ .core_dump = aout_core_dump,
+ .min_coredump = PAGE_SIZE
+};
+
+#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
+
+static int set_brk(unsigned long start, unsigned long end)
+{
+ start = PAGE_ALIGN(start);
+ end = PAGE_ALIGN(end);
+ if (end > start) {
+ unsigned long addr;
+ addr = vm_brk(start, end - start);
+ if (BAD_ADDR(addr))
+ return addr;
+ }
+ return 0;
+}
/*
* create_aout_tables() parses the env- and arg-strings in new user
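
With core dump support now optional, the a.out handler compiles its dump routine only under CONFIG_COREDUMP and otherwise plugs NULL into the binfmt's .core_dump hook; this relies on the generic coredump code skipping binfmts whose hook is NULL. The conditional-callback shape, reduced to a sketch with demo names:

	#ifdef CONFIG_COREDUMP
	static int demo_core_dump(struct coredump_params *cprm)
	{
		/* the real dumping work goes here */
		return 1;
	}
	#else
	#define demo_core_dump NULL	/* compiled out: hook is never invoked */
	#endif

	static struct linux_binfmt demo_format = {
		.module       = THIS_MODULE,
		.core_dump    = demo_core_dump,
		.min_coredump = PAGE_SIZE,
	};

This keeps the struct initializer unconditional while letting the dump code and its dependencies drop out of !CONFIG_COREDUMP builds.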
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 0225fddf49b7..fbd9f60bd763 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -27,6 +27,7 @@
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
@@ -35,7 +36,13 @@
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
-#include <asm/exec.h>
+
+#ifndef user_long_t
+#define user_long_t long
+#endif
+#ifndef user_siginfo_t
+#define user_siginfo_t siginfo_t
+#endif
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
@@ -881,7 +888,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
}
if (elf_interpreter) {
- unsigned long uninitialized_var(interp_map_addr);
+ unsigned long interp_map_addr = 0;
elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
@@ -1115,7 +1122,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
if (always_dump_vma(vma))
goto whole;
- if (vma->vm_flags & VM_NODUMP)
+ if (vma->vm_flags & VM_DONTDUMP)
return 0;
/* Hugetlb memory check */
@@ -1127,7 +1134,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
}
/* Do not dump I/O mapped devices or special mappings */
- if (vma->vm_flags & (VM_IO | VM_RESERVED))
+ if (vma->vm_flags & VM_IO)
return 0;
/* By default, dump shared memory if mapped from an anonymous file. */
@@ -1372,6 +1379,103 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
+static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
+ siginfo_t *siginfo)
+{
+ mm_segment_t old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
+ set_fs(old_fs);
+ fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
+}
+
+#define MAX_FILE_NOTE_SIZE (4*1024*1024)
+/*
+ * Format of NT_FILE note:
+ *
+ * long count -- how many files are mapped
+ * long page_size -- units for file_ofs
+ * array of [COUNT] elements of
+ * long start
+ * long end
+ * long file_ofs
+ * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
+ */
+static void fill_files_note(struct memelfnote *note)
+{
+ struct vm_area_struct *vma;
+ unsigned count, size, names_ofs, remaining, n;
+ user_long_t *data;
+ user_long_t *start_end_ofs;
+ char *name_base, *name_curpos;
+
+ /* *Estimated* file count and total data size needed */
+ count = current->mm->map_count;
+ size = count * 64;
+
+ names_ofs = (2 + 3 * count) * sizeof(data[0]);
+ alloc:
+ if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
+ goto err;
+ size = round_up(size, PAGE_SIZE);
+ data = vmalloc(size);
+ if (!data)
+ goto err;
+
+ start_end_ofs = data + 2;
+ name_base = name_curpos = ((char *)data) + names_ofs;
+ remaining = size - names_ofs;
+ count = 0;
+ for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+ struct file *file;
+ const char *filename;
+
+ file = vma->vm_file;
+ if (!file)
+ continue;
+ filename = d_path(&file->f_path, name_curpos, remaining);
+ if (IS_ERR(filename)) {
+ if (PTR_ERR(filename) == -ENAMETOOLONG) {
+ vfree(data);
+ size = size * 5 / 4;
+ goto alloc;
+ }
+ continue;
+ }
+
+ /* d_path() fills at the end, move name down */
+ /* n = strlen(filename) + 1: */
+ n = (name_curpos + remaining) - filename;
+ remaining = filename - name_curpos;
+ memmove(name_curpos, filename, n);
+ name_curpos += n;
+
+ *start_end_ofs++ = vma->vm_start;
+ *start_end_ofs++ = vma->vm_end;
+ *start_end_ofs++ = vma->vm_pgoff;
+ count++;
+ }
+
+ /* Now we know exact count of files, can store it */
+ data[0] = count;
+ data[1] = PAGE_SIZE;
+ /*
+ * Count usually is less than current->mm->map_count,
+ * we need to move filenames down.
+ */
+ n = current->mm->map_count - count;
+ if (n != 0) {
+ unsigned shift_bytes = n * 3 * sizeof(data[0]);
+ memmove(name_base - shift_bytes, name_base,
+ name_curpos - name_base);
+ name_curpos -= shift_bytes;
+ }
+
+ size = name_curpos - (char *)data;
+ fill_note(note, "CORE", NT_FILE, size, data);
+ err: ;
+}
+
#ifdef CORE_DUMP_USE_REGSET
#include <linux/regset.h>
@@ -1385,7 +1489,10 @@ struct elf_thread_core_info {
struct elf_note_info {
struct elf_thread_core_info *thread;
struct memelfnote psinfo;
+ struct memelfnote signote;
struct memelfnote auxv;
+ struct memelfnote files;
+ user_siginfo_t csigdata;
size_t size;
int thread_notes;
};
@@ -1480,7 +1587,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
- long signr, struct pt_regs *regs)
+ siginfo_t *siginfo, struct pt_regs *regs)
{
struct task_struct *dump_task = current;
const struct user_regset_view *view = task_user_regset_view(dump_task);
@@ -1550,7 +1657,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
* Now fill in each thread's information.
*/
for (t = info->thread; t != NULL; t = t->next)
- if (!fill_thread_core_info(t, view, signr, &info->size))
+ if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
return 0;
/*
@@ -1559,9 +1666,15 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
info->size += notesize(&info->psinfo);
+ fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
+ info->size += notesize(&info->signote);
+
fill_auxv_note(&info->auxv, current->mm);
info->size += notesize(&info->auxv);
+ fill_files_note(&info->files);
+ info->size += notesize(&info->files);
+
return 1;
}
@@ -1588,8 +1701,12 @@ static int write_note_info(struct elf_note_info *info,
if (first && !writenote(&info->psinfo, file, foffset))
return 0;
+ if (first && !writenote(&info->signote, file, foffset))
+ return 0;
if (first && !writenote(&info->auxv, file, foffset))
return 0;
+ if (first && !writenote(&info->files, file, foffset))
+ return 0;
for (i = 1; i < info->thread_notes; ++i)
if (t->notes[i].data &&
@@ -1616,6 +1733,7 @@ static void free_note_info(struct elf_note_info *info)
kfree(t);
}
kfree(info->psinfo.data);
+ vfree(info->files.data);
}
#else
@@ -1681,6 +1799,7 @@ struct elf_note_info {
#ifdef ELF_CORE_COPY_XFPREGS
elf_fpxregset_t *xfpu;
#endif
+ user_siginfo_t csigdata;
int thread_status_size;
int numnote;
};
@@ -1690,8 +1809,8 @@ static int elf_note_info_init(struct elf_note_info *info)
memset(info, 0, sizeof(*info));
INIT_LIST_HEAD(&info->thread_list);
- /* Allocate space for six ELF notes */
- info->notes = kmalloc(6 * sizeof(struct memelfnote), GFP_KERNEL);
+ /* Allocate space for ELF notes */
+ info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
if (!info->notes)
return 0;
info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
@@ -1713,14 +1832,14 @@ static int elf_note_info_init(struct elf_note_info *info)
static int fill_note_info(struct elfhdr *elf, int phdrs,
struct elf_note_info *info,
- long signr, struct pt_regs *regs)
+ siginfo_t *siginfo, struct pt_regs *regs)
{
struct list_head *t;
if (!elf_note_info_init(info))
return 0;
- if (signr) {
+ if (siginfo->si_signo) {
struct core_thread *ct;
struct elf_thread_status *ets;
@@ -1738,13 +1857,13 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
int sz;
ets = list_entry(t, struct elf_thread_status, list);
- sz = elf_dump_thread_status(signr, ets);
+ sz = elf_dump_thread_status(siginfo->si_signo, ets);
info->thread_status_size += sz;
}
}
/* now collect the dump for the current */
memset(info->prstatus, 0, sizeof(*info->prstatus));
- fill_prstatus(info->prstatus, current, signr);
+ fill_prstatus(info->prstatus, current, siginfo->si_signo);
elf_core_copy_regs(&info->prstatus->pr_reg, regs);
/* Set up header */
@@ -1761,9 +1880,11 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
sizeof(*info->psinfo), info->psinfo);
- info->numnote = 2;
+ fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
+ fill_auxv_note(info->notes + 3, current->mm);
+ fill_files_note(info->notes + 4);
- fill_auxv_note(&info->notes[info->numnote++], current->mm);
+ info->numnote = 5;
/* Try to dump the FPU. */
info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
@@ -1825,6 +1946,9 @@ static void free_note_info(struct elf_note_info *info)
kfree(list_entry(tmp, struct elf_thread_status, list));
}
+ /* Free data allocated by fill_files_note(): */
+ vfree(info->notes[4].data);
+
kfree(info->prstatus);
kfree(info->psinfo);
kfree(info->notes);
@@ -1951,7 +2075,7 @@ static int elf_core_dump(struct coredump_params *cprm)
* Collect all the non-memory information about the process for the
* notes. This also sets up the file header.
*/
- if (!fill_note_info(elf, e_phnum, &info, cprm->signr, cprm->regs))
+ if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
goto cleanup;
has_dumped = 1;
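
fill_files_note() above emits exactly the NT_FILE layout its comment documents: two longs (count, page_size), then count (start, end, file_ofs) triples, then count NUL-terminated filenames packed back to back. A user-space sketch of walking such a payload, assuming a native-word-size core (the longs are user_long_t, i.e. the dumped process's long) and with bounds checks omitted for brevity:

	#include <stdio.h>
	#include <string.h>

	/* data points at an NT_FILE note payload from a core file's PT_NOTE segment */
	static void dump_nt_file(const long *data)
	{
		long i, count = data[0];
		long page_size = data[1];		/* units for file_ofs */
		const long *triple = data + 2;
		const char *name = (const char *)(data + 2 + 3 * count);

		for (i = 0; i < count; i++, triple += 3) {
			printf("%#lx-%#lx @ file offset %ld: %s\n",
			       triple[0], triple[1],
			       triple[2] * page_size, name);
			name += strlen(name) + 1;	/* filenames are NUL-separated */
		}
	}

Note the writer may emit fewer entries than map_count (anonymous mappings contribute nothing), which is why it shifts the filename block down before storing the final count in data[0].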
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 3d77cf81ba3c..a46049154107 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -39,7 +39,6 @@
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/pgalloc.h>
-#include <asm/exec.h>
typedef char *elf_caddr_t;
@@ -1205,7 +1204,7 @@ static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
int dump_ok;
/* Do not dump I/O mapped devices or special mappings */
- if (vma->vm_flags & (VM_IO | VM_RESERVED)) {
+ if (vma->vm_flags & VM_IO) {
kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
return 0;
}
@@ -1642,7 +1641,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
goto cleanup;
#endif
- if (cprm->signr) {
+ if (cprm->siginfo->si_signo) {
struct core_thread *ct;
struct elf_thread_status *tmp;
@@ -1661,13 +1660,13 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
int sz;
tmp = list_entry(t, struct elf_thread_status, list);
- sz = elf_dump_thread_status(cprm->signr, tmp);
+ sz = elf_dump_thread_status(cprm->siginfo->si_signo, tmp);
thread_status_size += sz;
}
}
/* now collect the dump for the current */
- fill_prstatus(prstatus, current, cprm->signr);
+ fill_prstatus(prstatus, current, cprm->siginfo->si_signo);
elf_core_copy_regs(&prstatus->pr_reg, cprm->regs);
segs = current->mm->map_count;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 178cb70acc26..e280352b28f9 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -107,7 +107,7 @@ static struct linux_binfmt flat_format = {
static int flat_core_dump(struct coredump_params *cprm)
{
printk("Process %s:%d received signr %d and should have core dumped\n",
- current->comm, current->pid, (int) cprm->signr);
+ current->comm, current->pid, (int) cprm->siginfo->si_signo);
return(1);
}
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index e85c04b9f61c..a3f28f331b2b 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -70,23 +70,25 @@ static inline int use_bip_pool(unsigned int idx)
}
/**
- * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
* @gfp_mask: Memory allocation mask
* @nr_vecs: Number of integrity metadata scatter-gather elements
- * @bs: bio_set to allocate from
*
* Description: This function prepares a bio for attaching integrity
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
-struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs,
- struct bio_set *bs)
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs)
{
struct bio_integrity_payload *bip;
unsigned int idx = vecs_to_idx(nr_vecs);
+ struct bio_set *bs = bio->bi_pool;
+
+ if (!bs)
+ bs = fs_bio_set;
BUG_ON(bio == NULL);
bip = NULL;
@@ -114,37 +116,22 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
return bip;
}
-EXPORT_SYMBOL(bio_integrity_alloc_bioset);
-
-/**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
- * @bio: bio to attach integrity metadata to
- * @gfp_mask: Memory allocation mask
- * @nr_vecs: Number of integrity metadata scatter-gather elements
- *
- * Description: This function prepares a bio for attaching integrity
- * metadata. nr_vecs specifies the maximum number of pages containing
- * integrity metadata that can be attached.
- */
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs)
-{
- return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
-}
EXPORT_SYMBOL(bio_integrity_alloc);
/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
- * @bs: bio_set this bio was allocated from
*
* Description: Used to free the integrity portion of a bio. Usually
* called from bio_free().
*/
-void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+void bio_integrity_free(struct bio *bio)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
+ struct bio_set *bs = bio->bi_pool;
+
+ if (!bs)
+ bs = fs_bio_set;
BUG_ON(bip == NULL);
@@ -730,19 +717,18 @@ EXPORT_SYMBOL(bio_integrity_split);
* @bio: New bio
* @bio_src: Original bio
* @gfp_mask: Memory allocation mask
- * @bs: bio_set to allocate bip from
*
* Description: Called to allocate a bip when cloning a bio
*/
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
- gfp_t gfp_mask, struct bio_set *bs)
+ gfp_t gfp_mask)
{
struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
struct bio_integrity_payload *bip;
BUG_ON(bip_src == NULL);
- bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
+ bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
if (bip == NULL)
return -EIO;
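
After this change, integrity payloads are allocated from the bio's own bi_pool (falling back to fs_bio_set), so callers no longer name a bio_set explicitly. A before/after usage fragment, grounded in the signatures above:

	/* before: caller had to remember which pool the bip came from */
	bip = bio_integrity_alloc_bioset(bio, GFP_NOIO, nr_vecs, bs);
	...
	bio_integrity_free(bio, bs);

	/* after: the pool travels with the bio in bio->bi_pool */
	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_vecs);
	...
	bio_integrity_free(bio);

This removes a class of bugs where the free path passed a different bio_set than the allocation path.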
diff --git a/fs/bio.c b/fs/bio.c
index 71072ab99128..9298c65ad9c7 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -55,6 +55,7 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
* IO code that does not need private memory pools.
*/
struct bio_set *fs_bio_set;
+EXPORT_SYMBOL(fs_bio_set);
/*
* Our slab pool management
@@ -233,26 +234,37 @@ fallback:
return bvl;
}
-void bio_free(struct bio *bio, struct bio_set *bs)
+static void __bio_free(struct bio *bio)
{
+ bio_disassociate_task(bio);
+
+ if (bio_integrity(bio))
+ bio_integrity_free(bio);
+}
+
+static void bio_free(struct bio *bio)
+{
+ struct bio_set *bs = bio->bi_pool;
void *p;
- if (bio_has_allocated_vec(bio))
- bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
+ __bio_free(bio);
- if (bio_integrity(bio))
- bio_integrity_free(bio, bs);
+ if (bs) {
+ if (bio_has_allocated_vec(bio))
+ bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
- /*
- * If we have front padding, adjust the bio pointer before freeing
- */
- p = bio;
- if (bs->front_pad)
+ /*
+ * If we have front padding, adjust the bio pointer before freeing
+ */
+ p = bio;
p -= bs->front_pad;
- mempool_free(p, bs->bio_pool);
+ mempool_free(p, bs->bio_pool);
+ } else {
+ /* Bio was allocated by bio_kmalloc() */
+ kfree(bio);
+ }
}
-EXPORT_SYMBOL(bio_free);
void bio_init(struct bio *bio)
{
@@ -263,48 +275,85 @@ void bio_init(struct bio *bio)
EXPORT_SYMBOL(bio_init);
/**
+ * bio_reset - reinitialize a bio
+ * @bio: bio to reset
+ *
+ * Description:
+ * After calling bio_reset(), @bio will be in the same state as a freshly
+ * allocated bio returned by bio_alloc_bioset() - the only fields that are
+ * preserved are the ones that are initialized by bio_alloc_bioset(). See
+ * comment in struct bio.
+ */
+void bio_reset(struct bio *bio)
+{
+ unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
+
+ __bio_free(bio);
+
+ memset(bio, 0, BIO_RESET_BYTES);
+ bio->bi_flags = flags|(1 << BIO_UPTODATE);
+}
+EXPORT_SYMBOL(bio_reset);
+
+/**
* bio_alloc_bioset - allocate a bio for I/O
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
* @bs: the bio_set to allocate from.
*
* Description:
- * bio_alloc_bioset will try its own mempool to satisfy the allocation.
- * If %__GFP_WAIT is set then we will block on the internal pool waiting
- * for a &struct bio to become free.
+ * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ * backed by the @bs's mempool.
*
- * Note that the caller must set ->bi_destructor on successful return
- * of a bio, to do the appropriate freeing of the bio once the reference
- * count drops to zero.
- **/
+ * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
+ * able to allocate a bio. This is due to the mempool guarantees. To make this
+ * work, callers must never allocate more than 1 bio at a time from this pool.
+ * Callers that need to allocate more than 1 bio must always submit the
+ * previously allocated bio for IO before attempting to allocate a new one.
+ * Failure to do so can cause deadlocks under memory pressure.
+ *
+ * RETURNS:
+ * Pointer to new bio on success, NULL on failure.
+ */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
+ unsigned front_pad;
+ unsigned inline_vecs;
unsigned long idx = BIO_POOL_NONE;
struct bio_vec *bvl = NULL;
struct bio *bio;
void *p;
- p = mempool_alloc(bs->bio_pool, gfp_mask);
+ if (!bs) {
+ if (nr_iovecs > UIO_MAXIOV)
+ return NULL;
+
+ p = kmalloc(sizeof(struct bio) +
+ nr_iovecs * sizeof(struct bio_vec),
+ gfp_mask);
+ front_pad = 0;
+ inline_vecs = nr_iovecs;
+ } else {
+ p = mempool_alloc(bs->bio_pool, gfp_mask);
+ front_pad = bs->front_pad;
+ inline_vecs = BIO_INLINE_VECS;
+ }
+
if (unlikely(!p))
return NULL;
- bio = p + bs->front_pad;
+ bio = p + front_pad;
bio_init(bio);
- if (unlikely(!nr_iovecs))
- goto out_set;
-
- if (nr_iovecs <= BIO_INLINE_VECS) {
- bvl = bio->bi_inline_vecs;
- nr_iovecs = BIO_INLINE_VECS;
- } else {
+ if (nr_iovecs > inline_vecs) {
bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
if (unlikely(!bvl))
goto err_free;
-
- nr_iovecs = bvec_nr_vecs(idx);
+ } else if (nr_iovecs) {
+ bvl = bio->bi_inline_vecs;
}
-out_set:
+
+ bio->bi_pool = bs;
bio->bi_flags |= idx << BIO_POOL_OFFSET;
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bvl;
@@ -316,80 +365,6 @@ err_free:
}
EXPORT_SYMBOL(bio_alloc_bioset);
-static void bio_fs_destructor(struct bio *bio)
-{
- bio_free(bio, fs_bio_set);
-}
-
-/**
- * bio_alloc - allocate a new bio, memory pool backed
- * @gfp_mask: allocation mask to use
- * @nr_iovecs: number of iovecs
- *
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one. Failure to
- * do so can cause livelocks under memory pressure.
- *
- * RETURNS:
- * Pointer to new bio on success, NULL on failure.
- */
-struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-
- if (bio)
- bio->bi_destructor = bio_fs_destructor;
-
- return bio;
-}
-EXPORT_SYMBOL(bio_alloc);
-
-static void bio_kmalloc_destructor(struct bio *bio)
-{
- if (bio_integrity(bio))
- bio_integrity_free(bio, fs_bio_set);
- kfree(bio);
-}
-
-/**
- * bio_kmalloc - allocate a bio for I/O using kmalloc()
- * @gfp_mask: the GFP_ mask given to the slab allocator
- * @nr_iovecs: number of iovecs to pre-allocate
- *
- * Description:
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
- * %__GFP_WAIT, the allocation is guaranteed to succeed.
- *
- **/
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- struct bio *bio;
-
- if (nr_iovecs > UIO_MAXIOV)
- return NULL;
-
- bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
- gfp_mask);
- if (unlikely(!bio))
- return NULL;
-
- bio_init(bio);
- bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
- bio->bi_max_vecs = nr_iovecs;
- bio->bi_io_vec = bio->bi_inline_vecs;
- bio->bi_destructor = bio_kmalloc_destructor;
-
- return bio;
-}
-EXPORT_SYMBOL(bio_kmalloc);
-
void zero_fill_bio(struct bio *bio)
{
unsigned long flags;
@@ -420,11 +395,8 @@ void bio_put(struct bio *bio)
/*
* last put frees it
*/
- if (atomic_dec_and_test(&bio->bi_cnt)) {
- bio_disassociate_task(bio);
- bio->bi_next = NULL;
- bio->bi_destructor(bio);
- }
+ if (atomic_dec_and_test(&bio->bi_cnt))
+ bio_free(bio);
}
EXPORT_SYMBOL(bio_put);
@@ -466,26 +438,28 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
EXPORT_SYMBOL(__bio_clone);
/**
- * bio_clone - clone a bio
+ * bio_clone_bioset - clone a bio
* @bio: bio to clone
* @gfp_mask: allocation priority
+ * @bs: bio_set to allocate from
*
* Like __bio_clone, only also allocates the returned bio
*/
-struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
+struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
+ struct bio_set *bs)
{
- struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
+ struct bio *b;
+ b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
if (!b)
return NULL;
- b->bi_destructor = bio_fs_destructor;
__bio_clone(b, bio);
if (bio_integrity(bio)) {
int ret;
- ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
+ ret = bio_integrity_clone(b, bio, gfp_mask);
if (ret < 0) {
bio_put(b);
@@ -495,7 +469,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
return b;
}
-EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_clone_bioset);
/**
* bio_get_nr_vecs - return approx number of vecs
@@ -1501,7 +1475,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
bi->bi_sector + first_sectors);
- BUG_ON(bi->bi_vcnt != 1);
+ BUG_ON(bi->bi_vcnt != 1 && bi->bi_vcnt != 0);
BUG_ON(bi->bi_idx != 0);
atomic_set(&bp->cnt, 3);
bp->error = 0;
@@ -1511,17 +1485,22 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
bp->bio2.bi_size -= first_sectors << 9;
bp->bio1.bi_size = first_sectors << 9;
- bp->bv1 = bi->bi_io_vec[0];
- bp->bv2 = bi->bi_io_vec[0];
- bp->bv2.bv_offset += first_sectors << 9;
- bp->bv2.bv_len -= first_sectors << 9;
- bp->bv1.bv_len = first_sectors << 9;
+ if (bi->bi_vcnt != 0) {
+ bp->bv1 = bi->bi_io_vec[0];
+ bp->bv2 = bi->bi_io_vec[0];
+
+ if (bio_is_rw(bi)) {
+ bp->bv2.bv_offset += first_sectors << 9;
+ bp->bv2.bv_len -= first_sectors << 9;
+ bp->bv1.bv_len = first_sectors << 9;
+ }
- bp->bio1.bi_io_vec = &bp->bv1;
- bp->bio2.bi_io_vec = &bp->bv2;
+ bp->bio1.bi_io_vec = &bp->bv1;
+ bp->bio2.bi_io_vec = &bp->bv2;
- bp->bio1.bi_max_vecs = 1;
- bp->bio2.bi_max_vecs = 1;
+ bp->bio1.bi_max_vecs = 1;
+ bp->bio2.bi_max_vecs = 1;
+ }
bp->bio1.bi_end_io = bio_pair_end_1;
bp->bio2.bi_end_io = bio_pair_end_2;
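
The rewritten bio_alloc_bioset() comment carries a real constraint: with __GFP_WAIT, a mempool-backed allocation is only guaranteed to make progress if no caller ever sits on more than one un-submitted bio from that pool, because the waiter may be depending on its own earlier bio completing to refill the pool. A sketch of the safe loop shape for a driver splitting work across many bios (the helper name and chunking are invented):

	/* never hold two un-submitted bios from 'bs' at once */
	static void demo_submit_range(struct bio_set *bs, sector_t sector, unsigned int nr)
	{
		while (nr) {
			unsigned int chunk = min(nr, 256u);
			struct bio *bio;

			/* may sleep waiting on the mempool, but cannot deadlock:
			 * we hold no other bio from this pool while waiting */
			bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
			bio->bi_sector = sector;
			/* ... add pages for 'chunk' sectors, set bi_end_io ... */

			submit_bio(WRITE, bio);	/* submit before allocating the next one */

			sector += chunk;
			nr -= chunk;
		}
	}

Passing bs == NULL instead falls back to kmalloc(), where the mempool guarantee (and the one-bio rule) does not apply but the allocation can fail and must be checked.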
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 38e721b35d45..1a1e5e3b1eaf 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -116,6 +116,8 @@ EXPORT_SYMBOL(invalidate_bdev);
int set_blocksize(struct block_device *bdev, int size)
{
+ struct address_space *mapping;
+
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
return -EINVAL;
@@ -124,6 +126,19 @@ int set_blocksize(struct block_device *bdev, int size)
if (size < bdev_logical_block_size(bdev))
return -EINVAL;
+ /* Prevent starting I/O or mapping the device */
+ percpu_down_write(&bdev->bd_block_size_semaphore);
+
+ /* Check that the block device is not memory mapped */
+ mapping = bdev->bd_inode->i_mapping;
+ mutex_lock(&mapping->i_mmap_mutex);
+ if (mapping_mapped(mapping)) {
+ mutex_unlock(&mapping->i_mmap_mutex);
+ percpu_up_write(&bdev->bd_block_size_semaphore);
+ return -EBUSY;
+ }
+ mutex_unlock(&mapping->i_mmap_mutex);
+
/* Don't change the size if it is same as current */
if (bdev->bd_block_size != size) {
sync_blockdev(bdev);
@@ -131,6 +146,9 @@ int set_blocksize(struct block_device *bdev, int size)
bdev->bd_inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
}
+
+ percpu_up_write(&bdev->bd_block_size_semaphore);
+
return 0;
}
@@ -441,6 +459,12 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
if (!ei)
return NULL;
+
+ if (unlikely(percpu_init_rwsem(&ei->bdev.bd_block_size_semaphore))) {
+ kmem_cache_free(bdev_cachep, ei);
+ return NULL;
+ }
+
return &ei->vfs_inode;
}
@@ -449,6 +473,8 @@ static void bdev_i_callback(struct rcu_head *head)
struct inode *inode = container_of(head, struct inode, i_rcu);
struct bdev_inode *bdi = BDEV_I(inode);
+ percpu_free_rwsem(&bdi->bdev.bd_block_size_semaphore);
+
kmem_cache_free(bdev_cachep, bdi);
}
@@ -1567,6 +1593,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return blkdev_ioctl(bdev, mode, cmd, arg);
}
+ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ ssize_t ret;
+ struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_aio_read);
+
/*
* Write data to the block device. Only intended for the block device itself
* and the raw driver which basically is a fake block device.
@@ -1578,12 +1620,16 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
struct blk_plug plug;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
blk_start_plug(&plug);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
@@ -1592,11 +1638,62 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err < 0 && ret > 0)
ret = err;
}
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
blk_finish_plug(&plug);
+
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_aio_write);
+static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_mmap(file, vma);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+
+static ssize_t blkdev_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ ssize_t ret;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_splice_read(file, ppos, pipe, len, flags);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+
+static ssize_t blkdev_splice_write(struct pipe_inode_info *pipe,
+ struct file *file, loff_t *ppos, size_t len,
+ unsigned int flags)
+{
+ ssize_t ret;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_splice_write(pipe, file, ppos, len, flags);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+
+
/*
* Try to release a page associated with block device when the system
* is under memory pressure.
@@ -1627,16 +1724,16 @@ const struct file_operations def_blk_fops = {
.llseek = block_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
+ .aio_read = blkdev_aio_read,
.aio_write = blkdev_aio_write,
- .mmap = generic_file_mmap,
+ .mmap = blkdev_mmap,
.fsync = blkdev_fsync,
.unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_blkdev_ioctl,
#endif
- .splice_read = generic_file_splice_read,
- .splice_write = generic_file_splice_write,
+ .splice_read = blkdev_splice_read,
+ .splice_write = blkdev_splice_write,
};
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
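
Taken together, the block_dev.c hunks implement a classic read-mostly exclusion scheme: every I/O entry point (read, write, mmap, splice) brackets its work with percpu_down_read()/percpu_up_read() on bd_block_size_semaphore, while set_blocksize() takes the write side, so a block-size change drains in-flight I/O and briefly blocks new submissions. The shape in isolation, as a sketch with a hypothetical device struct (the semaphore must be set up with percpu_init_rwsem() and released with percpu_free_rwsem(), as bdev_alloc_inode()/bdev_i_callback() do above):

	#include <linux/percpu-rwsem.h>

	struct demo_dev {
		struct percpu_rw_semaphore cfg_sem;
		unsigned int block_size;
	};

	/* hot path: per-cpu read lock, no cache-line bouncing between readers */
	static int demo_do_io(struct demo_dev *d)
	{
		int ret;

		percpu_down_read(&d->cfg_sem);
		ret = 0;	/* ... perform I/O against a stable d->block_size ... */
		percpu_up_read(&d->cfg_sem);
		return ret;
	}

	/* cold path: writer waits out all readers, then reconfigures exclusively */
	static void demo_set_block_size(struct demo_dev *d, unsigned int size)
	{
		percpu_down_write(&d->cfg_sem);
		d->block_size = size;	/* no reader can observe a half-applied change */
		percpu_up_write(&d->cfg_sem);
	}

The asymmetry is deliberate: reads are frequent and must stay cheap, while percpu_down_write() is expensive because it synchronizes against all CPUs, which matches how rarely a block size changes.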
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ff6475f409d6..208d8aa5b07e 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -16,6 +16,7 @@
* Boston, MA 021110-1307, USA.
*/
+#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
@@ -231,7 +232,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
}
if (!ret) {
ret = ulist_add(parents, eb->start,
- (unsigned long)eie, GFP_NOFS);
+ (uintptr_t)eie, GFP_NOFS);
if (ret < 0)
break;
if (!extent_item_pos) {
@@ -282,9 +283,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
goto out;
}
- rcu_read_lock();
- root_level = btrfs_header_level(root->node);
- rcu_read_unlock();
+ root_level = btrfs_old_root_level(root, time_seq);
if (root_level + 1 == level)
goto out;
@@ -363,8 +362,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
ULIST_ITER_INIT(&uiter);
node = ulist_next(parents, &uiter);
ref->parent = node ? node->val : 0;
- ref->inode_list =
- node ? (struct extent_inode_elem *)node->aux : 0;
+ ref->inode_list = node ?
+ (struct extent_inode_elem *)(uintptr_t)node->aux : 0;
/* additional parents require new refs being added here */
while ((node = ulist_next(parents, &uiter))) {
@@ -375,8 +374,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
}
memcpy(new_ref, ref, sizeof(*ref));
new_ref->parent = node->val;
- new_ref->inode_list =
- (struct extent_inode_elem *)node->aux;
+ new_ref->inode_list = (struct extent_inode_elem *)
+ (uintptr_t)node->aux;
list_add(&new_ref->list, &ref->list);
}
ulist_reinit(parents);
@@ -914,8 +913,8 @@ again:
free_extent_buffer(eb);
}
ret = ulist_add_merge(refs, ref->parent,
- (unsigned long)ref->inode_list,
- (unsigned long *)&eie, GFP_NOFS);
+ (uintptr_t)ref->inode_list,
+ (u64 *)&eie, GFP_NOFS);
if (!ret && extent_item_pos) {
/*
* we've recorded that parent, so we must extend
@@ -959,7 +958,7 @@ static void free_leaf_list(struct ulist *blocks)
while ((node = ulist_next(blocks, &uiter))) {
if (!node->aux)
continue;
- eie = (struct extent_inode_elem *)node->aux;
+ eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
for (; eie; eie = eie_next) {
eie_next = eie->next;
kfree(eie);
@@ -1108,44 +1107,97 @@ static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
found_key);
}
-/*
- * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
- * of the path are separated by '/' and the path is guaranteed to be
- * 0-terminated. the path is only given within the current file system.
- * Therefore, it never starts with a '/'. the caller is responsible to provide
- * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
- * the start point of the resulting string is returned. this pointer is within
- * dest, normally.
- * in case the path buffer would overflow, the pointer is decremented further
- * as if output was written to the buffer, though no more output is actually
- * generated. that way, the caller can determine how much space would be
- * required for the path to fit into the buffer. in that case, the returned
- * value will be smaller than dest. callers must check this!
- */
-char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
- struct btrfs_inode_ref *iref,
- struct extent_buffer *eb_in, u64 parent,
- char *dest, u32 size)
+int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
+ u64 start_off, struct btrfs_path *path,
+ struct btrfs_inode_extref **ret_extref,
+ u64 *found_off)
+{
+ int ret, slot;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_inode_extref *extref;
+ struct extent_buffer *leaf;
+ unsigned long ptr;
+
+ key.objectid = inode_objectid;
+ btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
+ key.offset = start_off;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ while (1) {
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ if (slot >= btrfs_header_nritems(leaf)) {
+ /*
+ * If the item at offset is not found,
+ * btrfs_search_slot will point us to the slot
+ * where it should be inserted. In our case
+ * that will be the slot directly before the
+ * next INODE_REF_KEY_V2 item. In the case
+ * that we're pointing to the last slot in a
+ * leaf, we must move one leaf over.
+ */
+ ret = btrfs_next_leaf(root, path);
+ if (ret) {
+ if (ret >= 1)
+ ret = -ENOENT;
+ break;
+ }
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+ /*
+ * Check that we're still looking at an extended ref key for
+ * this particular objectid. If we have different
+ * objectid or type then there are no more to be found
+ * in the tree and we can exit.
+ */
+ ret = -ENOENT;
+ if (found_key.objectid != inode_objectid)
+ break;
+ if (btrfs_key_type(&found_key) != BTRFS_INODE_EXTREF_KEY)
+ break;
+
+ ret = 0;
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ extref = (struct btrfs_inode_extref *)ptr;
+ *ret_extref = extref;
+ if (found_off)
+ *found_off = found_key.offset;
+ break;
+ }
+
+ return ret;
+}
+
+char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ u32 name_len, unsigned long name_off,
+ struct extent_buffer *eb_in, u64 parent,
+ char *dest, u32 size)
{
- u32 len;
int slot;
u64 next_inum;
int ret;
- s64 bytes_left = size - 1;
+ s64 bytes_left = ((s64)size) - 1;
struct extent_buffer *eb = eb_in;
struct btrfs_key found_key;
int leave_spinning = path->leave_spinning;
+ struct btrfs_inode_ref *iref;
if (bytes_left >= 0)
dest[bytes_left] = '\0';
path->leave_spinning = 1;
while (1) {
- len = btrfs_inode_ref_name_len(eb, iref);
- bytes_left -= len;
+ bytes_left -= name_len;
if (bytes_left >= 0)
read_extent_buffer(eb, dest + bytes_left,
- (unsigned long)(iref + 1), len);
+ name_off, name_len);
if (eb != eb_in) {
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
@@ -1155,6 +1207,7 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
ret = -ENOENT;
if (ret)
break;
+
next_inum = found_key.offset;
/* regular exit ahead */
@@ -1170,8 +1223,11 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
}
btrfs_release_path(path);
-
iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+
+ name_len = btrfs_inode_ref_name_len(eb, iref);
+ name_off = (unsigned long)(iref + 1);
+
parent = next_inum;
--bytes_left;
if (bytes_left >= 0)
@@ -1188,12 +1244,39 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
}
/*
+ * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
+ * of the path are separated by '/' and the path is guaranteed to be
+ * 0-terminated. the path is only given within the current file system.
+ * Therefore, it never starts with a '/'. the caller is responsible to provide
+ * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
+ * the start point of the resulting string is returned. this pointer is within
+ * dest, normally.
+ * in case the path buffer would overflow, the pointer is decremented further
+ * as if output was written to the buffer, though no more output is actually
+ * generated. that way, the caller can determine how much space would be
+ * required for the path to fit into the buffer. in that case, the returned
+ * value will be smaller than dest. callers must check this!
+ */
+char *btrfs_iref_to_path(struct btrfs_root *fs_root,
+ struct btrfs_path *path,
+ struct btrfs_inode_ref *iref,
+ struct extent_buffer *eb_in, u64 parent,
+ char *dest, u32 size)
+{
+ return btrfs_ref_to_path(fs_root, path,
+ btrfs_inode_ref_name_len(eb_in, iref),
+ (unsigned long)(iref + 1),
+ eb_in, parent, dest, size);
+}
+
+/*
* this makes the path point to (logical EXTENT_ITEM *)
* returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
* tree blocks and <0 on error.
*/
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
- struct btrfs_path *path, struct btrfs_key *found_key)
+ struct btrfs_path *path, struct btrfs_key *found_key,
+ u64 *flags_ret)
{
int ret;
u64 flags;
@@ -1237,10 +1320,17 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
(unsigned long long)found_key->objectid,
(unsigned long long)found_key->offset,
(unsigned long long)flags, item_size);
- if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
- return BTRFS_EXTENT_FLAG_TREE_BLOCK;
- if (flags & BTRFS_EXTENT_FLAG_DATA)
- return BTRFS_EXTENT_FLAG_DATA;
+
+ WARN_ON(!flags_ret);
+ if (flags_ret) {
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
+ else if (flags & BTRFS_EXTENT_FLAG_DATA)
+ *flags_ret = BTRFS_EXTENT_FLAG_DATA;
+ else
+ BUG_ON(1);
+ return 0;
+ }
return -EIO;
}
@@ -1404,12 +1494,13 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
ULIST_ITER_INIT(&root_uiter);
while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
pr_debug("root %llu references leaf %llu, data list "
- "%#lx\n", root_node->val, ref_node->val,
- ref_node->aux);
- ret = iterate_leaf_refs(
- (struct extent_inode_elem *)ref_node->aux,
- root_node->val, extent_item_objectid,
- iterate, ctx);
+ "%#llx\n", root_node->val, ref_node->val,
+ (long long)ref_node->aux);
+ ret = iterate_leaf_refs((struct extent_inode_elem *)
+ (uintptr_t)ref_node->aux,
+ root_node->val,
+ extent_item_objectid,
+ iterate, ctx);
}
ulist_free(roots);
roots = NULL;
@@ -1432,15 +1523,15 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
{
int ret;
u64 extent_item_pos;
+ u64 flags = 0;
struct btrfs_key found_key;
int search_commit_root = path->search_commit_root;
- ret = extent_from_logical(fs_info, logical, path,
- &found_key);
+ ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
btrfs_release_path(path);
if (ret < 0)
return ret;
- if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
return -EINVAL;
extent_item_pos = logical - found_key.objectid;
@@ -1451,9 +1542,12 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
return ret;
}
-static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
- struct btrfs_path *path,
- iterate_irefs_t *iterate, void *ctx)
+typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
+ struct extent_buffer *eb, void *ctx);
+
+static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
+ struct btrfs_path *path,
+ iterate_irefs_t *iterate, void *ctx)
{
int ret = 0;
int slot;
@@ -1470,7 +1564,7 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
while (!ret) {
path->leave_spinning = 1;
ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
- &found_key);
+ &found_key);
if (ret < 0)
break;
if (ret) {
@@ -1498,7 +1592,8 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
"tree %llu\n", cur,
(unsigned long long)found_key.objectid,
(unsigned long long)fs_root->objectid);
- ret = iterate(parent, iref, eb, ctx);
+ ret = iterate(parent, name_len,
+ (unsigned long)(iref + 1), eb, ctx);
if (ret)
break;
len = sizeof(*iref) + name_len;
@@ -1513,12 +1608,98 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
return ret;
}
+static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ struct btrfs_path *path,
+ iterate_irefs_t *iterate, void *ctx)
+{
+ int ret;
+ int slot;
+ u64 offset = 0;
+ u64 parent;
+ int found = 0;
+ struct extent_buffer *eb;
+ struct btrfs_inode_extref *extref;
+ struct extent_buffer *leaf;
+ u32 item_size;
+ u32 cur_offset;
+ unsigned long ptr;
+
+ while (1) {
+ ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
+ &offset);
+ if (ret < 0)
+ break;
+ if (ret) {
+ ret = found ? 0 : -ENOENT;
+ break;
+ }
+ ++found;
+
+ slot = path->slots[0];
+ eb = path->nodes[0];
+ /* make sure we can use eb after releasing the path */
+ atomic_inc(&eb->refs);
+
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_release_path(path);
+
+ leaf = path->nodes[0];
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+ cur_offset = 0;
+
+ while (cur_offset < item_size) {
+ u32 name_len;
+
+ extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
+ parent = btrfs_inode_extref_parent(eb, extref);
+ name_len = btrfs_inode_extref_name_len(eb, extref);
+ ret = iterate(parent, name_len,
+ (unsigned long)&extref->name, eb, ctx);
+ if (ret)
+ break;
+
+ cur_offset += btrfs_inode_extref_name_len(leaf, extref);
+ cur_offset += sizeof(*extref);
+ }
+ btrfs_tree_read_unlock_blocking(eb);
+ free_extent_buffer(eb);
+
+ offset++;
+ }
+
+ btrfs_release_path(path);
+
+ return ret;
+}
+
+static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
+ struct btrfs_path *path, iterate_irefs_t *iterate,
+ void *ctx)
+{
+ int ret;
+ int found_refs = 0;
+
+ ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
+ if (!ret)
+ ++found_refs;
+ else if (ret != -ENOENT)
+ return ret;
+
+ ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
+ if (ret == -ENOENT && found_refs)
+ return 0;
+
+ return ret;
+}
+
/*
* returns 0 if the path could be dumped (probably truncated)
* returns <0 in case of an error
*/
-static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
- struct extent_buffer *eb, void *ctx)
+static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
+ struct extent_buffer *eb, void *ctx)
{
struct inode_fs_paths *ipath = ctx;
char *fspath;
@@ -1531,20 +1712,16 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
ipath->fspath->bytes_left - s_ptr : 0;
fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
- fspath = btrfs_iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
- inum, fspath_min, bytes_left);
+ fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
+ name_off, eb, inum, fspath_min, bytes_left);
if (IS_ERR(fspath))
return PTR_ERR(fspath);
if (fspath > fspath_min) {
- pr_debug("path resolved: %s\n", fspath);
ipath->fspath->val[i] = (u64)(unsigned long)fspath;
++ipath->fspath->elem_cnt;
ipath->fspath->bytes_left = fspath - fspath_min;
} else {
- pr_debug("missed path, not enough space. missing bytes: %lu, "
- "constructed so far: %s\n",
- (unsigned long)(fspath_min - fspath), fspath_min);
++ipath->fspath->elem_missed;
ipath->fspath->bytes_missing += fspath_min - fspath;
ipath->fspath->bytes_left = 0;
@@ -1566,7 +1743,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
- inode_to_path, ipath);
+ inode_to_path, ipath);
}
struct btrfs_data_container *init_data_container(u32 total_bytes)
@@ -1575,7 +1752,7 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
size_t alloc_bytes;
alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
- data = kmalloc(alloc_bytes, GFP_NOFS);
+ data = vmalloc(alloc_bytes);
if (!data)
return ERR_PTR(-ENOMEM);
@@ -1626,6 +1803,6 @@ void free_ipath(struct inode_fs_paths *ipath)
{
if (!ipath)
return;
- kfree(ipath->fspath);
+ vfree(ipath->fspath);
kfree(ipath);
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 032f4dc7eab8..d61feca79455 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -33,14 +33,13 @@ struct inode_fs_paths {
typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
void *ctx);
-typedef int (iterate_irefs_t)(u64 parent, struct btrfs_inode_ref *iref,
- struct extent_buffer *eb, void *ctx);
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
struct btrfs_path *path);
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
- struct btrfs_path *path, struct btrfs_key *found_key);
+ struct btrfs_path *path, struct btrfs_key *found_key,
+ u64 *flags);
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
struct btrfs_extent_item *ei, u32 item_size,
@@ -63,10 +62,19 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
struct btrfs_inode_ref *iref, struct extent_buffer *eb,
u64 parent, char *dest, u32 size);
+char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ u32 name_len, unsigned long name_off,
+ struct extent_buffer *eb_in, u64 parent,
+ char *dest, u32 size);
struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
struct btrfs_path *path);
void free_ipath(struct inode_fs_paths *ipath);
+int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
+ u64 start_off, struct btrfs_path *path,
+ struct btrfs_inode_extref **ret_extref,
+ u64 *found_off);
+
#endif
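
The declarations above combine into a short resolve-all-names sequence. The following kernel-style sketch assumes a valid fs_root and the signatures declared in this header; error handling is abbreviated and print_inode_paths is a hypothetical caller, not an in-tree function.

/*
 * Sketch (kernel context): resolve every path of an inode with the
 * ipath API. init_ipath() sizes the fspath buffer (4096 here is an
 * arbitrary choice); free_ipath() releases the container but not the
 * caller-owned btrfs_path.
 */
static int print_inode_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int i, ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	for (i = 0; !ret && i < ipath->fspath->elem_cnt; i++)
		pr_debug("path %d: %s\n", i,
			 (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}
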
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 5b2ad6bc4fe7..ed8ca7ca5eff 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -38,6 +38,7 @@
#define BTRFS_INODE_DELALLOC_META_RESERVED 4
#define BTRFS_INODE_HAS_ORPHAN_ITEM 5
#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
+#define BTRFS_INODE_NEEDS_FULL_SYNC 7
/* in memory btrfs inode */
struct btrfs_inode {
@@ -143,6 +144,9 @@ struct btrfs_inode {
/* flags field from the on disk inode */
u32 flags;
+ /* a local copy of root's last_log_commit */
+ unsigned long last_log_commit;
+
/*
* Counters to keep track of the number of extent item's we may use due
* to delalloc and such. outstanding_extents is the number of extent
@@ -202,15 +206,10 @@ static inline bool btrfs_is_free_space_inode(struct inode *inode)
static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
- int ret = 0;
-
- mutex_lock(&root->log_mutex);
if (BTRFS_I(inode)->logged_trans == generation &&
- BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
- ret = 1;
- mutex_unlock(&root->log_mutex);
- return ret;
+ BTRFS_I(inode)->last_sub_trans <= BTRFS_I(inode)->last_log_commit)
+ return 1;
+ return 0;
}
#endif
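
The rewritten btrfs_inode_in_log() above can drop root->log_mutex because last_log_commit is now snapshotted into the inode itself. A minimal stand-alone model of the decision; field names mirror btrfs_inode, but this is an illustration, not kernel code.

/*
 * Sketch: the lockless "already in the log?" test. An fsync may skip
 * logging when the inode was logged in this transaction generation
 * and nothing newer than the inode's own last_log_commit copy has
 * dirtied it.
 */
#include <stdio.h>

struct inode_model {
	unsigned long logged_trans;
	unsigned long last_sub_trans;
	unsigned long last_log_commit;	/* local copy of root's value */
};

static int inode_in_log(const struct inode_model *in, unsigned long gen)
{
	return in->logged_trans == gen &&
	       in->last_sub_trans <= in->last_log_commit;
}

int main(void)
{
	struct inode_model in = { .logged_trans = 7,
				  .last_sub_trans = 3,
				  .last_log_commit = 5 };

	printf("skip log: %d\n", inode_in_log(&in, 7));	/* 1: nothing newer */
	in.last_sub_trans = 6;				/* dirtied again */
	printf("skip log: %d\n", inode_in_log(&in, 7));	/* 0: must log */
	return 0;
}
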
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 9197e2e33407..5a3e45db642a 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -37,8 +37,9 @@
* the file system was mounted, (i.e., they have been
* referenced by the super block) or they have been
* written since then and the write completion callback
- * was called and a FLUSH request to the device where
- * these blocks are located was received and completed.
+ * was called and no write error was indicated and a
+ * FLUSH request to the device where these blocks are
+ * located was received and completed.
* 2b. All referenced blocks need to have a generation
* number which is equal to the parent's number.
*
@@ -2601,6 +2602,17 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
(unsigned long long)l->block_ref_to->dev_bytenr,
l->block_ref_to->mirror_num);
ret = -1;
+ } else if (l->block_ref_to->iodone_w_error) {
+ printk(KERN_INFO "btrfs: attempt to write superblock"
+ " which references block %c @%llu (%s/%llu/%d)"
+ " which has write error!\n",
+ btrfsic_get_block_type(state, l->block_ref_to),
+ (unsigned long long)
+ l->block_ref_to->logical_bytenr,
+ l->block_ref_to->dev_state->name,
+ (unsigned long long)l->block_ref_to->dev_bytenr,
+ l->block_ref_to->mirror_num);
+ ret = -1;
} else if (l->parent_generation !=
l->block_ref_to->generation &&
BTRFSIC_GENERATION_UNKNOWN !=
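
The added branch extends the superblock-write gate to three conditions: a referenced block must have been flushed to stable storage, must have completed its last write without error (the new iodone_w_error case), and must carry the generation its parent recorded. A compact stand-alone model of that gate; struct and field names are invented for illustration.

/*
 * Sketch: the checks a referenced block must pass before the
 * integrity checker lets a superblock pointing at it go to disk.
 */
#include <stdbool.h>

struct ref_block {
	bool flushed;		/* written and FLUSHed since mount */
	bool iodone_w_error;	/* last write completed with an error */
	unsigned long long generation;
};

static int superblock_ref_ok(const struct ref_block *b,
			     unsigned long long parent_generation)
{
	if (!b->flushed)
		return -1;		/* not on stable storage yet */
	if (b->iodone_w_error)
		return -1;		/* the case added above */
	if (b->generation != parent_generation)
		return -1;		/* stale copy */
	return 0;
}
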
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 43d1c5a3a030..c6467aa88bee 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -577,6 +577,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
u64 em_start;
struct extent_map *em;
int ret = -ENOMEM;
+ int faili = 0;
u32 *sums;
tree = &BTRFS_I(inode)->io_tree;
@@ -626,9 +627,13 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
- if (!cb->compressed_pages[pg_index])
+ if (!cb->compressed_pages[pg_index]) {
+ faili = pg_index - 1;
+ ret = -ENOMEM;
goto fail2;
+ }
}
+ faili = nr_pages - 1;
cb->nr_pages = nr_pages;
add_ra_bio_pages(inode, em_start + em_len, cb);
@@ -713,8 +718,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
return 0;
fail2:
- for (pg_index = 0; pg_index < nr_pages; pg_index++)
- free_page((unsigned long)cb->compressed_pages[pg_index]);
+ while (faili >= 0) {
+ __free_page(cb->compressed_pages[faili]);
+ faili--;
+ }
kfree(cb->compressed_pages);
fail1:
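
The fail2 rework above is the standard partial-allocation unwind: remember the index of the last page that allocated successfully (faili) and free only [0, faili], where the old loop walked all nr_pages slots including ones never filled. A user-space sketch of the pattern, using malloc/free in place of alloc_page/__free_page.

/*
 * Sketch: unwinding a partially filled pointer array on allocation
 * failure; only slots that were actually filled are freed.
 */
#include <stdlib.h>

static void **alloc_array(int nr)
{
	void **arr;
	int i, faili = -1;

	arr = calloc(nr, sizeof(*arr));
	if (!arr)
		return NULL;

	for (i = 0; i < nr; i++) {
		arr[i] = malloc(4096);
		if (!arr[i]) {
			faili = i - 1;	/* last slot that succeeded */
			goto fail;
		}
	}
	return arr;

fail:
	while (faili >= 0)
		free(arr[faili--]);
	free(arr);
	return NULL;
}
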
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 6d183f60d63a..cdfb4c49a806 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -596,6 +596,11 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
if (tree_mod_dont_log(fs_info, eb))
return 0;
+ /*
+ * When we overwrite something during the move, we log these removals.
+ * This can only happen when we move towards the beginning of the
+ * buffer, i.e. dst_slot < src_slot.
+ */
for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
MOD_LOG_KEY_REMOVE_WHILE_MOVING);
@@ -647,8 +652,6 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
if (tree_mod_dont_log(fs_info, NULL))
return 0;
- __tree_mod_log_free_eb(fs_info, old_root);
-
ret = tree_mod_alloc(fs_info, flags, &tm);
if (ret < 0)
goto out;
@@ -926,12 +929,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
ret = btrfs_dec_ref(trans, root, buf, 1, 1);
BUG_ON(ret); /* -ENOMEM */
}
- /*
- * don't log freeing in case we're freeing the root node, this
- * is done by tree_mod_log_set_root_pointer later
- */
- if (buf != root->node && btrfs_header_level(buf) != 0)
- tree_mod_log_free_eb(root->fs_info, buf);
+ tree_mod_log_free_eb(root->fs_info, buf);
clean_tree_block(trans, root, buf);
*last_ref = 1;
}
@@ -1225,6 +1223,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
free_extent_buffer(eb);
__tree_mod_log_rewind(eb_rewin, time_seq, tm);
+ WARN_ON(btrfs_header_nritems(eb_rewin) >
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
return eb_rewin;
}
@@ -1241,9 +1241,11 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
{
struct tree_mod_elem *tm;
struct extent_buffer *eb;
+ struct extent_buffer *old;
struct tree_mod_root *old_root = NULL;
u64 old_generation = 0;
u64 logical;
+ u32 blocksize;
eb = btrfs_read_lock_root_node(root);
tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
@@ -1259,14 +1261,32 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
}
tm = tree_mod_log_search(root->fs_info, logical, time_seq);
- if (old_root)
+ if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
+ btrfs_tree_read_unlock(root->node);
+ free_extent_buffer(root->node);
+ blocksize = btrfs_level_size(root, old_root->level);
+ old = read_tree_block(root, logical, blocksize, 0);
+ if (!old) {
+ pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
+ logical);
+ WARN_ON(1);
+ } else {
+ eb = btrfs_clone_extent_buffer(old);
+ free_extent_buffer(old);
+ }
+ } else if (old_root) {
+ btrfs_tree_read_unlock(root->node);
+ free_extent_buffer(root->node);
eb = alloc_dummy_extent_buffer(logical, root->nodesize);
- else
+ } else {
eb = btrfs_clone_extent_buffer(root->node);
- btrfs_tree_read_unlock(root->node);
- free_extent_buffer(root->node);
+ btrfs_tree_read_unlock(root->node);
+ free_extent_buffer(root->node);
+ }
+
if (!eb)
return NULL;
+ extent_buffer_get(eb);
btrfs_tree_read_lock(eb);
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
@@ -1279,11 +1299,28 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
__tree_mod_log_rewind(eb, time_seq, tm);
else
WARN_ON(btrfs_header_level(eb) != 0);
- extent_buffer_get(eb);
+ WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
return eb;
}
+int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
+{
+ struct tree_mod_elem *tm;
+ int level;
+
+ tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+ if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
+ level = tm->old_root.level;
+ } else {
+ rcu_read_lock();
+ level = btrfs_header_level(root->node);
+ rcu_read_unlock();
+ }
+
+ return level;
+}
+
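
btrfs_old_root_level() above prefers the level recorded in the newest ROOT_REPLACE tree-mod entry and falls back to the live root's header level. A minimal stand-alone model of that choice; the types and names are invented, with only the ROOT_REPLACE case mirroring the kernel constant.

/*
 * Sketch: pick the historical root level. If the tree-mod log holds a
 * root replacement reaching back to time_seq, the level stored there
 * wins; otherwise the live root has not been replaced since then.
 */
enum mod_op { MOD_OP_NONE, MOD_OP_ROOT_REPLACE, MOD_OP_KEY_REMOVE };

struct mod_elem {
	enum mod_op op;
	int old_root_level;
};

static int old_root_level_model(const struct mod_elem *tm, int live_level)
{
	if (tm && tm->op == MOD_OP_ROOT_REPLACE)
		return tm->old_root_level;
	return live_level;	/* root not replaced since time_seq */
}
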
static inline int should_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf)
@@ -1725,6 +1762,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto enospc;
}
+ tree_mod_log_free_eb(root->fs_info, root->node);
tree_mod_log_set_root_pointer(root, child);
rcu_assign_pointer(root->node, child);
@@ -2970,8 +3008,10 @@ static int push_node_left(struct btrfs_trans_handle *trans,
push_items * sizeof(struct btrfs_key_ptr));
if (push_items < src_nritems) {
- tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
- src_nritems - push_items);
+ /*
+ * don't call tree_mod_log_eb_move here; key removal was already
+ * fully logged by tree_mod_log_eb_copy above.
+ */
memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
btrfs_node_key_ptr_offset(push_items),
(src_nritems - push_items) *
@@ -4402,149 +4442,6 @@ void btrfs_extend_item(struct btrfs_trans_handle *trans,
}
/*
- * Given a key and some data, insert items into the tree.
- * This does all the path init required, making room in the tree if needed.
- * Returns the number of keys that were inserted.
- */
-int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *cpu_key, u32 *data_size,
- int nr)
-{
- struct extent_buffer *leaf;
- struct btrfs_item *item;
- int ret = 0;
- int slot;
- int i;
- u32 nritems;
- u32 total_data = 0;
- u32 total_size = 0;
- unsigned int data_end;
- struct btrfs_disk_key disk_key;
- struct btrfs_key found_key;
- struct btrfs_map_token token;
-
- btrfs_init_map_token(&token);
-
- for (i = 0; i < nr; i++) {
- if (total_size + data_size[i] + sizeof(struct btrfs_item) >
- BTRFS_LEAF_DATA_SIZE(root)) {
- break;
- nr = i;
- }
- total_data += data_size[i];
- total_size += data_size[i] + sizeof(struct btrfs_item);
- }
- BUG_ON(nr == 0);
-
- ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
- if (ret == 0)
- return -EEXIST;
- if (ret < 0)
- goto out;
-
- leaf = path->nodes[0];
-
- nritems = btrfs_header_nritems(leaf);
- data_end = leaf_data_end(root, leaf);
-
- if (btrfs_leaf_free_space(root, leaf) < total_size) {
- for (i = nr; i >= 0; i--) {
- total_data -= data_size[i];
- total_size -= data_size[i] + sizeof(struct btrfs_item);
- if (total_size < btrfs_leaf_free_space(root, leaf))
- break;
- }
- nr = i;
- }
-
- slot = path->slots[0];
- BUG_ON(slot < 0);
-
- if (slot != nritems) {
- unsigned int old_data = btrfs_item_end_nr(leaf, slot);
-
- item = btrfs_item_nr(leaf, slot);
- btrfs_item_key_to_cpu(leaf, &found_key, slot);
-
- /* figure out how many keys we can insert in here */
- total_data = data_size[0];
- for (i = 1; i < nr; i++) {
- if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
- break;
- total_data += data_size[i];
- }
- nr = i;
-
- if (old_data < data_end) {
- btrfs_print_leaf(root, leaf);
- printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
- slot, old_data, data_end);
- BUG_ON(1);
- }
- /*
- * item0..itemN ... dataN.offset..dataN.size .. data0.size
- */
- /* first correct the data pointers */
- for (i = slot; i < nritems; i++) {
- u32 ioff;
-
- item = btrfs_item_nr(leaf, i);
- ioff = btrfs_token_item_offset(leaf, item, &token);
- btrfs_set_token_item_offset(leaf, item,
- ioff - total_data, &token);
- }
- /* shift the items */
- memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
- btrfs_item_nr_offset(slot),
- (nritems - slot) * sizeof(struct btrfs_item));
-
- /* shift the data */
- memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
- data_end - total_data, btrfs_leaf_data(leaf) +
- data_end, old_data - data_end);
- data_end = old_data;
- } else {
- /*
- * this sucks but it has to be done, if we are inserting at
- * the end of the leaf only insert 1 of the items, since we
- * have no way of knowing whats on the next leaf and we'd have
- * to drop our current locks to figure it out
- */
- nr = 1;
- }
-
- /* setup the item for the new data */
- for (i = 0; i < nr; i++) {
- btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
- btrfs_set_item_key(leaf, &disk_key, slot + i);
- item = btrfs_item_nr(leaf, slot + i);
- btrfs_set_token_item_offset(leaf, item,
- data_end - data_size[i], &token);
- data_end -= data_size[i];
- btrfs_set_token_item_size(leaf, item, data_size[i], &token);
- }
- btrfs_set_header_nritems(leaf, nritems + nr);
- btrfs_mark_buffer_dirty(leaf);
-
- ret = 0;
- if (slot == 0) {
- btrfs_cpu_key_to_disk(&disk_key, cpu_key);
- fixup_low_keys(trans, root, path, &disk_key, 1);
- }
-
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
- BUG();
- }
-out:
- if (!ret)
- ret = nr;
- return ret;
-}
-
-/*
* this is a helper for btrfs_insert_empty_items, the main goal here is
* to save stack depth by doing the bulk of the work in a function
* that doesn't call btrfs_search_slot
@@ -5073,6 +4970,7 @@ static void tree_move_down(struct btrfs_root *root,
struct btrfs_path *path,
int *level, int root_level)
{
+ BUG_ON(*level == 0);
path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
path->slots[*level]);
path->slots[*level - 1] = 0;
@@ -5089,7 +4987,7 @@ static int tree_move_next_or_upnext(struct btrfs_root *root,
path->slots[*level]++;
- while (path->slots[*level] == nritems) {
+ while (path->slots[*level] >= nritems) {
if (*level == root_level)
return -1;
@@ -5433,9 +5331,11 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
advance_right = ADVANCE;
} else {
+ WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
ret = tree_compare_item(left_root, left_path,
right_path, tmp_buf);
if (ret) {
+ WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
ret = changed_cb(left_root, right_root,
left_path, right_path,
&left_key,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9821b672f5a2..c72ead869507 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -154,6 +154,13 @@ struct btrfs_ordered_sum;
*/
#define BTRFS_NAME_LEN 255
+/*
+ * Theoretical limit is larger, but we keep this down to a sane
+ * value. That should greatly limit the possibility of collisions on
+ * inode ref items.
+ */
+#define BTRFS_LINK_MAX 65535U
+
/* 32 bytes in various csum fields */
#define BTRFS_CSUM_SIZE 32
@@ -489,6 +496,8 @@ struct btrfs_super_block {
*/
#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5)
+#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6)
+
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL
#define BTRFS_FEATURE_INCOMPAT_SUPP \
@@ -496,7 +505,8 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
- BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO)
+ BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
+ BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
/*
* A leaf is full of items. offset and size tell us where to find
@@ -643,6 +653,14 @@ struct btrfs_inode_ref {
/* name goes here */
} __attribute__ ((__packed__));
+struct btrfs_inode_extref {
+ __le64 parent_objectid;
+ __le64 index;
+ __le16 name_len;
+ __u8 name[0];
+ /* name goes here */
+} __attribute__ ((__packed__));
+
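
The record above packs an 18-byte header followed by the name inline, which is what lets several link names for the same inode share one item. A user-space mirror with a compile-time layout check; this host-endian model glosses over the little-endian disk order.

/*
 * Sketch: user-space mirror of the on-disk extref record and the
 * offset math used when walking packed records within an item.
 */
#include <stdint.h>

struct inode_extref_disk {
	uint64_t parent_objectid;	/* __le64 on disk */
	uint64_t index;			/* __le64 on disk */
	uint16_t name_len;		/* __le16 on disk */
	uint8_t  name[];		/* name_len bytes follow inline */
} __attribute__((packed));

_Static_assert(sizeof(struct inode_extref_disk) == 18,
	       "extref header must be 18 bytes");

/* byte offset of a record's name, given the record's offset in an item */
static inline uint32_t extref_name_off(uint32_t rec_off)
{
	return rec_off + (uint32_t)sizeof(struct inode_extref_disk);
}
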
struct btrfs_timespec {
__le64 sec;
__le32 nsec;
@@ -1028,12 +1046,22 @@ struct btrfs_space_info {
wait_queue_head_t wait;
};
+#define BTRFS_BLOCK_RSV_GLOBAL 1
+#define BTRFS_BLOCK_RSV_DELALLOC 2
+#define BTRFS_BLOCK_RSV_TRANS 3
+#define BTRFS_BLOCK_RSV_CHUNK 4
+#define BTRFS_BLOCK_RSV_DELOPS 5
+#define BTRFS_BLOCK_RSV_EMPTY 6
+#define BTRFS_BLOCK_RSV_TEMP 7
+
struct btrfs_block_rsv {
u64 size;
u64 reserved;
struct btrfs_space_info *space_info;
spinlock_t lock;
- unsigned int full;
+ unsigned short full;
+ unsigned short type;
+ unsigned short failfast;
};
/*
@@ -1127,6 +1155,9 @@ struct btrfs_block_group_cache {
* Today it will only have one thing on it, but that may change
*/
struct list_head cluster_list;
+
+ /* For delayed block group creation */
+ struct list_head new_bg_list;
};
/* delayed seq elem */
@@ -1240,7 +1271,6 @@ struct btrfs_fs_info {
struct mutex reloc_mutex;
struct list_head trans_list;
- struct list_head hashers;
struct list_head dead_roots;
struct list_head caching_block_groups;
@@ -1366,9 +1396,6 @@ struct btrfs_fs_info {
struct rb_root defrag_inodes;
atomic_t defrag_running;
- spinlock_t ref_cache_lock;
- u64 total_ref_cache_size;
-
/*
* these three are in extended format (availability of single
* chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
@@ -1441,6 +1468,8 @@ struct btrfs_fs_info {
/* next backup root to be overwritten */
int backup_root_index;
+
+ int num_tolerated_disk_barrier_failures;
};
/*
@@ -1481,9 +1510,9 @@ struct btrfs_root {
wait_queue_head_t log_commit_wait[2];
atomic_t log_writers;
atomic_t log_commit[2];
+ atomic_t log_batch;
unsigned long log_transid;
unsigned long last_log_commit;
- unsigned long log_batch;
pid_t log_start_pid;
bool log_multiple_pids;
@@ -1592,6 +1621,7 @@ struct btrfs_ioctl_defrag_range_args {
*/
#define BTRFS_INODE_ITEM_KEY 1
#define BTRFS_INODE_REF_KEY 12
+#define BTRFS_INODE_EXTREF_KEY 13
#define BTRFS_XATTR_ITEM_KEY 24
#define BTRFS_ORPHAN_ITEM_KEY 48
/* reserve 2-15 close to the inode for later flexibility */
@@ -1978,6 +2008,13 @@ BTRFS_SETGET_STACK_FUNCS(block_group_flags,
BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
+/* struct btrfs_inode_extref */
+BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref,
+ parent_objectid, 64);
+BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref,
+ name_len, 16);
+BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64);
+
/* struct btrfs_inode_item */
BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64);
BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64);
@@ -2858,6 +2895,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 size);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 group_start);
+void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
@@ -2874,8 +2913,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes);
-void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv);
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root);
+void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
+ unsigned short type);
void btrfs_free_block_rsv(struct btrfs_root *root,
struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
@@ -3080,6 +3120,7 @@ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
return atomic_inc_return(&fs_info->tree_mod_seq);
}
+int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -3172,12 +3213,12 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 *index);
-struct btrfs_inode_ref *
-btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- const char *name, int name_len,
- u64 inode_objectid, u64 ref_objectid, int mod);
+int btrfs_get_inode_ref_index(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const char *name, int name_len,
+ u64 inode_objectid, u64 ref_objectid, int mod,
+ u64 *ret_index);
int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 objectid);
@@ -3185,6 +3226,19 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path,
struct btrfs_key *location, int mod);
+struct btrfs_inode_extref *
+btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const char *name, int name_len,
+ u64 inode_objectid, u64 ref_objectid, int ins_len,
+ int cow);
+
+int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
+ u64 ref_objectid, const char *name,
+ int name_len,
+ struct btrfs_inode_extref **extref_ret);
+
/* file-item.c */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 len);
@@ -3249,6 +3303,8 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, u64 objectid,
const char *name, int name_len);
+int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+ int front);
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 new_size,
@@ -3283,6 +3339,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
+int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
@@ -3308,16 +3366,27 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
int btrfs_defrag_file(struct inode *inode, struct file *file,
struct btrfs_ioctl_defrag_range_args *range,
u64 newer_than, unsigned long max_pages);
+void btrfs_get_block_group_info(struct list_head *groups_list,
+ struct btrfs_ioctl_space_info *space);
+
/* file.c */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct inode *inode);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
- int skip_pinned);
+void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+ int skip_pinned);
+int btrfs_replace_extent_cache(struct inode *inode, struct extent_map *replace,
+ u64 start, u64 end, int skip_pinned,
+ int modified);
extern const struct file_operations btrfs_file_operations;
-int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
- u64 start, u64 end, u64 *hint_byte, int drop_cache);
+int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode,
+ struct btrfs_path *path, u64 start, u64 end,
+ u64 *drop_end, int drop_cache);
+int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode, u64 start,
+ u64 end, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
@@ -3378,6 +3447,11 @@ static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
}
}
+/*
+ * Call btrfs_abort_transaction as early as possible when an error condition is
+ * detected; that way the exact line number is reported.
+ */
+
#define btrfs_abort_transaction(trans, root, errno) \
do { \
__btrfs_abort_transaction(trans, root, __func__, \
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 52c85e2b95d0..478f66bdc57b 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -29,7 +29,7 @@ static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
- delayed_node_cache = kmem_cache_create("delayed_node",
+ delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
sizeof(struct btrfs_delayed_node),
0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
@@ -650,7 +650,7 @@ static int btrfs_delayed_inode_reserve_metadata(
* we're accounted for.
*/
if (!src_rsv || (!trans->bytes_reserved &&
- src_rsv != &root->fs_info->delalloc_block_rsv)) {
+ src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
/*
* Since we're under a transaction reserve_metadata_bytes could
@@ -668,7 +668,7 @@ static int btrfs_delayed_inode_reserve_metadata(
num_bytes, 1);
}
return ret;
- } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
+ } else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
spin_lock(&BTRFS_I(inode)->lock);
if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
&BTRFS_I(inode)->runtime_flags)) {
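
The two hunks above switch from pointer identity (src_rsv == &fs_info->delalloc_block_rsv) to the type tag, so any reserve created as a delalloc reserve matches, not just the one global instance. A tiny stand-alone model; the constants mirror the BTRFS_BLOCK_RSV_* values from the ctree.h hunk earlier, and the struct and function names are illustrative.

/*
 * Sketch: identify a reserve by its type tag instead of by pointer
 * identity with one global instance.
 */
#define RSV_GLOBAL	1
#define RSV_DELALLOC	2
#define RSV_TRANS	3

struct rsv_model {
	unsigned long long size;
	unsigned long long reserved;
	unsigned short type;
};

static int rsv_is_delalloc(const struct rsv_model *rsv)
{
	/* matches clones and copies, which a pointer compare against
	 * the single fs_info instance would miss */
	return rsv && rsv->type == RSV_DELALLOC;
}
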
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 22e98e04c2ea..7cda51995c1e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -46,6 +46,10 @@
#include "check-integrity.h"
#include "rcu-string.h"
+#ifdef CONFIG_X86
+#include <asm/cpufeature.h>
+#endif
+
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
@@ -217,26 +221,16 @@ static struct extent_map *btree_get_extent(struct inode *inode,
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
if (ret == -EEXIST) {
- u64 failed_start = em->start;
- u64 failed_len = em->len;
-
free_extent_map(em);
em = lookup_extent_mapping(em_tree, start, len);
- if (em) {
- ret = 0;
- } else {
- em = lookup_extent_mapping(em_tree, failed_start,
- failed_len);
- ret = -EIO;
- }
+ if (!em)
+ em = ERR_PTR(-EIO);
} else if (ret) {
free_extent_map(em);
- em = NULL;
+ em = ERR_PTR(ret);
}
write_unlock(&em_tree->lock);
- if (ret)
- em = ERR_PTR(ret);
out:
return em;
}
@@ -439,10 +433,6 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
WARN_ON(1);
return 0;
}
- if (eb->pages[0] != page) {
- WARN_ON(1);
- return 0;
- }
if (!PageUptodate(page)) {
WARN_ON(1);
return 0;
@@ -869,10 +859,22 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}
+static int check_async_write(struct inode *inode, unsigned long bio_flags)
+{
+ if (bio_flags & EXTENT_BIO_TREE_LOG)
+ return 0;
+#ifdef CONFIG_X86
+ if (cpu_has_xmm4_2)
+ return 0;
+#endif
+ return 1;
+}
+
static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
+ int async = check_async_write(inode, bio_flags);
int ret;
if (!(rw & REQ_WRITE)) {
@@ -887,6 +889,12 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
return ret;
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
mirror_num, 0);
+ } else if (!async) {
+ ret = btree_csum_one_bio(bio);
+ if (ret)
+ return ret;
+ return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
+ mirror_num, 0);
}
/*
@@ -1168,8 +1176,8 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
atomic_set(&root->log_commit[0], 0);
atomic_set(&root->log_commit[1], 0);
atomic_set(&root->log_writers, 0);
+ atomic_set(&root->log_batch, 0);
atomic_set(&root->orphan_inodes, 0);
- root->log_batch = 0;
root->log_transid = 0;
root->last_log_commit = 0;
extent_io_tree_init(&root->dirty_log_pages,
@@ -1667,9 +1675,10 @@ static int transaction_kthread(void *arg)
spin_unlock(&root->fs_info->trans_lock);
/* If the file system is aborted, this will always fail. */
- trans = btrfs_join_transaction(root);
+ trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) {
- cannot_commit = true;
+ if (PTR_ERR(trans) != -ENOENT)
+ cannot_commit = true;
goto sleep;
}
if (transid == trans->transid) {
@@ -1994,13 +2003,11 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
- INIT_LIST_HEAD(&fs_info->hashers);
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
INIT_LIST_HEAD(&fs_info->ordered_operations);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
spin_lock_init(&fs_info->delalloc_lock);
spin_lock_init(&fs_info->trans_lock);
- spin_lock_init(&fs_info->ref_cache_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock);
@@ -2014,12 +2021,15 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->space_info);
INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
btrfs_mapping_init(&fs_info->mapping_tree);
- btrfs_init_block_rsv(&fs_info->global_block_rsv);
- btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
- btrfs_init_block_rsv(&fs_info->trans_block_rsv);
- btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
- btrfs_init_block_rsv(&fs_info->empty_block_rsv);
- btrfs_init_block_rsv(&fs_info->delayed_block_rsv);
+ btrfs_init_block_rsv(&fs_info->global_block_rsv,
+ BTRFS_BLOCK_RSV_GLOBAL);
+ btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
+ BTRFS_BLOCK_RSV_DELALLOC);
+ btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
+ btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
+ btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
+ btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
+ BTRFS_BLOCK_RSV_DELOPS);
atomic_set(&fs_info->nr_async_submits, 0);
atomic_set(&fs_info->async_delalloc_pages, 0);
atomic_set(&fs_info->async_submit_draining, 0);
@@ -2491,6 +2501,8 @@ retry_root_backup:
printk(KERN_ERR "Failed to read block groups: %d\n", ret);
goto fail_block_groups;
}
+ fs_info->num_tolerated_disk_barrier_failures =
+ btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
"btrfs-cleaner");
@@ -2874,12 +2886,10 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
printk_in_rcu("btrfs: disabling barriers on dev %s\n",
rcu_str_deref(device->name));
device->nobarriers = 1;
- }
- if (!bio_flagged(bio, BIO_UPTODATE)) {
+ } else if (!bio_flagged(bio, BIO_UPTODATE)) {
ret = -EIO;
- if (!bio_flagged(bio, BIO_EOPNOTSUPP))
- btrfs_dev_stat_inc_and_print(device,
- BTRFS_DEV_STAT_FLUSH_ERRS);
+ btrfs_dev_stat_inc_and_print(device,
+ BTRFS_DEV_STAT_FLUSH_ERRS);
}
/* drop the reference from the wait == 0 run */
@@ -2918,14 +2928,15 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
{
struct list_head *head;
struct btrfs_device *dev;
- int errors = 0;
+ int errors_send = 0;
+ int errors_wait = 0;
int ret;
/* send down all the barriers */
head = &info->fs_devices->devices;
list_for_each_entry_rcu(dev, head, dev_list) {
if (!dev->bdev) {
- errors++;
+ errors_send++;
continue;
}
if (!dev->in_fs_metadata || !dev->writeable)
@@ -2933,13 +2944,13 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
ret = write_dev_flush(dev, 0);
if (ret)
- errors++;
+ errors_send++;
}
/* wait for all the barriers */
list_for_each_entry_rcu(dev, head, dev_list) {
if (!dev->bdev) {
- errors++;
+ errors_wait++;
continue;
}
if (!dev->in_fs_metadata || !dev->writeable)
@@ -2947,13 +2958,87 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
ret = write_dev_flush(dev, 1);
if (ret)
- errors++;
+ errors_wait++;
}
- if (errors)
+ if (errors_send > info->num_tolerated_disk_barrier_failures ||
+ errors_wait > info->num_tolerated_disk_barrier_failures)
return -EIO;
return 0;
}
+int btrfs_calc_num_tolerated_disk_barrier_failures(
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_ioctl_space_info space;
+ struct btrfs_space_info *sinfo;
+ u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
+ BTRFS_BLOCK_GROUP_SYSTEM,
+ BTRFS_BLOCK_GROUP_METADATA,
+ BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
+ int num_types = 4;
+ int i;
+ int c;
+ int num_tolerated_disk_barrier_failures =
+ (int)fs_info->fs_devices->num_devices;
+
+ for (i = 0; i < num_types; i++) {
+ struct btrfs_space_info *tmp;
+
+ sinfo = NULL;
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
+ if (tmp->flags == types[i]) {
+ sinfo = tmp;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sinfo)
+ continue;
+
+ down_read(&sinfo->groups_sem);
+ for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
+ if (!list_empty(&sinfo->block_groups[c])) {
+ u64 flags;
+
+ btrfs_get_block_group_info(
+ &sinfo->block_groups[c], &space);
+ if (space.total_bytes == 0 ||
+ space.used_bytes == 0)
+ continue;
+ flags = space.flags;
+ /*
+ * return
+ * 0: if dup, single or RAID0 is configured for
+ * any of metadata, system or data, else
+ * 1: if RAID5 is configured, or if RAID1 or
+ * RAID10 is configured and only two mirrors
+ * are used, else
+ * 2: if RAID6 is configured, else
+ * num_mirrors - 1: if RAID1 or RAID10 is
+ * configured and more than
+ * 2 mirrors are used.
+ */
+ if (num_tolerated_disk_barrier_failures > 0 &&
+ ((flags & (BTRFS_BLOCK_GROUP_DUP |
+ BTRFS_BLOCK_GROUP_RAID0)) ||
+ ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
+ == 0)))
+ num_tolerated_disk_barrier_failures = 0;
+ else if (num_tolerated_disk_barrier_failures > 1 &&
+ (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10)))
+ num_tolerated_disk_barrier_failures = 1;
+ }
+ }
+ up_read(&sinfo->groups_sem);
+ }
+
+ return num_tolerated_disk_barrier_failures;
+}
+
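
The function above starts from the device count and monotonically lowers the tolerance as it scans each in-use profile, per the rules in its comment. A stand-alone sketch of that fold; the flag bits here are illustrative, not the kernel's values.

/*
 * Sketch: fold RAID-profile flags into a barrier-failure tolerance.
 * single/DUP/RAID0 tolerate no failed device; RAID1/RAID10 with two
 * copies tolerate one.
 */
#define BG_RAID0	(1ULL << 0)
#define BG_RAID1	(1ULL << 1)
#define BG_DUP		(1ULL << 2)
#define BG_RAID10	(1ULL << 3)
#define BG_PROFILE_MASK	(BG_RAID0 | BG_RAID1 | BG_DUP | BG_RAID10)

static int fold_tolerance(int cur, unsigned long long flags)
{
	if (cur > 0 &&
	    ((flags & (BG_DUP | BG_RAID0)) ||
	     (flags & BG_PROFILE_MASK) == 0))	/* single */
		return 0;
	if (cur > 1 && (flags & (BG_RAID1 | BG_RAID10)))
		return 1;
	return cur;
}

/*
 * Callers start from num_devices and fold once per in-use profile;
 * barrier_all_devices() then compares its send/wait error counts
 * against the resulting minimum.
 */
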
int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
struct list_head *head;
@@ -2976,8 +3061,16 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
head = &root->fs_info->fs_devices->devices;
- if (do_barriers)
- barrier_all_devices(root->fs_info);
+ if (do_barriers) {
+ ret = barrier_all_devices(root->fs_info);
+ if (ret) {
+ mutex_unlock(
+ &root->fs_info->fs_devices->device_list_mutex);
+ btrfs_error(root->fs_info, ret,
+ "errors while submitting device barriers.");
+ return ret;
+ }
+ }
list_for_each_entry_rcu(dev, head, dev_list) {
if (!dev->bdev) {
@@ -3211,10 +3304,6 @@ int close_ctree(struct btrfs_root *root)
printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
(unsigned long long)fs_info->delalloc_bytes);
}
- if (fs_info->total_ref_cache_size) {
- printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
- (unsigned long long)fs_info->total_ref_cache_size);
- }
free_extent_buffer(fs_info->extent_root->node);
free_extent_buffer(fs_info->extent_root->commit_root);
@@ -3360,52 +3449,6 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
-int btree_lock_page_hook(struct page *page, void *data,
- void (*flush_fn)(void *))
-{
- struct inode *inode = page->mapping->host;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_buffer *eb;
-
- /*
- * We culled this eb but the page is still hanging out on the mapping,
- * carry on.
- */
- if (!PagePrivate(page))
- goto out;
-
- eb = (struct extent_buffer *)page->private;
- if (!eb) {
- WARN_ON(1);
- goto out;
- }
- if (page != eb->pages[0])
- goto out;
-
- if (!btrfs_try_tree_write_lock(eb)) {
- flush_fn(data);
- btrfs_tree_lock(eb);
- }
- btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-
- if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
- spin_lock(&root->fs_info->delalloc_lock);
- if (root->fs_info->dirty_metadata_bytes >= eb->len)
- root->fs_info->dirty_metadata_bytes -= eb->len;
- else
- WARN_ON(1);
- spin_unlock(&root->fs_info->delalloc_lock);
- }
-
- btrfs_tree_unlock(eb);
-out:
- if (!trylock_page(page)) {
- flush_fn(data);
- lock_page(page);
- }
- return 0;
-}
-
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only)
{
@@ -3608,7 +3651,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
while (1) {
ret = find_first_extent_bit(dirty_pages, start, &start, &end,
- mark);
+ mark, NULL);
if (ret)
break;
@@ -3663,7 +3706,7 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
again:
while (1) {
ret = find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY);
+ EXTENT_DIRTY, NULL);
if (ret)
break;
@@ -3800,7 +3843,6 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
}
static struct extent_io_ops btree_extent_io_ops = {
- .write_cache_pages_lock_hook = btree_lock_page_hook,
.readpage_end_io_hook = btree_readpage_end_io_hook,
.readpage_io_failed_hook = btree_io_failed_hook,
.submit_bio_hook = btree_submit_bio_hook,
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index c5b00a735fef..2025a9132c16 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -95,6 +95,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
u64 objectid);
int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *));
+int btrfs_calc_num_tolerated_disk_barrier_failures(
+ struct btrfs_fs_info *fs_info);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index ba58024d40d3..3d3e2c17d8d1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -94,8 +94,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
u64 flags, struct btrfs_disk_key *key,
int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 alloc_bytes,
- u64 flags, int force);
+ struct btrfs_root *extent_root, u64 flags,
+ int force);
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
@@ -312,7 +312,8 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
while (start < end) {
ret = find_first_extent_bit(info->pinned_extents, start,
&extent_start, &extent_end,
- EXTENT_DIRTY | EXTENT_UPTODATE);
+ EXTENT_DIRTY | EXTENT_UPTODATE,
+ NULL);
if (ret)
break;
@@ -2361,10 +2362,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
}
next:
- do_chunk_alloc(trans, fs_info->extent_root,
- 2 * 1024 * 1024,
- btrfs_get_alloc_profile(root, 0),
- CHUNK_ALLOC_NO_FORCE);
cond_resched();
spin_lock(&delayed_refs->lock);
}
@@ -2478,10 +2475,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (root == root->fs_info->extent_root)
root = root->fs_info->tree_root;
- do_chunk_alloc(trans, root->fs_info->extent_root,
- 2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
- CHUNK_ALLOC_NO_FORCE);
-
btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
delayed_refs = &trans->transaction->delayed_refs;
@@ -2551,6 +2544,12 @@ again:
}
if (run_all) {
+ if (!list_empty(&trans->new_bgs)) {
+ spin_unlock(&delayed_refs->lock);
+ btrfs_create_pending_block_groups(trans, root);
+ spin_lock(&delayed_refs->lock);
+ }
+
node = rb_first(&delayed_refs->root);
if (!node)
goto out;
@@ -3406,7 +3405,6 @@ alloc:
return PTR_ERR(trans);
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- bytes + 2 * 1024 * 1024,
alloc_target,
CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans, root);
@@ -3488,8 +3486,7 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
}
static int should_alloc_chunk(struct btrfs_root *root,
- struct btrfs_space_info *sinfo, u64 alloc_bytes,
- int force)
+ struct btrfs_space_info *sinfo, int force)
{
struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
@@ -3504,7 +3501,8 @@ static int should_alloc_chunk(struct btrfs_root *root,
* and purposes it's used space. Don't worry about locking the
* global_rsv, it doesn't change except when the transaction commits.
*/
- num_allocated += global_rsv->size;
+ if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
+ num_allocated += global_rsv->size;
/*
* in limited mode, we want to have some free space up to
@@ -3518,15 +3516,8 @@ static int should_alloc_chunk(struct btrfs_root *root,
if (num_bytes - num_allocated < thresh)
return 1;
}
- thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
- /* 256MB or 2% of the FS */
- thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
- /* system chunks need a much small threshold */
- if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM)
- thresh = 32 * 1024 * 1024;
-
- if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
+ if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
return 0;
return 1;
}
@@ -3576,8 +3567,7 @@ static void check_system_chunk(struct btrfs_trans_handle *trans,
}
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
- struct btrfs_root *extent_root, u64 alloc_bytes,
- u64 flags, int force)
+ struct btrfs_root *extent_root, u64 flags, int force)
{
struct btrfs_space_info *space_info;
struct btrfs_fs_info *fs_info = extent_root->fs_info;
@@ -3601,7 +3591,7 @@ again:
return 0;
}
- if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
+ if (!should_alloc_chunk(extent_root, space_info, force)) {
spin_unlock(&space_info->lock);
return 0;
} else if (space_info->chunk_alloc) {
@@ -3669,6 +3659,46 @@ out:
return ret;
}
+static int can_overcommit(struct btrfs_root *root,
+ struct btrfs_space_info *space_info, u64 bytes,
+ int flush)
+{
+ u64 profile = btrfs_get_alloc_profile(root, 0);
+ u64 avail;
+ u64 used;
+
+ used = space_info->bytes_used + space_info->bytes_reserved +
+ space_info->bytes_pinned + space_info->bytes_readonly +
+ space_info->bytes_may_use;
+
+ spin_lock(&root->fs_info->free_chunk_lock);
+ avail = root->fs_info->free_chunk_space;
+ spin_unlock(&root->fs_info->free_chunk_lock);
+
+ /*
+ * If we have dup, raid1 or raid10 then only half of the free
+ * space is actually usable.
+ */
+ if (profile & (BTRFS_BLOCK_GROUP_DUP |
+ BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10))
+ avail >>= 1;
+
+ /*
+ * If we can flush, don't let us overcommit too much, say 1/8th
+ * of the space. If we aren't flushing, let it overcommit up to
+ * 1/2 of the space.
+ */
+ if (flush)
+ avail >>= 3;
+ else
+ avail >>= 1;
+
+ if (used + bytes < space_info->total_bytes + avail)
+ return 1;
+ return 0;
+}
+
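
can_overcommit() above boils down to arithmetic on the unallocated chunk space: halve it for mirrored profiles, take 1/8 of it when flushing is allowed and 1/2 when it is not, and permit the reservation if used + bytes stays under total_bytes plus that slice. A stand-alone model of the calculation; names are illustrative.

/*
 * Sketch: the overcommit arithmetic. "free_chunk_space" is the
 * unallocated chunk space on the filesystem.
 */
#include <stdbool.h>
#include <stdint.h>

static bool can_overcommit_model(uint64_t used, uint64_t total_bytes,
				 uint64_t free_chunk_space, uint64_t bytes,
				 bool mirrored, bool flush)
{
	uint64_t avail = free_chunk_space;

	if (mirrored)
		avail >>= 1;	/* DUP/RAID1/RAID10: half is usable */

	if (flush)
		avail >>= 3;	/* conservative: 1/8 */
	else
		avail >>= 1;	/* allow up to 1/2 */

	return used + bytes < total_bytes + avail;
}
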
/*
* shrink metadata reservation for delalloc
*/
@@ -3693,7 +3723,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
if (delalloc_bytes == 0) {
if (trans)
return;
- btrfs_wait_ordered_extents(root, 0, 0);
+ btrfs_wait_ordered_extents(root, 0);
return;
}
@@ -3703,11 +3733,15 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
WB_REASON_FS_FREE_SPACE);
+ /*
+ * We need to wait for the async pages to actually start before
+ * we do anything.
+ */
+ wait_event(root->fs_info->async_submit_wait,
+ !atomic_read(&root->fs_info->async_delalloc_pages));
+
spin_lock(&space_info->lock);
- if (space_info->bytes_used + space_info->bytes_reserved +
- space_info->bytes_pinned + space_info->bytes_readonly +
- space_info->bytes_may_use + orig <=
- space_info->total_bytes) {
+ if (can_overcommit(root, space_info, orig, !trans)) {
spin_unlock(&space_info->lock);
break;
}
@@ -3715,7 +3749,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
loops++;
if (wait_ordered && !trans) {
- btrfs_wait_ordered_extents(root, 0, 0);
+ btrfs_wait_ordered_extents(root, 0);
} else {
time_left = schedule_timeout_killable(1);
if (time_left)
@@ -3784,11 +3818,12 @@ commit:
}
enum flush_state {
- FLUSH_DELALLOC = 1,
- FLUSH_DELALLOC_WAIT = 2,
- FLUSH_DELAYED_ITEMS_NR = 3,
- FLUSH_DELAYED_ITEMS = 4,
- COMMIT_TRANS = 5,
+ FLUSH_DELAYED_ITEMS_NR = 1,
+ FLUSH_DELAYED_ITEMS = 2,
+ FLUSH_DELALLOC = 3,
+ FLUSH_DELALLOC_WAIT = 4,
+ ALLOC_CHUNK = 5,
+ COMMIT_TRANS = 6,
};
static int flush_space(struct btrfs_root *root,
@@ -3800,11 +3835,6 @@ static int flush_space(struct btrfs_root *root,
int ret = 0;
switch (state) {
- case FLUSH_DELALLOC:
- case FLUSH_DELALLOC_WAIT:
- shrink_delalloc(root, num_bytes, orig_bytes,
- state == FLUSH_DELALLOC_WAIT);
- break;
case FLUSH_DELAYED_ITEMS_NR:
case FLUSH_DELAYED_ITEMS:
if (state == FLUSH_DELAYED_ITEMS_NR) {
@@ -3825,6 +3855,24 @@ static int flush_space(struct btrfs_root *root,
ret = btrfs_run_delayed_items_nr(trans, root, nr);
btrfs_end_transaction(trans, root);
break;
+ case FLUSH_DELALLOC:
+ case FLUSH_DELALLOC_WAIT:
+ shrink_delalloc(root, num_bytes, orig_bytes,
+ state == FLUSH_DELALLOC_WAIT);
+ break;
+ case ALLOC_CHUNK:
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+ ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+ btrfs_get_alloc_profile(root, 0),
+ CHUNK_ALLOC_NO_FORCE);
+ btrfs_end_transaction(trans, root);
+ if (ret == -ENOSPC)
+ ret = 0;
+ break;
case COMMIT_TRANS:
ret = may_commit_transaction(root, space_info, orig_bytes, 0);
break;
@@ -3856,10 +3904,9 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
struct btrfs_space_info *space_info = block_rsv->space_info;
u64 used;
u64 num_bytes = orig_bytes;
- int flush_state = FLUSH_DELALLOC;
+ int flush_state = FLUSH_DELAYED_ITEMS_NR;
int ret = 0;
bool flushing = false;
- bool committed = false;
again:
ret = 0;
@@ -3922,57 +3969,12 @@ again:
(orig_bytes * 2);
}
- if (ret) {
- u64 profile = btrfs_get_alloc_profile(root, 0);
- u64 avail;
-
- /*
- * If we have a lot of space that's pinned, don't bother doing
- * the overcommit dance yet and just commit the transaction.
- */
- avail = (space_info->total_bytes - space_info->bytes_used) * 8;
- do_div(avail, 10);
- if (space_info->bytes_pinned >= avail && flush && !committed) {
- space_info->flush = 1;
- flushing = true;
- spin_unlock(&space_info->lock);
- ret = may_commit_transaction(root, space_info,
- orig_bytes, 1);
- if (ret)
- goto out;
- committed = true;
- goto again;
- }
-
- spin_lock(&root->fs_info->free_chunk_lock);
- avail = root->fs_info->free_chunk_space;
-
- /*
- * If we have dup, raid1 or raid10 then only half of the free
- * space is actually useable.
- */
- if (profile & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))
- avail >>= 1;
-
- /*
- * If we aren't flushing don't let us overcommit too much, say
- * 1/8th of the space. If we can flush, let it overcommit up to
- * 1/2 of the space.
- */
- if (flush)
- avail >>= 3;
- else
- avail >>= 1;
- spin_unlock(&root->fs_info->free_chunk_lock);
-
- if (used + num_bytes < space_info->total_bytes + avail) {
- space_info->bytes_may_use += orig_bytes;
- trace_btrfs_space_reservation(root->fs_info,
- "space_info", space_info->flags, orig_bytes, 1);
- ret = 0;
- }
+ if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
+ space_info->bytes_may_use += orig_bytes;
+ trace_btrfs_space_reservation(root->fs_info, "space_info",
+ space_info->flags, orig_bytes,
+ 1);
+ ret = 0;
}
/*
@@ -4114,13 +4116,15 @@ static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
return 0;
}
-void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
+void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
memset(rsv, 0, sizeof(*rsv));
spin_lock_init(&rsv->lock);
+ rsv->type = type;
}
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
+ unsigned short type)
{
struct btrfs_block_rsv *block_rsv;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4129,7 +4133,7 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
if (!block_rsv)
return NULL;
- btrfs_init_block_rsv(block_rsv);
+ btrfs_init_block_rsv(block_rsv, type);
block_rsv->space_info = __find_space_info(fs_info,
BTRFS_BLOCK_GROUP_METADATA);
return block_rsv;
@@ -4138,6 +4142,8 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
void btrfs_free_block_rsv(struct btrfs_root *root,
struct btrfs_block_rsv *rsv)
{
+ if (!rsv)
+ return;
btrfs_block_rsv_release(root, rsv, (u64)-1);
kfree(rsv);
}
@@ -4416,10 +4422,10 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
/*
- * two for root back/forward refs, two for directory entries
- * and one for root of the snapshot.
+ * two for root back/forward refs, two for directory entries,
+ * one for root of the snapshot and one for parent inode.
*/
- u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
+ u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
dst_rsv->space_info = src_rsv->space_info;
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}
@@ -5018,7 +5024,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
while (1) {
ret = find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY);
+ EXTENT_DIRTY, NULL);
if (ret)
break;
@@ -5096,8 +5102,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = remove_extent_backref(trans, extent_root, path,
NULL, refs_to_drop,
is_data);
- if (ret)
- goto abort;
+ if (ret) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
btrfs_release_path(path);
path->leave_spinning = 1;
@@ -5115,8 +5123,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_print_leaf(extent_root,
path->nodes[0]);
}
- if (ret < 0)
- goto abort;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
extent_slot = path->slots[0];
}
} else if (ret == -ENOENT) {
@@ -5130,7 +5140,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
(unsigned long long)owner_objectid,
(unsigned long long)owner_offset);
} else {
- goto abort;
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
}
leaf = path->nodes[0];
@@ -5140,8 +5151,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
BUG_ON(found_extent || extent_slot != path->slots[0]);
ret = convert_extent_item_v0(trans, extent_root, path,
owner_objectid, 0);
- if (ret < 0)
- goto abort;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
btrfs_release_path(path);
path->leave_spinning = 1;
@@ -5158,8 +5171,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
(unsigned long long)bytenr);
btrfs_print_leaf(extent_root, path->nodes[0]);
}
- if (ret < 0)
- goto abort;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
+
extent_slot = path->slots[0];
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, extent_slot);
@@ -5196,8 +5212,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = remove_extent_backref(trans, extent_root, path,
iref, refs_to_drop,
is_data);
- if (ret)
- goto abort;
+ if (ret) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
}
} else {
if (found_extent) {
@@ -5214,27 +5232,29 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
- if (ret)
- goto abort;
+ if (ret) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
btrfs_release_path(path);
if (is_data) {
ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
- if (ret)
- goto abort;
+ if (ret) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
}
ret = update_block_group(trans, root, bytenr, num_bytes, 0);
- if (ret)
- goto abort;
+ if (ret) {
+ btrfs_abort_transaction(trans, extent_root, ret);
+ goto out;
+ }
}
out:
btrfs_free_path(path);
return ret;
-
-abort:
- btrfs_abort_transaction(trans, extent_root, ret);
- goto out;
}
/*
@@ -5497,8 +5517,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *used_block_group;
u64 search_start = 0;
int empty_cluster = 2 * 1024 * 1024;
- int allowed_chunk_alloc = 0;
- int done_chunk_alloc = 0;
struct btrfs_space_info *space_info;
int loop = 0;
int index = 0;
@@ -5530,9 +5548,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
if (btrfs_mixed_space_info(space_info))
use_cluster = false;
- if (orig_root->ref_cows || empty_size)
- allowed_chunk_alloc = 1;
-
if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
last_ptr = &root->fs_info->meta_alloc_cluster;
if (!btrfs_test_opt(root, SSD))
@@ -5806,10 +5821,6 @@ checks:
trace_btrfs_reserve_extent(orig_root, block_group,
search_start, num_bytes);
- if (offset < search_start)
- btrfs_add_free_space(used_block_group, offset,
- search_start - offset);
- BUG_ON(offset > search_start);
if (used_block_group != block_group)
btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
@@ -5842,34 +5853,17 @@ loop:
index = 0;
loop++;
if (loop == LOOP_ALLOC_CHUNK) {
- if (allowed_chunk_alloc) {
- ret = do_chunk_alloc(trans, root, num_bytes +
- 2 * 1024 * 1024, data,
- CHUNK_ALLOC_LIMITED);
- /*
- * Do not bail out on ENOSPC since we
- * can do more things.
- */
- if (ret < 0 && ret != -ENOSPC) {
- btrfs_abort_transaction(trans,
- root, ret);
- goto out;
- }
- allowed_chunk_alloc = 0;
- if (ret == 1)
- done_chunk_alloc = 1;
- } else if (!done_chunk_alloc &&
- space_info->force_alloc ==
- CHUNK_ALLOC_NO_FORCE) {
- space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+ ret = do_chunk_alloc(trans, root, data,
+ CHUNK_ALLOC_FORCE);
+ /*
+ * Do not bail out on ENOSPC since we
+ * can do more things.
+ */
+ if (ret < 0 && ret != -ENOSPC) {
+ btrfs_abort_transaction(trans,
+ root, ret);
+ goto out;
}
-
- /*
- * We didn't allocate a chunk, go ahead and drop the
- * empty size and loop again.
- */
- if (!done_chunk_alloc)
- loop = LOOP_NO_EMPTY_SIZE;
}
if (loop == LOOP_NO_EMPTY_SIZE) {
@@ -5944,20 +5938,6 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
data = btrfs_get_alloc_profile(root, data);
again:
- /*
- * the only place that sets empty_size is btrfs_realloc_node, which
- * is not called recursively on allocations
- */
- if (empty_size || root->ref_cows) {
- ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes + 2 * 1024 * 1024, data,
- CHUNK_ALLOC_NO_FORCE);
- if (ret < 0 && ret != -ENOSPC) {
- btrfs_abort_transaction(trans, root, ret);
- return ret;
- }
- }
-
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
hint_byte, ins, data);
@@ -5967,12 +5947,6 @@ again:
num_bytes = num_bytes >> 1;
num_bytes = num_bytes & ~(root->sectorsize - 1);
num_bytes = max(num_bytes, min_alloc_size);
- ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes, data, CHUNK_ALLOC_FORCE);
- if (ret < 0 && ret != -ENOSPC) {
- btrfs_abort_transaction(trans, root, ret);
- return ret;
- }
if (num_bytes == min_alloc_size)
final_tried = true;
goto again;
@@ -6314,7 +6288,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
ret = block_rsv_use_bytes(block_rsv, blocksize);
if (!ret)
return block_rsv;
- if (ret) {
+ if (ret && !block_rsv->failfast) {
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
/*DEFAULT_RATELIMIT_BURST*/ 2);
@@ -7279,7 +7253,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
alloc_flags = update_block_group_flags(root, cache->flags);
if (alloc_flags != cache->flags) {
- ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ ret = do_chunk_alloc(trans, root, alloc_flags,
CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
@@ -7289,7 +7263,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
if (!ret)
goto out;
alloc_flags = get_alloc_profile(root, cache->space_info->flags);
- ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ ret = do_chunk_alloc(trans, root, alloc_flags,
CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
@@ -7303,7 +7277,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type)
{
u64 alloc_flags = get_alloc_profile(root, type);
- return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ return do_chunk_alloc(trans, root, alloc_flags,
CHUNK_ALLOC_FORCE);
}
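
/*
 * Illustrative note (not part of this change): do_chunk_alloc() no longer
 * takes a byte count; chunk sizing is decided inside the allocator, so
 * every caller now passes just the flags and a force level:
 *
 *	ret = do_chunk_alloc(trans, root, alloc_flags, CHUNK_ALLOC_FORCE);
 */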
@@ -7810,6 +7784,34 @@ error:
return ret;
}
+void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_block_group_cache *block_group, *tmp;
+ struct btrfs_root *extent_root = root->fs_info->extent_root;
+ struct btrfs_block_group_item item;
+ struct btrfs_key key;
+ int ret = 0;
+
+ list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
+ new_bg_list) {
+ list_del_init(&block_group->new_bg_list);
+
+ if (ret)
+ continue;
+
+ spin_lock(&block_group->lock);
+ memcpy(&item, &block_group->item, sizeof(item));
+ memcpy(&key, &block_group->key, sizeof(key));
+ spin_unlock(&block_group->lock);
+
+ ret = btrfs_insert_item(trans, extent_root, &key, &item,
+ sizeof(item));
+ if (ret)
+ btrfs_abort_transaction(trans, extent_root, ret);
+ }
+}
+
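/*
 * Usage sketch (illustrative, not part of this change): with block group
 * items now queued on trans->new_bgs instead of being inserted at
 * creation time, the commit path is expected to flush the list before
 * the transaction is written out. Roughly:
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... chunk allocation adds entries to trans->new_bgs ...
 *	btrfs_create_pending_block_groups(trans, root);
 *	ret = btrfs_commit_transaction(trans, root);
 */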
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytes_used,
u64 type, u64 chunk_objectid, u64 chunk_offset,
@@ -7843,6 +7845,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
spin_lock_init(&cache->lock);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
+ INIT_LIST_HEAD(&cache->new_bg_list);
btrfs_init_free_space_ctl(cache);
@@ -7874,12 +7877,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
ret = btrfs_add_block_group_cache(root->fs_info, cache);
BUG_ON(ret); /* Logic error */
- ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
- sizeof(cache->item));
- if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
- return ret;
- }
+ list_add_tail(&cache->new_bg_list, &trans->new_bgs);
set_avail_alloc_bits(extent_root->fs_info, type);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b08ea4717e9d..472873a94d96 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -45,6 +45,7 @@ struct extent_page_data {
struct bio *bio;
struct extent_io_tree *tree;
get_extent_t *get_extent;
+ unsigned long bio_flags;
/* tells writepage not to lock the state bits for this range
* it still does the unlocking
@@ -64,13 +65,13 @@ tree_fs_info(struct extent_io_tree *tree)
int __init extent_io_init(void)
{
- extent_state_cache = kmem_cache_create("extent_state",
+ extent_state_cache = kmem_cache_create("btrfs_extent_state",
sizeof(struct extent_state), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!extent_state_cache)
return -ENOMEM;
- extent_buffer_cache = kmem_cache_create("extent_buffers",
+ extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
sizeof(struct extent_buffer), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!extent_buffer_cache)
@@ -942,6 +943,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
* @end: the end offset in bytes (inclusive)
* @bits: the bits to set in this range
* @clear_bits: the bits to clear in this range
+ * @cached_state: state that we're going to cache
* @mask: the allocation mask
*
* This will go through and set bits for the given range. If any states exist
@@ -951,7 +953,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
* boundary bits like LOCK.
*/
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int clear_bits, gfp_t mask)
+ int bits, int clear_bits,
+ struct extent_state **cached_state, gfp_t mask)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
@@ -968,6 +971,15 @@ again:
}
spin_lock(&tree->lock);
+ if (cached_state && *cached_state) {
+ state = *cached_state;
+ if (state->start <= start && state->end > start &&
+ state->tree) {
+ node = &state->rb_node;
+ goto hit_next;
+ }
+ }
+
/*
* this search will find all the extents that end after
* our range starts.
@@ -998,6 +1010,7 @@ hit_next:
*/
if (state->start == start && state->end <= end) {
set_state_bits(tree, state, &bits);
+ cache_state(state, cached_state);
state = clear_state_bit(tree, state, &clear_bits, 0);
if (last_end == (u64)-1)
goto out;
@@ -1038,6 +1051,7 @@ hit_next:
goto out;
if (state->end <= end) {
set_state_bits(tree, state, &bits);
+ cache_state(state, cached_state);
state = clear_state_bit(tree, state, &clear_bits, 0);
if (last_end == (u64)-1)
goto out;
@@ -1076,6 +1090,7 @@ hit_next:
&bits);
if (err)
extent_io_tree_panic(tree, err);
+ cache_state(prealloc, cached_state);
prealloc = NULL;
start = this_end + 1;
goto search_again;
@@ -1098,6 +1113,7 @@ hit_next:
extent_io_tree_panic(tree, err);
set_state_bits(tree, prealloc, &bits);
+ cache_state(prealloc, cached_state);
clear_state_bit(tree, prealloc, &clear_bits, 0);
prealloc = NULL;
goto out;
@@ -1150,6 +1166,14 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
NULL, cached_state, mask);
}
+int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached_state, gfp_t mask)
+{
+ return set_extent_bit(tree, start, end,
+ EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
+ NULL, cached_state, mask);
+}
+
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask)
{
@@ -1294,18 +1318,42 @@ out:
* If nothing was found, 1 is returned. If found something, return 0.
*/
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, int bits)
+ u64 *start_ret, u64 *end_ret, int bits,
+ struct extent_state **cached_state)
{
struct extent_state *state;
+ struct rb_node *n;
int ret = 1;
spin_lock(&tree->lock);
+ if (cached_state && *cached_state) {
+ state = *cached_state;
+ if (state->end == start - 1 && state->tree) {
+ n = rb_next(&state->rb_node);
+ while (n) {
+ state = rb_entry(n, struct extent_state,
+ rb_node);
+ if (state->state & bits)
+ goto got_it;
+ n = rb_next(n);
+ }
+ free_extent_state(*cached_state);
+ *cached_state = NULL;
+ goto out;
+ }
+ free_extent_state(*cached_state);
+ *cached_state = NULL;
+ }
+
state = find_first_extent_bit_state(tree, start, bits);
+got_it:
if (state) {
+ cache_state(state, cached_state);
*start_ret = state->start;
*end_ret = state->end;
ret = 0;
}
+out:
spin_unlock(&tree->lock);
return ret;
}
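
/*
 * Usage sketch (illustrative, not part of this change): the new
 * cached_state argument lets repeated scans resume from the last hit
 * instead of re-walking the rbtree from the root on each call. A
 * hypothetical caller visiting every EXTENT_DIRTY range could do:
 *
 *	struct extent_state *cached = NULL;
 *	u64 start = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, start, &found_start,
 *				      &found_end, EXTENT_DIRTY, &cached)) {
 *		process_range(found_start, found_end); // hypothetical helper
 *		start = found_end + 1;
 *	}
 *	if (cached)
 *		free_extent_state(cached);
 */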
@@ -2068,7 +2116,7 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
}
read_unlock(&em_tree->lock);
- if (!em || IS_ERR(em)) {
+ if (!em) {
kfree(failrec);
return -EIO;
}
@@ -2304,8 +2352,8 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
struct extent_state *cached = NULL;
struct extent_state *state;
- pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
- "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
+ pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
+ "mirror=%ld\n", (u64)bio->bi_sector, err,
(long int)bio->bi_bdev);
tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -2709,12 +2757,15 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
end_bio_extent_readpage, mirror_num,
*bio_flags,
this_bio_flag);
- BUG_ON(ret == -ENOMEM);
- nr++;
- *bio_flags = this_bio_flag;
+ if (!ret) {
+ nr++;
+ *bio_flags = this_bio_flag;
+ }
}
- if (ret)
+ if (ret) {
SetPageError(page);
+ unlock_extent(tree, cur, cur + iosize - 1);
+ }
cur = cur + iosize;
pg_offset += iosize;
}
@@ -3161,12 +3212,16 @@ static int write_one_eb(struct extent_buffer *eb,
struct block_device *bdev = fs_info->fs_devices->latest_bdev;
u64 offset = eb->start;
unsigned long i, num_pages;
+ unsigned long bio_flags = 0;
int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
int ret = 0;
clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
num_pages = num_extent_pages(eb->start, eb->len);
atomic_set(&eb->io_pages, num_pages);
+ if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
+ bio_flags = EXTENT_BIO_TREE_LOG;
+
for (i = 0; i < num_pages; i++) {
struct page *p = extent_buffer_page(eb, i);
@@ -3175,7 +3230,8 @@ static int write_one_eb(struct extent_buffer *eb,
ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
-1, end_bio_extent_buffer_writepage,
- 0, 0, 0);
+ 0, epd->bio_flags, bio_flags);
+ epd->bio_flags = bio_flags;
if (ret) {
set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
SetPageError(p);
@@ -3210,6 +3266,7 @@ int btree_write_cache_pages(struct address_space *mapping,
.tree = tree,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
+ .bio_flags = 0,
};
int ret = 0;
int done = 0;
@@ -3254,19 +3311,34 @@ retry:
break;
}
+ spin_lock(&mapping->private_lock);
+ if (!PagePrivate(page)) {
+ spin_unlock(&mapping->private_lock);
+ continue;
+ }
+
eb = (struct extent_buffer *)page->private;
+
+ /*
+			 * Shouldn't happen, and normally this would be a
+			 * BUG_ON, but there is no sense in crashing the
+			 * user's box for something we can survive anyway.
+ */
if (!eb) {
+ spin_unlock(&mapping->private_lock);
WARN_ON(1);
continue;
}
- if (eb == prev_eb)
+ if (eb == prev_eb) {
+ spin_unlock(&mapping->private_lock);
continue;
+ }
- if (!atomic_inc_not_zero(&eb->refs)) {
- WARN_ON(1);
+ ret = atomic_inc_not_zero(&eb->refs);
+ spin_unlock(&mapping->private_lock);
+ if (!ret)
continue;
- }
prev_eb = eb;
ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
@@ -3457,7 +3529,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
if (epd->sync_io)
rw = WRITE_SYNC;
- ret = submit_one_bio(rw, epd->bio, 0, 0);
+ ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
BUG_ON(ret < 0); /* -ENOMEM */
epd->bio = NULL;
}
@@ -3480,6 +3552,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.get_extent = get_extent,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
+ .bio_flags = 0,
};
ret = __extent_writepage(page, wbc, &epd);
@@ -3504,6 +3577,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
.get_extent = get_extent,
.extent_locked = 1,
.sync_io = mode == WB_SYNC_ALL,
+ .bio_flags = 0,
};
struct writeback_control wbc_writepages = {
.sync_mode = mode,
@@ -3543,6 +3617,7 @@ int extent_writepages(struct extent_io_tree *tree,
.get_extent = get_extent,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
+ .bio_flags = 0,
};
ret = extent_write_cache_pages(tree, mapping, wbc,
@@ -3920,18 +3995,6 @@ out:
return ret;
}
-inline struct page *extent_buffer_page(struct extent_buffer *eb,
- unsigned long i)
-{
- return eb->pages[i];
-}
-
-inline unsigned long num_extent_pages(u64 start, u64 len)
-{
- return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
- (start >> PAGE_CACHE_SHIFT);
-}
-
static void __free_extent_buffer(struct extent_buffer *eb)
{
#if LEAK_DEBUG
@@ -4047,8 +4110,8 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
return eb;
err:
- for (i--; i > 0; i--)
- __free_page(eb->pages[i]);
+ for (; i > 0; i--)
+ __free_page(eb->pages[i - 1]);
__free_extent_buffer(eb);
return NULL;
}
@@ -4192,10 +4255,8 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
for (i = 0; i < num_pages; i++, index++) {
p = find_or_create_page(mapping, index, GFP_NOFS);
- if (!p) {
- WARN_ON(1);
+ if (!p)
goto free_eb;
- }
spin_lock(&mapping->private_lock);
if (PagePrivate(p)) {
@@ -4338,7 +4399,6 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
/* Should be safe to release our pages at this point */
btrfs_release_extent_buffer_page(eb, 0);
-
call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
return 1;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 25900af5b15d..711d12b80028 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -27,6 +27,7 @@
* type for this bio
*/
#define EXTENT_BIO_COMPRESSED 1
+#define EXTENT_BIO_TREE_LOG 2
#define EXTENT_BIO_FLAG_SHIFT 16
/* these are bit numbers for test/set bit */
@@ -232,11 +233,15 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
- int bits, int clear_bits, gfp_t mask);
+ int bits, int clear_bits,
+ struct extent_state **cached_state, gfp_t mask);
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
+int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
+ struct extent_state **cached_state, gfp_t mask);
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, int bits);
+ u64 *start_ret, u64 *end_ret, int bits,
+ struct extent_state **cached_state);
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
u64 start, int bits);
int extent_invalidatepage(struct extent_io_tree *tree,
@@ -277,8 +282,18 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, u64 start, int wait,
get_extent_t *get_extent, int mirror_num);
-unsigned long num_extent_pages(u64 start, u64 len);
-struct page *extent_buffer_page(struct extent_buffer *eb, unsigned long i);
+
+static inline unsigned long num_extent_pages(u64 start, u64 len)
+{
+ return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+ (start >> PAGE_CACHE_SHIFT);
+}
+
+static inline struct page *extent_buffer_page(struct extent_buffer *eb,
+ unsigned long i)
+{
+ return eb->pages[i];
+}
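
/*
 * Worked example (illustrative, not part of this change): with 4K pages
 * (PAGE_CACHE_SHIFT == 12), a 16K extent buffer starting at a
 * page-aligned offset spans exactly four pages:
 *
 *	num_extent_pages(0x1000, 0x4000)
 *		= ((0x1000 + 0x4000 + 0xfff) >> 12) - (0x1000 >> 12)
 *		= 5 - 1 = 4
 *
 * An unaligned start straddles one extra page, which the rounding term
 * accounts for.
 */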
static inline void extent_buffer_get(struct extent_buffer *eb)
{
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 7c97b3301459..b8cbc8d5c7f7 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -11,7 +11,7 @@ static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
- extent_map_cache = kmem_cache_create("extent_map",
+ extent_map_cache = kmem_cache_create("btrfs_extent_map",
sizeof(struct extent_map), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!extent_map_cache)
@@ -35,6 +35,7 @@ void extent_map_exit(void)
void extent_map_tree_init(struct extent_map_tree *tree)
{
tree->map = RB_ROOT;
+ INIT_LIST_HEAD(&tree->modified_extents);
rwlock_init(&tree->lock);
}
@@ -54,7 +55,9 @@ struct extent_map *alloc_extent_map(void)
em->in_tree = 0;
em->flags = 0;
em->compress_type = BTRFS_COMPRESS_NONE;
+ em->generation = 0;
atomic_set(&em->refs, 1);
+ INIT_LIST_HEAD(&em->list);
return em;
}
@@ -72,6 +75,7 @@ void free_extent_map(struct extent_map *em)
WARN_ON(atomic_read(&em->refs) == 0);
if (atomic_dec_and_test(&em->refs)) {
WARN_ON(em->in_tree);
+ WARN_ON(!list_empty(&em->list));
kmem_cache_free(extent_map_cache, em);
}
}
@@ -198,6 +202,14 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->block_len += merge->block_len;
em->block_start = merge->block_start;
merge->in_tree = 0;
+ if (merge->generation > em->generation) {
+ em->mod_start = em->start;
+ em->mod_len = em->len;
+ em->generation = merge->generation;
+ list_move(&em->list, &tree->modified_extents);
+ }
+
+ list_del_init(&merge->list);
rb_erase(&merge->rb_node, &tree->map);
free_extent_map(merge);
}
@@ -211,14 +223,34 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->block_len += merge->len;
rb_erase(&merge->rb_node, &tree->map);
merge->in_tree = 0;
+ if (merge->generation > em->generation) {
+ em->mod_len = em->len;
+ em->generation = merge->generation;
+ list_move(&em->list, &tree->modified_extents);
+ }
+ list_del_init(&merge->list);
free_extent_map(merge);
}
}
-int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+/**
+ * unpin_extent_cache - unpin an extent from the cache
+ * @tree: tree to unpin the extent in
+ * @start: logical offset in the file
+ * @len: length of the extent
+ * @gen: generation that this extent has been modified in
+ *
+ * Called after an extent has been written to disk properly. Set the generation
+ * to the generation that actually added the file item to the inode so we know
+ * we need to sync this extent when we call fsync().
+ */
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
+ u64 gen)
{
int ret = 0;
struct extent_map *em;
+ bool prealloc = false;
write_lock(&tree->lock);
em = lookup_extent_mapping(tree, start, len);
@@ -228,10 +260,24 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
if (!em)
goto out;
+ list_move(&em->list, &tree->modified_extents);
+ em->generation = gen;
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+ em->mod_start = em->start;
+ em->mod_len = em->len;
+
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ prealloc = true;
+ clear_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+ }
try_merge_map(tree, em);
+ if (prealloc) {
+ em->mod_start = em->start;
+ em->mod_len = em->len;
+ }
+
free_extent_map(em);
out:
write_unlock(&tree->lock);
@@ -269,6 +315,9 @@ int add_extent_mapping(struct extent_map_tree *tree,
}
atomic_inc(&em->refs);
+ em->mod_start = em->start;
+ em->mod_len = em->len;
+
try_merge_map(tree, em);
out:
return ret;
@@ -358,6 +407,8 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
rb_erase(&em->rb_node, &tree->map);
+ if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
+ list_del_init(&em->list);
em->in_tree = 0;
return ret;
}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 1195f09761fe..679225555f7b 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -13,6 +13,7 @@
#define EXTENT_FLAG_COMPRESSED 1
#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
+#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
struct extent_map {
struct rb_node rb_node;
@@ -20,18 +21,23 @@ struct extent_map {
/* all of these are in bytes */
u64 start;
u64 len;
+ u64 mod_start;
+ u64 mod_len;
u64 orig_start;
u64 block_start;
u64 block_len;
+ u64 generation;
unsigned long flags;
struct block_device *bdev;
atomic_t refs;
unsigned int in_tree;
unsigned int compress_type;
+ struct list_head list;
};
struct extent_map_tree {
struct rb_root map;
+ struct list_head modified_extents;
rwlock_t lock;
};
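
/*
 * Illustrative sketch (not part of this change): modified_extents is the
 * per-tree list that fsync walks to find extent maps changed in the
 * current transaction. A hypothetical consumer pruning already-logged
 * entries would operate under the tree lock:
 *
 *	struct extent_map *em, *n;
 *
 *	write_lock(&tree->lock);
 *	list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
 *		if (em->generation <= last_logged_gen) // hypothetical cutoff
 *			list_del_init(&em->list);
 *	}
 *	write_unlock(&tree->lock);
 */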
@@ -60,7 +66,7 @@ struct extent_map *alloc_extent_map(void);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
-int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len);
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
#endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 857d93cd01dc..1ad08e4e4a15 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -25,11 +25,12 @@
#include "transaction.h"
#include "print-tree.h"
-#define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
+#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) * 2) / \
size) - 1))
-#define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE))
+#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
+ PAGE_CACHE_SIZE))
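
/*
 * Worked example (illustrative, not part of this change): with a
 * hypothetical 4K of leaf data space and 4-byte crc32c checksums,
 * __MAX_CSUM_ITEMS() evaluates to roughly
 *
 *	((4096 - sizeof(struct btrfs_item) * 2) / 4) - 1  ~=  1010
 *
 * checksums per item. The cast and min_t(u32, ...) keep both operands
 * the same type, since the kernel's min() refuses to compare values of
 * different types.
 */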
#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
sizeof(struct btrfs_ordered_sum)) / \
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5caf285c6e4d..9ab1bed88116 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -39,6 +39,7 @@
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
+#include "volumes.h"
/*
* when auto defrag is enabled we
@@ -458,14 +459,15 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
* this drops all the extents in the cache that intersect the range
* [start, end]. Existing extents are split as required.
*/
-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
- int skip_pinned)
+void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+ int skip_pinned)
{
struct extent_map *em;
struct extent_map *split = NULL;
struct extent_map *split2 = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
u64 len = end - start + 1;
+ u64 gen;
int ret;
int testend = 1;
unsigned long flags;
@@ -477,11 +479,14 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
testend = 0;
}
while (1) {
+ int no_splits = 0;
+
if (!split)
split = alloc_extent_map();
if (!split2)
split2 = alloc_extent_map();
- BUG_ON(!split || !split2); /* -ENOMEM */
+ if (!split || !split2)
+ no_splits = 1;
write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
@@ -490,6 +495,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
break;
}
flags = em->flags;
+ gen = em->generation;
if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
if (testend && em->start + em->len >= start + len) {
free_extent_map(em);
@@ -506,6 +512,8 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
remove_extent_mapping(em_tree, em);
+ if (no_splits)
+ goto next;
if (em->block_start < EXTENT_MAP_LAST_BYTE &&
em->start < start) {
@@ -518,12 +526,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split->block_len = em->block_len;
else
split->block_len = split->len;
-
+ split->generation = gen;
split->bdev = em->bdev;
split->flags = flags;
split->compress_type = em->compress_type;
ret = add_extent_mapping(em_tree, split);
BUG_ON(ret); /* Logic error */
+ list_move(&split->list, &em_tree->modified_extents);
free_extent_map(split);
split = split2;
split2 = NULL;
@@ -537,6 +546,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split->bdev = em->bdev;
split->flags = flags;
split->compress_type = em->compress_type;
+ split->generation = gen;
if (compressed) {
split->block_len = em->block_len;
@@ -550,9 +560,11 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
ret = add_extent_mapping(em_tree, split);
BUG_ON(ret); /* Logic error */
+ list_move(&split->list, &em_tree->modified_extents);
free_extent_map(split);
split = NULL;
}
+next:
write_unlock(&em_tree->lock);
/* once for us */
@@ -564,7 +576,6 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
free_extent_map(split);
if (split2)
free_extent_map(split2);
- return 0;
}
/*
@@ -576,13 +587,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
* it is either truncated or split. Anything entirely inside the range
* is deleted from the tree.
*/
-int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
- u64 start, u64 end, u64 *hint_byte, int drop_cache)
+int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode,
+ struct btrfs_path *path, u64 start, u64 end,
+ u64 *drop_end, int drop_cache)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
- struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key new_key;
u64 ino = btrfs_ino(inode);
@@ -597,14 +608,12 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
int recow;
int ret;
int modify_tree = -1;
+ int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
+ int found = 0;
if (drop_cache)
btrfs_drop_extent_cache(inode, start, end - 1, 0);
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
if (start >= BTRFS_I(inode)->disk_i_size)
modify_tree = 0;
@@ -666,6 +675,7 @@ next_slot:
goto next_slot;
}
+ found = 1;
search_start = max(key.offset, start);
if (recow || !modify_tree) {
modify_tree = -1;
@@ -707,14 +717,13 @@ next_slot:
extent_end - start);
btrfs_mark_buffer_dirty(leaf);
- if (disk_bytenr > 0) {
+ if (update_refs && disk_bytenr > 0) {
ret = btrfs_inc_extent_ref(trans, root,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
new_key.objectid,
start - extent_offset, 0);
BUG_ON(ret); /* -ENOMEM */
- *hint_byte = disk_bytenr;
}
key.offset = start;
}
@@ -734,10 +743,8 @@ next_slot:
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - end);
btrfs_mark_buffer_dirty(leaf);
- if (disk_bytenr > 0) {
+ if (update_refs && disk_bytenr > 0)
inode_sub_bytes(inode, end - key.offset);
- *hint_byte = disk_bytenr;
- }
break;
}
@@ -753,10 +760,8 @@ next_slot:
btrfs_set_file_extent_num_bytes(leaf, fi,
start - key.offset);
btrfs_mark_buffer_dirty(leaf);
- if (disk_bytenr > 0) {
+ if (update_refs && disk_bytenr > 0)
inode_sub_bytes(inode, extent_end - start);
- *hint_byte = disk_bytenr;
- }
if (end == extent_end)
break;
@@ -777,12 +782,13 @@ next_slot:
del_nr++;
}
- if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ if (update_refs &&
+ extent_type == BTRFS_FILE_EXTENT_INLINE) {
inode_sub_bytes(inode,
extent_end - key.offset);
extent_end = ALIGN(extent_end,
root->sectorsize);
- } else if (disk_bytenr > 0) {
+ } else if (update_refs && disk_bytenr > 0) {
ret = btrfs_free_extent(trans, root,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
@@ -791,7 +797,6 @@ next_slot:
BUG_ON(ret); /* -ENOMEM */
inode_sub_bytes(inode,
extent_end - key.offset);
- *hint_byte = disk_bytenr;
}
if (end == extent_end)
@@ -806,7 +811,7 @@ next_slot:
del_nr);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
- goto out;
+ break;
}
del_nr = 0;
@@ -825,7 +830,24 @@ next_slot:
btrfs_abort_transaction(trans, root, ret);
}
-out:
+ if (drop_end)
+ *drop_end = found ? min(end, extent_end) : end;
+ btrfs_release_path(path);
+ return ret;
+}
+
+int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode, u64 start,
+ u64 end, int drop_cache)
+{
+ struct btrfs_path *path;
+ int ret;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
+ drop_cache);
btrfs_free_path(path);
return ret;
}
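
/*
 * Illustrative note (not part of this change): splitting out
 * __btrfs_drop_extents() lets callers that already hold a path (such as
 * the hole punching code) reuse it and learn from *drop_end how far the
 * drop progressed before stopping, e.g. on -ENOSPC. Existing callers
 * keep the old behavior through the wrapper:
 *
 *	ret = btrfs_drop_extents(trans, root, inode, start, end, 1);
 *
 * which allocates a temporary path and ignores drop_end.
 */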
@@ -892,8 +914,6 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
int ret;
u64 ino = btrfs_ino(inode);
- btrfs_drop_extent_cache(inode, start, end - 1, 0);
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -935,12 +955,16 @@ again:
btrfs_set_item_key_safe(trans, root, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi,
+ trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - end);
btrfs_set_file_extent_offset(leaf, fi,
end - orig_offset);
fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi,
+ trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
end - other_start);
btrfs_mark_buffer_dirty(leaf);
@@ -958,12 +982,16 @@ again:
struct btrfs_file_extent_item);
btrfs_set_file_extent_num_bytes(leaf, fi,
start - key.offset);
+ btrfs_set_file_extent_generation(leaf, fi,
+ trans->transid);
path->slots[0]++;
new_key.offset = start;
btrfs_set_item_key_safe(trans, root, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi,
+ trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
other_end - start);
btrfs_set_file_extent_offset(leaf, fi,
@@ -991,12 +1019,14 @@ again:
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
split - key.offset);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - split);
@@ -1056,12 +1086,14 @@ again:
struct btrfs_file_extent_item);
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_mark_buffer_dirty(leaf);
} else {
fi = btrfs_item_ptr(leaf, del_slot - 1,
struct btrfs_file_extent_item);
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - key.offset);
btrfs_mark_buffer_dirty(leaf);
@@ -1173,8 +1205,8 @@ again:
clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
- GFP_NOFS);
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ 0, 0, &cached_state, GFP_NOFS);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
start_pos, last_pos - 1, &cached_state,
GFP_NOFS);
@@ -1514,16 +1546,24 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
trace_btrfs_sync_file(file, datasync);
+ /*
+	 * Write and wait on the dirty pages in the range outside of the
+	 * ->i_mutex, so that multiple tasks can flush them concurrently,
+	 * which improves performance.
+ */
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (ret)
+ return ret;
+
mutex_lock(&inode->i_mutex);
/*
- * we wait first, since the writeback may change the inode, also wait
- * ordered range does a filemape_write_and_wait_range which is why we
- * don't do it above like other file systems.
+	 * Flush the dirty pages again so that no dirty pages are left
+	 * behind in the range.
*/
- root->log_batch++;
+ atomic_inc(&root->log_batch);
btrfs_wait_ordered_range(inode, start, end);
- root->log_batch++;
+ atomic_inc(&root->log_batch);
/*
* check the transaction that last modified this inode
@@ -1544,6 +1584,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
BTRFS_I(inode)->last_trans <=
root->fs_info->last_trans_committed) {
BTRFS_I(inode)->last_trans = 0;
+
+ /*
+	 * We've had everything committed since the last time we were
+	 * modified, so clear this flag in case it was set for whatever
+	 * reason; it's no longer relevant.
+ */
+ clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
mutex_unlock(&inode->i_mutex);
goto out;
}
@@ -1599,6 +1647,7 @@ out:
static const struct vm_operations_struct btrfs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = btrfs_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1610,11 +1659,328 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
file_accessed(filp);
vma->vm_ops = &btrfs_file_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
+static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
+ int slot, u64 start, u64 end)
+{
+ struct btrfs_file_extent_item *fi;
+ struct btrfs_key key;
+
+ if (slot < 0 || slot >= btrfs_header_nritems(leaf))
+ return 0;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.objectid != btrfs_ino(inode) ||
+ key.type != BTRFS_EXTENT_DATA_KEY)
+ return 0;
+
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
+ return 0;
+
+ if (btrfs_file_extent_disk_bytenr(leaf, fi))
+ return 0;
+
+ if (key.offset == end)
+ return 1;
+ if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
+ return 1;
+ return 0;
+}
+
+static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
+ struct btrfs_path *path, u64 offset, u64 end)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *fi;
+ struct extent_map *hole_em;
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct btrfs_key key;
+ int ret;
+
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = offset;
+
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ if (ret < 0)
+ return ret;
+ BUG_ON(!ret);
+
+ leaf = path->nodes[0];
+ if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
+ u64 num_bytes;
+
+ path->slots[0]--;
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
+ end - offset;
+ btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
+ }
+
+ if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
+ u64 num_bytes;
+
+ path->slots[0]++;
+ key.offset = offset;
+ btrfs_set_item_key_safe(trans, root, path, &key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
+ offset;
+ btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
+ btrfs_set_file_extent_offset(leaf, fi, 0);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
+ }
+ btrfs_release_path(path);
+
+ ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
+ 0, 0, end - offset, 0, end - offset,
+ 0, 0, 0);
+ if (ret)
+ return ret;
+
+out:
+ btrfs_release_path(path);
+
+ hole_em = alloc_extent_map();
+ if (!hole_em) {
+ btrfs_drop_extent_cache(inode, offset, end - 1, 0);
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+ } else {
+ hole_em->start = offset;
+ hole_em->len = end - offset;
+ hole_em->orig_start = offset;
+
+ hole_em->block_start = EXTENT_MAP_HOLE;
+ hole_em->block_len = 0;
+ hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
+ hole_em->compress_type = BTRFS_COMPRESS_NONE;
+ hole_em->generation = trans->transid;
+
+ do {
+ btrfs_drop_extent_cache(inode, offset, end - 1, 0);
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, hole_em);
+ if (!ret)
+ list_move(&hole_em->list,
+ &em_tree->modified_extents);
+ write_unlock(&em_tree->lock);
+ } while (ret == -EEXIST);
+ free_extent_map(hole_em);
+ if (ret)
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+ }
+
+ return 0;
+}
+
+static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct extent_state *cached_state = NULL;
+ struct btrfs_path *path;
+ struct btrfs_block_rsv *rsv;
+ struct btrfs_trans_handle *trans;
+ u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
+ u64 lockstart = (offset + mask) & ~mask;
+ u64 lockend = ((offset + len) & ~mask) - 1;
+ u64 cur_offset = lockstart;
+ u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
+ u64 drop_end;
+ unsigned long nr;
+ int ret = 0;
+ int err = 0;
+ bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
+ ((offset + len) >> PAGE_CACHE_SHIFT);
+
+ btrfs_wait_ordered_range(inode, offset, len);
+
+ mutex_lock(&inode->i_mutex);
+ if (offset >= inode->i_size) {
+ mutex_unlock(&inode->i_mutex);
+ return 0;
+ }
+
+ /*
+ * Only do this if we are in the same page and we aren't doing the
+ * entire page.
+ */
+ if (same_page && len < PAGE_CACHE_SIZE) {
+ ret = btrfs_truncate_page(inode, offset, len, 0);
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+ }
+
+ /* zero back part of the first page */
+ ret = btrfs_truncate_page(inode, offset, 0, 0);
+ if (ret) {
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+ }
+
+ /* zero the front end of the last page */
+ ret = btrfs_truncate_page(inode, offset + len, 0, 1);
+ if (ret) {
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+ }
+
+ if (lockend < lockstart) {
+ mutex_unlock(&inode->i_mutex);
+ return 0;
+ }
+
+ while (1) {
+ struct btrfs_ordered_extent *ordered;
+
+ truncate_pagecache_range(inode, lockstart, lockend);
+
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ 0, &cached_state);
+ ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
+
+ /*
+		 * We need to make sure there are no ordered extents in this
+		 * range and that nobody raced in and read a page in this
+		 * range; if either happened, we need to try again.
+ */
+ if ((!ordered ||
+ (ordered->file_offset + ordered->len < lockstart ||
+ ordered->file_offset > lockend)) &&
+ !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, EXTENT_UPTODATE, 0,
+ cached_state)) {
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+ break;
+ }
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state, GFP_NOFS);
+ btrfs_wait_ordered_range(inode, lockstart,
+ lockend - lockstart + 1);
+ }
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+ if (!rsv) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
+ rsv->failfast = 1;
+
+ /*
+ * 1 - update the inode
+ * 1 - removing the extents in the range
+ * 1 - adding the hole extent
+ */
+ trans = btrfs_start_transaction(root, 3);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+ goto out_free;
+ }
+
+ ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
+ min_size);
+ BUG_ON(ret);
+ trans->block_rsv = rsv;
+
+ while (cur_offset < lockend) {
+ ret = __btrfs_drop_extents(trans, root, inode, path,
+ cur_offset, lockend + 1,
+ &drop_end, 1);
+ if (ret != -ENOSPC)
+ break;
+
+ trans->block_rsv = &root->fs_info->trans_block_rsv;
+
+ ret = fill_holes(trans, inode, path, cur_offset, drop_end);
+ if (ret) {
+ err = ret;
+ break;
+ }
+
+ cur_offset = drop_end;
+
+ ret = btrfs_update_inode(trans, root, inode);
+ if (ret) {
+ err = ret;
+ break;
+ }
+
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+
+ trans = btrfs_start_transaction(root, 3);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ break;
+ }
+
+ ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
+ rsv, min_size);
+ BUG_ON(ret); /* shouldn't happen */
+ trans->block_rsv = rsv;
+ }
+
+ if (ret) {
+ err = ret;
+ goto out_trans;
+ }
+
+ trans->block_rsv = &root->fs_info->trans_block_rsv;
+ ret = fill_holes(trans, inode, path, cur_offset, drop_end);
+ if (ret) {
+ err = ret;
+ goto out_trans;
+ }
+
+out_trans:
+ if (!trans)
+ goto out_free;
+
+ trans->block_rsv = &root->fs_info->trans_block_rsv;
+ ret = btrfs_update_inode(trans, root, inode);
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+out_free:
+ btrfs_free_path(path);
+ btrfs_free_block_rsv(root, rsv);
+out:
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ &cached_state, GFP_NOFS);
+ mutex_unlock(&inode->i_mutex);
+ if (ret && !err)
+ err = ret;
+ return err;
+}
+
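/*
 * Design note (illustrative, not part of this change): each pass of the
 * drop loop above runs in its own transaction, re-funded from the global
 * transaction reservation before every retry:
 *
 *	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
 *				      rsv, min_size);
 *	trans->block_rsv = rsv;
 *
 * With rsv->failfast set, use_block_rsv() (see the extent-tree hunk
 * above) returns the failure quietly instead of warning, so the loop can
 * key off -ENOSPC to split a large punch across many small transactions.
 */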
static long btrfs_fallocate(struct file *file, int mode,
loff_t offset, loff_t len)
{
@@ -1633,15 +1999,18 @@ static long btrfs_fallocate(struct file *file, int mode,
alloc_start = offset & ~mask;
alloc_end = (offset + len + mask) & ~mask;
- /* We only support the FALLOC_FL_KEEP_SIZE mode */
- if (mode & ~FALLOC_FL_KEEP_SIZE)
+	/* Make sure we aren't being given some crap mode */
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ return btrfs_punch_hole(inode, offset, len);
+
/*
* Make sure we have enough space before we do the
* allocation.
*/
- ret = btrfs_check_data_free_space(inode, len);
+ ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1);
if (ret)
return ret;
@@ -1748,7 +2117,7 @@ static long btrfs_fallocate(struct file *file, int mode,
out:
mutex_unlock(&inode->i_mutex);
/* Let go of our reservation. */
- btrfs_free_reserved_data_space(inode, len);
+ btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1);
return ret;
}
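
/*
 * Usage sketch (illustrative, not part of this change): with
 * FALLOC_FL_PUNCH_HOLE accepted above, user space can deallocate a range
 * in the middle of a file. A minimal, hypothetical test program:
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Punch out 1MiB at offset 1MiB; PUNCH_HOLE requires KEEP_SIZE. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1 << 20, 1 << 20)) {
		perror("fallocate");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}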
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 6b10acfc2f5c..1027b854b90c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -966,7 +966,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
block_group->key.offset)) {
ret = find_first_extent_bit(unpin, start,
&extent_start, &extent_end,
- EXTENT_DIRTY);
+ EXTENT_DIRTY, NULL);
if (ret) {
ret = 0;
break;
@@ -1454,9 +1454,7 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
max_t(u64, *offset, bitmap_info->offset));
bits = bytes_to_bits(*bytes, ctl->unit);
- for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
- i < BITS_PER_BITMAP;
- i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
+ for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
next_zero = find_next_zero_bit(bitmap_info->bitmap,
BITS_PER_BITMAP, i);
if ((next_zero - i) >= bits) {
@@ -2307,9 +2305,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
again:
found_bits = 0;
- for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
- i < BITS_PER_BITMAP;
- i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
+ for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
next_zero = find_next_zero_bit(entry->bitmap,
BITS_PER_BITMAP, i);
if (next_zero - i >= min_bits) {
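
/*
 * Equivalence note (not part of this change): for_each_set_bit_from()
 * expands to the same scan the open-coded loops performed, resuming from
 * the current value of i:
 *
 *	for ((i) = find_next_bit((addr), (size), (i)); \
 *	     (i) < (size); \
 *	     (i) = find_next_bit((addr), (size), (i) + 1))
 *
 * so the conversion is purely cosmetic.
 */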
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h
index db2ff9773b99..1d982812ab67 100644
--- a/fs/btrfs/hash.h
+++ b/fs/btrfs/hash.h
@@ -24,4 +24,14 @@ static inline u64 btrfs_name_hash(const char *name, int len)
{
return crc32c((u32)~1, name, len);
}
+
+/*
+ * Figure the key offset of an extended inode ref
+ */
+static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
+ int len)
+{
+ return (u64) crc32c(parent_objectid, name, len);
+}
+
#endif
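
/*
 * Worked example (illustrative, not part of this change): an extended
 * inode ref is keyed as (inode objectid, BTRFS_INODE_EXTREF_KEY, hash),
 * where the hash seeds crc32c with the parent directory's objectid:
 *
 *	key.objectid = inode_objectid;
 *	key.type = BTRFS_INODE_EXTREF_KEY;
 *	key.offset = btrfs_extref_hash(parent_ino, name, name_len);
 *
 * Distinct (parent, name) pairs can still hash to the same offset, which
 * is why the lookup code walks every extref packed into the found item.
 */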
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index a13cf1a96c73..48b8fda93132 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -18,6 +18,7 @@
#include "ctree.h"
#include "disk-io.h"
+#include "hash.h"
#include "transaction.h"
#include "print-tree.h"
@@ -50,18 +51,57 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name,
return 0;
}
-struct btrfs_inode_ref *
+int btrfs_find_name_in_ext_backref(struct btrfs_path *path, u64 ref_objectid,
+ const char *name, int name_len,
+ struct btrfs_inode_extref **extref_ret)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_inode_extref *extref;
+ unsigned long ptr;
+ unsigned long name_ptr;
+ u32 item_size;
+ u32 cur_offset = 0;
+ int ref_name_len;
+
+ leaf = path->nodes[0];
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+
+ /*
+	 * Search all extended backrefs in this item. Entries can only
+	 * collide on the name hash, so most of the time this compares
+	 * against a single buffer. If all is well, we return success and
+	 * the inode ref object.
+ */
+ while (cur_offset < item_size) {
+ extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
+ name_ptr = (unsigned long)(&extref->name);
+ ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
+
+ if (ref_name_len == name_len &&
+ btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
+ (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)) {
+ if (extref_ret)
+ *extref_ret = extref;
+ return 1;
+ }
+
+ cur_offset += ref_name_len + sizeof(*extref);
+ }
+ return 0;
+}
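
/*
 * Layout sketch (illustrative, not part of this change): a single extref
 * item packs variable-length entries back to back; assuming the on-disk
 * structure introduced elsewhere in this series,
 *
 *	struct btrfs_inode_extref {
 *		__le64 parent_objectid;
 *		__le64 index;
 *		__le16 name_len;
 *		__u8   name[0];
 *	} __attribute__((packed));
 *
 * each iteration above advances by ref_name_len + sizeof(*extref) to
 * reach the next entry.
 */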
+
+static struct btrfs_inode_ref *
btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- const char *name, int name_len,
- u64 inode_objectid, u64 ref_objectid, int mod)
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const char *name, int name_len,
+ u64 inode_objectid, u64 ref_objectid, int ins_len,
+ int cow)
{
+ int ret;
struct btrfs_key key;
struct btrfs_inode_ref *ref;
- int ins_len = mod < 0 ? -1 : 0;
- int cow = mod != 0;
- int ret;
key.objectid = inode_objectid;
key.type = BTRFS_INODE_REF_KEY;
@@ -77,13 +117,150 @@ btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
return ref;
}
-int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+/* Returns NULL if no extref found */
+struct btrfs_inode_extref *
+btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const char *name, int name_len,
+ u64 inode_objectid, u64 ref_objectid, int ins_len,
+ int cow)
+{
+ int ret;
+ struct btrfs_key key;
+ struct btrfs_inode_extref *extref;
+
+ key.objectid = inode_objectid;
+ key.type = BTRFS_INODE_EXTREF_KEY;
+ key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+
+ ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (ret > 0)
+ return NULL;
+ if (!btrfs_find_name_in_ext_backref(path, ref_objectid, name, name_len, &extref))
+ return NULL;
+ return extref;
+}
+
+int btrfs_get_inode_ref_index(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ const char *name, int name_len,
+ u64 inode_objectid, u64 ref_objectid, int mod,
+ u64 *ret_index)
+{
+ struct btrfs_inode_ref *ref;
+ struct btrfs_inode_extref *extref;
+ int ins_len = mod < 0 ? -1 : 0;
+ int cow = mod != 0;
+
+ ref = btrfs_lookup_inode_ref(trans, root, path, name, name_len,
+ inode_objectid, ref_objectid, ins_len,
+ cow);
+ if (IS_ERR(ref))
+ return PTR_ERR(ref);
+
+ if (ref != NULL) {
+ *ret_index = btrfs_inode_ref_index(path->nodes[0], ref);
+ return 0;
+ }
+
+ btrfs_release_path(path);
+
+ extref = btrfs_lookup_inode_extref(trans, root, path, name,
+ name_len, inode_objectid,
+ ref_objectid, ins_len, cow);
+ if (IS_ERR(extref))
+ return PTR_ERR(extref);
+
+ if (extref) {
+ *ret_index = btrfs_inode_extref_index(path->nodes[0], extref);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 *index)
{
struct btrfs_path *path;
struct btrfs_key key;
+ struct btrfs_inode_extref *extref;
+ struct extent_buffer *leaf;
+ int ret;
+ int del_len = name_len + sizeof(*extref);
+ unsigned long ptr;
+ unsigned long item_start;
+ u32 item_size;
+
+ key.objectid = inode_objectid;
+ btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
+ key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ path->leave_spinning = 1;
+
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret > 0)
+ ret = -ENOENT;
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Sanity check - did we find the right item for this name?
+	 * This should always succeed, so an error here will make the FS
+	 * readonly.
+ */
+ if (!btrfs_find_name_in_ext_backref(path, ref_objectid,
+ name, name_len, &extref)) {
+ btrfs_std_error(root->fs_info, -ENOENT);
+ ret = -EROFS;
+ goto out;
+ }
+
+ leaf = path->nodes[0];
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ if (index)
+ *index = btrfs_inode_extref_index(leaf, extref);
+
+ if (del_len == item_size) {
+ /*
+		 * Common case: only one ref in the item, so remove the
+		 * whole item.
+ */
+ ret = btrfs_del_item(trans, root, path);
+ goto out;
+ }
+
+ ptr = (unsigned long)extref;
+ item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
+
+ memmove_extent_buffer(leaf, ptr, ptr + del_len,
+ item_size - (ptr + del_len - item_start));
+
+ btrfs_truncate_item(trans, root, path, item_size - del_len, 1);
+
+out:
+ btrfs_free_path(path);
+
+ return ret;
+}
+
+int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ const char *name, int name_len,
+ u64 inode_objectid, u64 ref_objectid, u64 *index)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
struct btrfs_inode_ref *ref;
struct extent_buffer *leaf;
unsigned long ptr;
@@ -91,6 +268,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
u32 item_size;
u32 sub_item_len;
int ret;
+ int search_ext_refs = 0;
int del_len = name_len + sizeof(*ref);
key.objectid = inode_objectid;
@@ -106,12 +284,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
ret = -ENOENT;
+ search_ext_refs = 1;
goto out;
} else if (ret < 0) {
goto out;
}
if (!find_name_in_backref(path, name, name_len, &ref)) {
ret = -ENOENT;
+ search_ext_refs = 1;
goto out;
}
leaf = path->nodes[0];
@@ -129,8 +309,78 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
- btrfs_truncate_item(trans, root, path,
- item_size - sub_item_len, 1);
+ btrfs_truncate_item(trans, root, path, item_size - sub_item_len, 1);
+out:
+ btrfs_free_path(path);
+
+ if (search_ext_refs) {
+ /*
+		 * No refs were found, or we could not find the name in
+		 * our ref array, so look for and remove the extended
+		 * inode ref instead.
+ */
+ return btrfs_del_inode_extref(trans, root, name, name_len,
+ inode_objectid, ref_objectid, index);
+ }
+
+ return ret;
+}
+
+/*
+ * btrfs_insert_inode_extref() - Inserts an extended inode ref into a tree.
+ *
+ * The caller must have checked against BTRFS_LINK_MAX already.
+ */
+static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ const char *name, int name_len,
+ u64 inode_objectid, u64 ref_objectid, u64 index)
+{
+ struct btrfs_inode_extref *extref;
+ int ret;
+ int ins_len = name_len + sizeof(*extref);
+ unsigned long ptr;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_item *item;
+
+ key.objectid = inode_objectid;
+ key.type = BTRFS_INODE_EXTREF_KEY;
+ key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ path->leave_spinning = 1;
+ ret = btrfs_insert_empty_item(trans, root, path, &key,
+ ins_len);
+ if (ret == -EEXIST) {
+ if (btrfs_find_name_in_ext_backref(path, ref_objectid,
+ name, name_len, NULL))
+ goto out;
+
+ btrfs_extend_item(trans, root, path, ins_len);
+ ret = 0;
+ }
+ if (ret < 0)
+ goto out;
+
+ leaf = path->nodes[0];
+ item = btrfs_item_nr(leaf, path->slots[0]);
+ ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char);
+ ptr += btrfs_item_size(leaf, item) - ins_len;
+ extref = (struct btrfs_inode_extref *)ptr;
+
+ btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len);
+ btrfs_set_inode_extref_index(path->nodes[0], extref, index);
+ btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid);
+
+ ptr = (unsigned long)&extref->name;
+ write_extent_buffer(path->nodes[0], name, ptr, name_len);
+ btrfs_mark_buffer_dirty(path->nodes[0]);
+
out:
btrfs_free_path(path);
return ret;
@@ -191,6 +441,19 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
+
+ if (ret == -EMLINK) {
+ struct btrfs_super_block *disk_super = root->fs_info->super_copy;
+
+		/*
+		 * We ran out of space in the ref array. Need to add an
+		 * extended ref.
+		 */
+ if (btrfs_super_incompat_flags(disk_super)
+ & BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
+ ret = btrfs_insert_inode_extref(trans, root, name,
+ name_len,
+ inode_objectid,
+ ref_objectid, index);
+ }
+
return ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a6ed6944e50c..95542a1b3dfc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -94,8 +94,6 @@ static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
-static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode);
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *dir,
@@ -230,7 +228,6 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
u64 inline_len = actual_end - start;
u64 aligned_end = (end + root->sectorsize - 1) &
~((u64)root->sectorsize - 1);
- u64 hint_byte;
u64 data_len = inline_len;
int ret;
@@ -247,8 +244,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
return 1;
}
- ret = btrfs_drop_extents(trans, inode, start, aligned_end,
- &hint_byte, 1);
+ ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, 1);
if (ret)
return ret;
@@ -664,7 +660,7 @@ retry:
async_extent->compressed_size,
async_extent->compressed_size,
0, alloc_hint, &ins, 1);
- if (ret)
+ if (ret && ret != -ENOSPC)
btrfs_abort_transaction(trans, root, ret);
btrfs_end_transaction(trans, root);
}
@@ -1308,6 +1304,7 @@ out_check:
em->block_start = disk_bytenr;
em->bdev = root->fs_info->fs_devices->latest_bdev;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
+ set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
while (1) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
@@ -1364,11 +1361,7 @@ out_check:
}
error:
- if (nolock) {
- err = btrfs_end_transaction_nolock(trans, root);
- } else {
- err = btrfs_end_transaction(trans, root);
- }
+ err = btrfs_end_transaction(trans, root);
if (!ret)
ret = err;
@@ -1785,7 +1778,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key ins;
- u64 hint;
int ret;
path = btrfs_alloc_path();
@@ -1803,8 +1795,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
* the caller is expected to unpin it and allow it to be merged
* with the others.
*/
- ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
- &hint, 0);
+ ret = btrfs_drop_extents(trans, root, inode, file_pos,
+ file_pos + num_bytes, 0);
if (ret)
goto out;
@@ -1828,10 +1820,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_encryption(leaf, fi, encryption);
btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
- btrfs_unlock_up_safe(path, 1);
- btrfs_set_lock_blocking(leaf);
-
btrfs_mark_buffer_dirty(leaf);
+ btrfs_release_path(path);
inode_add_bytes(inode, num_bytes);
@@ -1929,11 +1919,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->len,
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
- unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
- ordered_extent->file_offset,
- ordered_extent->len);
}
-
+ unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
+ ordered_extent->file_offset, ordered_extent->len,
+ trans->transid);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
@@ -1949,6 +1938,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
+ } else {
+ btrfs_set_inode_last_trans(trans, inode);
}
ret = 0;
out_unlock:
@@ -1958,12 +1949,8 @@ out_unlock:
out:
if (root != root->fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, ordered_extent->len);
- if (trans) {
- if (nolock)
- btrfs_end_transaction_nolock(trans, root);
- else
- btrfs_end_transaction(trans, root);
- }
+ if (trans)
+ btrfs_end_transaction(trans, root);
if (ret)
clear_extent_uptodate(io_tree, ordered_extent->file_offset,
@@ -2119,7 +2106,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
if (empty)
return;
- down_read(&root->fs_info->cleanup_work_sem);
spin_lock(&fs_info->delayed_iput_lock);
list_splice_init(&fs_info->delayed_iputs, &list);
spin_unlock(&fs_info->delayed_iput_lock);
@@ -2130,7 +2116,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
iput(delayed->inode);
kfree(delayed);
}
- up_read(&root->fs_info->cleanup_work_sem);
}
enum btrfs_orphan_cleanup_state {
@@ -2198,7 +2183,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
int ret;
if (!root->orphan_block_rsv) {
- block_rsv = btrfs_alloc_block_rsv(root);
+ block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
if (!block_rsv)
return -ENOMEM;
}
@@ -2225,7 +2210,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
insert = 1;
#endif
insert = 1;
- atomic_dec(&root->orphan_inodes);
+ atomic_inc(&root->orphan_inodes);
}
if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
@@ -2590,6 +2575,18 @@ static void btrfs_read_locked_inode(struct inode *inode)
inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
+ BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
+
+ /*
+ * If we were modified in the current generation, evicted from memory,
+ * and then re-read, we need to do a full sync, since we have no idea
+ * which extents were modified before we were evicted from cache.
+ */
+ if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+
inode->i_version = btrfs_inode_sequence(leaf, inode_item);
inode->i_generation = BTRFS_I(inode)->generation;
inode->i_rdev = 0;
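
The flag set here is consumed at fsync time; a hedged sketch of the shape of
that check (assumed from the flag's name, not quoted from the log-tree code):

static int needs_full_sync_sketch(struct inode *inode)
{
        /* modified this generation but evicted: per-extent tracking lost */
        return test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                        &BTRFS_I(inode)->runtime_flags);
}
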
@@ -2747,8 +2744,9 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
return btrfs_update_inode_item(trans, root, inode);
}
-static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode)
+noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode)
{
int ret;
@@ -2894,7 +2892,6 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
- struct btrfs_inode_ref *ref;
struct btrfs_dir_item *di;
struct inode *inode = dentry->d_inode;
u64 index;
@@ -3008,17 +3005,17 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
}
btrfs_release_path(path);
- ref = btrfs_lookup_inode_ref(trans, root, path,
- dentry->d_name.name, dentry->d_name.len,
- ino, dir_ino, 0);
- if (IS_ERR(ref)) {
- err = PTR_ERR(ref);
+ ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name,
+ dentry->d_name.len, ino, dir_ino, 0,
+ &index);
+ if (ret) {
+ err = ret;
goto out;
}
- BUG_ON(!ref); /* Logic error */
+
if (check_path_shared(root, path))
goto out;
- index = btrfs_inode_ref_index(path->nodes[0], ref);
+
btrfs_release_path(path);
/*
@@ -3061,7 +3058,7 @@ out:
static void __unlink_end_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- if (trans->block_rsv == &root->fs_info->global_block_rsv) {
+ if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) {
btrfs_block_rsv_release(root, trans->block_rsv,
trans->bytes_reserved);
trans->block_rsv = &root->fs_info->trans_block_rsv;
@@ -3191,9 +3188,10 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
struct btrfs_trans_handle *trans;
unsigned long nr = 0;
- if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
- btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
+ if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
+ if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
+ return -EPERM;
trans = __unlink_start_trans(dir, dentry);
if (IS_ERR(trans))
@@ -3267,8 +3265,13 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
return -ENOMEM;
path->reada = -1;
+ /*
+ * We want to drop from the next block forward in case this new size is
+ * not block aligned, since we will be keeping the last block of the
+ * extent just the way it is.
+ */
if (root->ref_cows || root == root->fs_info->tree_root)
- btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
+ btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0);
/*
* This function is also used to drop the items in the log tree before
@@ -3429,12 +3432,6 @@ delete:
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot) {
- if (root->ref_cows &&
- BTRFS_I(inode)->location.objectid !=
- BTRFS_FREE_INO_OBJECTID) {
- err = -EAGAIN;
- goto out;
- }
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
@@ -3465,12 +3462,20 @@ error:
}
/*
- * taken from block_truncate_page, but does cow as it zeros out
- * any bytes left in the last page in the file.
+ * btrfs_truncate_page - read, zero a chunk and write a page
+ * @inode - inode that we're zeroing
+ * @from - the offset to start zeroing
+ * @len - the length to zero, or 0 to zero the entire range relative to the
+ * offset
+ * @front - zero up to the offset instead of from the offset onward
+ *
+ * This will find the page for the "from" offset, COW it, and zero the part we
+ * want to zero. This is used with truncate and hole punching.
*/
-static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
+int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+ int front)
{
- struct inode *inode = mapping->host;
+ struct address_space *mapping = inode->i_mapping;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_ordered_extent *ordered;
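
A hedged sketch of the two zeroing modes the reworked helper supports; the
hole-punch style caller below is an illustration based on the new @front
flag, not a quote of this series.

static int zero_partial_blocks_sketch(struct inode *inode, loff_t offset,
                                      loff_t len)
{
        int ret;

        /* zero from offset to the end of its block (front == 0) */
        ret = btrfs_truncate_page(inode, offset, 0, 0);
        if (ret)
                return ret;
        /* zero from the start of the last block up to offset + len */
        return btrfs_truncate_page(inode, offset + len, 0, 1);
}
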
@@ -3485,7 +3490,8 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
u64 page_start;
u64 page_end;
- if ((offset & (blocksize - 1)) == 0)
+ if ((offset & (blocksize - 1)) == 0 &&
+ (!len || ((len & (blocksize - 1)) == 0)))
goto out;
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (ret)
@@ -3532,7 +3538,8 @@ again:
}
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
- EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
@@ -3545,8 +3552,13 @@ again:
ret = 0;
if (offset != PAGE_CACHE_SIZE) {
+ if (!len)
+ len = PAGE_CACHE_SIZE - offset;
kaddr = kmap(page);
- memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
+ if (front)
+ memset(kaddr, 0, offset);
+ else
+ memset(kaddr + offset, 0, len);
flush_dcache_page(page);
kunmap(page);
}
@@ -3577,6 +3589,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
u64 mask = root->sectorsize - 1;
u64 hole_start = (oldsize + mask) & ~mask;
u64 block_end = (size + mask) & ~mask;
@@ -3613,7 +3626,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
last_byte = min(extent_map_end(em), block_end);
last_byte = (last_byte + mask) & ~mask;
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
- u64 hint_byte = 0;
+ struct extent_map *hole_em;
hole_size = last_byte - cur_offset;
trans = btrfs_start_transaction(root, 3);
@@ -3622,9 +3635,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
break;
}
- err = btrfs_drop_extents(trans, inode, cur_offset,
- cur_offset + hole_size,
- &hint_byte, 1);
+ err = btrfs_drop_extents(trans, root, inode,
+ cur_offset,
+ cur_offset + hole_size, 1);
if (err) {
btrfs_abort_transaction(trans, root, err);
btrfs_end_transaction(trans, root);
@@ -3641,9 +3654,39 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
break;
}
- btrfs_drop_extent_cache(inode, hole_start,
- last_byte - 1, 0);
+ btrfs_drop_extent_cache(inode, cur_offset,
+ cur_offset + hole_size - 1, 0);
+ hole_em = alloc_extent_map();
+ if (!hole_em) {
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+ goto next;
+ }
+ hole_em->start = cur_offset;
+ hole_em->len = hole_size;
+ hole_em->orig_start = cur_offset;
+
+ hole_em->block_start = EXTENT_MAP_HOLE;
+ hole_em->block_len = 0;
+ hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
+ hole_em->compress_type = BTRFS_COMPRESS_NONE;
+ hole_em->generation = trans->transid;
+ while (1) {
+ write_lock(&em_tree->lock);
+ err = add_extent_mapping(em_tree, hole_em);
+ if (!err)
+ list_move(&hole_em->list,
+ &em_tree->modified_extents);
+ write_unlock(&em_tree->lock);
+ if (err != -EEXIST)
+ break;
+ btrfs_drop_extent_cache(inode, cur_offset,
+ cur_offset +
+ hole_size - 1, 0);
+ }
+ free_extent_map(hole_em);
+next:
btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
}
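
The insert-retry idiom above recurs later in this patch for preallocated
extents; in isolation it looks like the following sketch (assumes the caller
allocated and filled the extent_map):

static int insert_em_sketch(struct inode *inode, struct extent_map *em)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret;

        while (1) {
                write_lock(&em_tree->lock);
                ret = add_extent_mapping(em_tree, em);
                /* queue the em for fsync tracking on success */
                if (!ret)
                        list_move(&em->list, &em_tree->modified_extents);
                write_unlock(&em_tree->lock);
                if (ret != -EEXIST)
                        break;
                /* drop the overlapping cached mapping and try again */
                btrfs_drop_extent_cache(inode, em->start,
                                        em->start + em->len - 1, 0);
        }
        return ret;
}
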
@@ -3768,26 +3811,22 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
- rsv = btrfs_alloc_block_rsv(root);
+ rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
rsv->size = min_size;
+ rsv->failfast = 1;
global_rsv = &root->fs_info->global_block_rsv;
btrfs_i_size_write(inode, 0);
/*
- * This is a bit simpler than btrfs_truncate since
- *
- * 1) We've already reserved our space for our orphan item in the
- * unlink.
- * 2) We're going to delete the inode item, so we don't need to update
- * it at all.
- *
- * So we just need to reserve some slack space in case we add bytes when
- * doing the truncate.
+ * This is a bit simpler than btrfs_truncate since we've already
+ * reserved our space for our orphan item in the unlink, so we just
+ * need to reserve some slack space in case we add bytes and update the
+ * inode item when doing the truncate.
*/
while (1) {
ret = btrfs_block_rsv_refill_noflush(root, rsv, min_size);
@@ -3808,7 +3847,7 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
- trans = btrfs_start_transaction(root, 0);
+ trans = btrfs_start_transaction_noflush(root, 1);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
@@ -3818,9 +3857,13 @@ void btrfs_evict_inode(struct inode *inode)
trans->block_rsv = rsv;
ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
- if (ret != -EAGAIN)
+ if (ret != -ENOSPC)
break;
+ trans->block_rsv = &root->fs_info->trans_block_rsv;
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
trans = NULL;
@@ -4470,10 +4513,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- if (nolock)
- ret = btrfs_end_transaction_nolock(trans, root);
- else
- ret = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
}
return ret;
}
@@ -4671,6 +4711,14 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
BTRFS_I(inode)->generation = trans->transid;
inode->i_generation = BTRFS_I(inode)->generation;
+ /*
+ * We could have gotten an inode number from somebody who was fsynced
+ * and then removed in this same transaction, so let's just set full
+ * sync since it will be a full sync anyway and this will blow away the
+ * old info in the log.
+ */
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
+
if (S_ISDIR(mode))
owner = 0;
else
@@ -4680,6 +4728,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
key[0].offset = 0;
+ /*
+ * Start new inodes with an inode_ref. This is slightly more
+ * efficient for small numbers of hard links since they will
+ * be packed into one item. Extended refs will kick in if we
+ * add more hard links than can fit in the ref item.
+ */
key[1].objectid = objectid;
btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
key[1].offset = ref_objectid;
@@ -4986,7 +5040,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
if (root->objectid != BTRFS_I(inode)->root->objectid)
return -EXDEV;
- if (inode->i_nlink == ~0U)
+ if (inode->i_nlink >= BTRFS_LINK_MAX)
return -EMLINK;
err = btrfs_set_inode_index(dir, &index);
@@ -5450,7 +5504,8 @@ insert:
write_unlock(&em_tree->lock);
out:
- trace_btrfs_get_extent(root, em);
+ if (em)
+ trace_btrfs_get_extent(root, em);
if (path)
btrfs_free_path(path);
@@ -5836,6 +5891,48 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
return ret;
}
+static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
+ u64 len, u64 orig_start,
+ u64 block_start, u64 block_len,
+ int type)
+{
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+
+ em_tree = &BTRFS_I(inode)->extent_tree;
+ em = alloc_extent_map();
+ if (!em)
+ return ERR_PTR(-ENOMEM);
+
+ em->start = start;
+ em->orig_start = orig_start;
+ em->len = len;
+ em->block_len = block_len;
+ em->block_start = block_start;
+ em->bdev = root->fs_info->fs_devices->latest_bdev;
+ set_bit(EXTENT_FLAG_PINNED, &em->flags);
+ if (type == BTRFS_ORDERED_PREALLOC)
+ set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+
+ do {
+ btrfs_drop_extent_cache(inode, em->start,
+ em->start + em->len - 1, 0);
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em);
+ write_unlock(&em_tree->lock);
+ } while (ret == -EEXIST);
+
+ if (ret) {
+ free_extent_map(em);
+ return ERR_PTR(ret);
+ }
+
+ return em;
+}
+
+
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -5950,6 +6047,19 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
goto must_cow;
if (can_nocow_odirect(trans, inode, start, len) == 1) {
+ u64 orig_start = em->start;
+
+ if (type == BTRFS_ORDERED_PREALLOC) {
+ free_extent_map(em);
+ em = create_pinned_em(inode, start, len,
+ orig_start,
+ block_start, len, type);
+ if (IS_ERR(em)) {
+ btrfs_end_transaction(trans, root);
+ goto unlock_err;
+ }
+ }
+
ret = btrfs_add_ordered_extent_dio(inode, start,
block_start, len, len, type);
btrfs_end_transaction(trans, root);
@@ -5999,7 +6109,8 @@ unlock:
if (lockstart < lockend) {
if (create && len < lockend - lockstart) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockstart + len - 1, unlock_bits, 1, 0,
+ lockstart + len - 1,
+ unlock_bits | EXTENT_DEFRAG, 1, 0,
&cached_state, GFP_NOFS);
/*
* Beside unlock, we also need to cleanup reserved space
@@ -6007,8 +6118,8 @@ unlock:
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree,
lockstart + len, lockend,
- unlock_bits | EXTENT_DO_ACCOUNTING,
- 1, 0, NULL, GFP_NOFS);
+ unlock_bits | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS);
} else {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, unlock_bits, 1, 0,
@@ -6573,8 +6684,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
*/
clear_extent_bit(tree, page_start, page_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
- &cached_state, GFP_NOFS);
+ EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, 1, 0, &cached_state, GFP_NOFS);
/*
* whoever cleared the private bit is responsible
* for the finish_ordered_io
@@ -6590,7 +6701,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
}
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
+ &cached_state, GFP_NOFS);
__btrfs_releasepage(page, GFP_NOFS);
ClearPageChecked(page);
@@ -6687,7 +6799,8 @@ again:
* prepare_pages in the normal write path.
*/
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
- EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
@@ -6718,6 +6831,7 @@ again:
BTRFS_I(inode)->last_trans = root->fs_info->generation;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
+ BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
@@ -6745,7 +6859,7 @@ static int btrfs_truncate(struct inode *inode)
u64 mask = root->sectorsize - 1;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
- ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
+ ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
if (ret)
return ret;
@@ -6788,10 +6902,11 @@ static int btrfs_truncate(struct inode *inode)
* 3) fs_info->trans_block_rsv - this will have 1 items worth left for
* updating the inode.
*/
- rsv = btrfs_alloc_block_rsv(root);
+ rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
if (!rsv)
return -ENOMEM;
rsv->size = min_size;
+ rsv->failfast = 1;
/*
* 1 for the truncate slack space
@@ -6837,36 +6952,21 @@ static int btrfs_truncate(struct inode *inode)
&BTRFS_I(inode)->runtime_flags))
btrfs_add_ordered_operation(trans, root, inode);
- while (1) {
- ret = btrfs_block_rsv_refill(root, rsv, min_size);
- if (ret) {
- /*
- * This can only happen with the original transaction we
- * started above, every other time we shouldn't have a
- * transaction started yet.
- */
- if (ret == -EAGAIN)
- goto end_trans;
- err = ret;
- break;
- }
-
- if (!trans) {
- /* Just need the 1 for updating the inode */
- trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- ret = err = PTR_ERR(trans);
- trans = NULL;
- break;
- }
- }
-
- trans->block_rsv = rsv;
+ /*
+ * If we truncate and then write and fsync, we would normally just
+ * write the extents that changed, which is a problem when the whole
+ * inode was just truncated. So set this flag so we write out all of
+ * the extents in the inode to the sync log and stay completely safe.
+ */
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
+ trans->block_rsv = rsv;
+ while (1) {
ret = btrfs_truncate_inode_items(trans, root, inode,
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
- if (ret != -EAGAIN) {
+ if (ret != -ENOSPC) {
err = ret;
break;
}
@@ -6877,11 +6977,22 @@ static int btrfs_truncate(struct inode *inode)
err = ret;
break;
}
-end_trans:
+
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
- trans = NULL;
btrfs_btree_balance_dirty(root, nr);
+
+ trans = btrfs_start_transaction(root, 2);
+ if (IS_ERR(trans)) {
+ ret = err = PTR_ERR(trans);
+ trans = NULL;
+ break;
+ }
+
+ ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
+ rsv, min_size);
+ BUG_ON(ret); /* shouldn't happen */
+ trans->block_rsv = rsv;
}
if (ret == 0 && inode->i_nlink > 0) {
@@ -6965,6 +7076,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->csum_bytes = 0;
ei->index_cnt = (u64)-1;
ei->last_unlink_trans = 0;
+ ei->last_log_commit = 0;
spin_lock_init(&ei->lock);
ei->outstanding_extents = 0;
@@ -7095,31 +7207,31 @@ void btrfs_destroy_cachep(void)
int btrfs_init_cachep(void)
{
- btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
+ btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
sizeof(struct btrfs_inode), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
if (!btrfs_inode_cachep)
goto fail;
- btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
+ btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
sizeof(struct btrfs_trans_handle), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_trans_handle_cachep)
goto fail;
- btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
+ btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
sizeof(struct btrfs_transaction), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_transaction_cachep)
goto fail;
- btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
+ btrfs_path_cachep = kmem_cache_create("btrfs_path",
sizeof(struct btrfs_path), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_path_cachep)
goto fail;
- btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space_cache",
+ btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
sizeof(struct btrfs_free_space), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!btrfs_free_space_cachep)
@@ -7513,6 +7625,8 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
loff_t actual_len, u64 *alloc_hint,
struct btrfs_trans_handle *trans)
{
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
@@ -7553,6 +7667,37 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset -1, 0);
+ em = alloc_extent_map();
+ if (!em) {
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+ goto next;
+ }
+
+ em->start = cur_offset;
+ em->orig_start = cur_offset;
+ em->len = ins.offset;
+ em->block_start = ins.objectid;
+ em->block_len = ins.offset;
+ em->bdev = root->fs_info->fs_devices->latest_bdev;
+ set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
+ em->generation = trans->transid;
+
+ while (1) {
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em);
+ if (!ret)
+ list_move(&em->list,
+ &em_tree->modified_extents);
+ write_unlock(&em_tree->lock);
+ if (ret != -EEXIST)
+ break;
+ btrfs_drop_extent_cache(inode, cur_offset,
+ cur_offset + ins.offset - 1,
+ 0);
+ }
+ free_extent_map(em);
+next:
num_bytes -= ins.offset;
cur_offset += ins.offset;
*alloc_hint = ins.objectid + ins.offset;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 47127c1bd290..8fcf9a59c28d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -181,6 +181,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
int ret;
u64 ip_oldflags;
unsigned int i_oldflags;
+ umode_t mode;
if (btrfs_root_readonly(root))
return -EROFS;
@@ -203,6 +204,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
ip_oldflags = ip->flags;
i_oldflags = inode->i_flags;
+ mode = inode->i_mode;
flags = btrfs_mask_flags(inode->i_mode, flags);
oldflags = btrfs_flags_to_ioctl(ip->flags);
@@ -237,10 +239,31 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
ip->flags |= BTRFS_INODE_DIRSYNC;
else
ip->flags &= ~BTRFS_INODE_DIRSYNC;
- if (flags & FS_NOCOW_FL)
- ip->flags |= BTRFS_INODE_NODATACOW;
- else
- ip->flags &= ~BTRFS_INODE_NODATACOW;
+ if (flags & FS_NOCOW_FL) {
+ if (S_ISREG(mode)) {
+ /*
+ * It's safe to turn csums off here: no extents exist
+ * yet. Otherwise we want the flag to reflect the real
+ * COW status of the file, so we do not set it.
+ */
+ if (inode->i_size == 0)
+ ip->flags |= BTRFS_INODE_NODATACOW
+ | BTRFS_INODE_NODATASUM;
+ } else {
+ ip->flags |= BTRFS_INODE_NODATACOW;
+ }
+ } else {
+ /*
+ * Revert back under the same assumptions as above
+ */
+ if (S_ISREG(mode)) {
+ if (inode->i_size == 0)
+ ip->flags &= ~(BTRFS_INODE_NODATACOW
+ | BTRFS_INODE_NODATASUM);
+ } else {
+ ip->flags &= ~BTRFS_INODE_NODATACOW;
+ }
+ }
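
From userspace the new behavior is reached through the standard flags ioctl;
a sketch (not part of this patch) that tries to set NOCOW, which the code
above silently refuses on a non-empty regular file:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int set_nocow_sketch(const char *path)
{
        int fd = open(path, O_RDONLY);
        int flags, ret;

        if (fd < 0)
                return -1;
        ret = ioctl(fd, FS_IOC_GETFLAGS, &flags);
        if (!ret) {
                flags |= FS_NOCOW_FL;   /* the 'C' attribute in chattr terms */
                ret = ioctl(fd, FS_IOC_SETFLAGS, &flags);
        }
        close(fd);
        return ret;
}
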
/*
* The COMPRESS flag can only be changed by users, while the NOCOMPRESS
@@ -320,7 +343,8 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return -EOPNOTSUPP;
if (copy_from_user(&range, arg, sizeof(range)))
return -EFAULT;
- if (range.start > total_bytes)
+ if (range.start > total_bytes ||
+ range.len < fs_info->sb->s_blocksize)
return -EINVAL;
range.len = min(range.len, total_bytes - range.start);
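
Callers now have to pass a trim length of at least one filesystem block or
get -EINVAL back; a userspace sketch (an assumption, not from this patch)
that trims everything and therefore always passes the check:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int trim_all_sketch(const char *mountpoint)
{
        struct fstrim_range range;
        int fd = open(mountpoint, O_RDONLY);
        int ret;

        if (fd < 0)
                return -1;
        memset(&range, 0, sizeof(range));
        range.len = (__u64)-1;  /* whole filesystem, >= any blocksize */
        ret = ioctl(fd, FITRIM, &range);
        close(fd);
        return ret;
}
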
@@ -516,7 +540,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
if (!pending_snapshot)
return -ENOMEM;
- btrfs_init_block_rsv(&pending_snapshot->block_rsv);
+ btrfs_init_block_rsv(&pending_snapshot->block_rsv,
+ BTRFS_BLOCK_RSV_TEMP);
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
@@ -525,7 +550,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
*inherit = NULL; /* take responsibility to free it */
}
- trans = btrfs_start_transaction(root->fs_info->extent_root, 5);
+ trans = btrfs_start_transaction(root->fs_info->extent_root, 6);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto fail;
@@ -546,7 +571,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
ret = btrfs_commit_transaction(trans,
root->fs_info->extent_root);
}
- BUG_ON(ret);
+ if (ret)
+ goto fail;
ret = pending_snapshot->error;
if (ret)
@@ -614,7 +640,7 @@ static int btrfs_may_delete(struct inode *dir,struct dentry *victim,int isdir)
return -ENOENT;
BUG_ON(victim->d_parent->d_inode != dir);
- audit_inode_child(victim, dir);
+ audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
@@ -1022,8 +1048,8 @@ again:
page_start, page_end - 1, 0, &cached_state);
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
- GFP_NOFS);
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
+ &cached_state, GFP_NOFS);
if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock);
@@ -1034,8 +1060,8 @@ again:
}
- btrfs_set_extent_delalloc(inode, page_start, page_end - 1,
- &cached_state);
+ set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
+ &cached_state, GFP_NOFS);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state,
@@ -2351,7 +2377,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
int ret;
u64 len = olen;
u64 bs = root->fs_info->sb->s_blocksize;
- u64 hint_byte;
/*
* TODO:
@@ -2456,13 +2481,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
another, and lock file content */
while (1) {
struct btrfs_ordered_extent *ordered;
- lock_extent(&BTRFS_I(src)->io_tree, off, off+len);
- ordered = btrfs_lookup_first_ordered_extent(src, off+len);
+ lock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
+ ordered = btrfs_lookup_first_ordered_extent(src, off + len - 1);
if (!ordered &&
- !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
- EXTENT_DELALLOC, 0, NULL))
+ !test_range_bit(&BTRFS_I(src)->io_tree, off, off + len - 1,
+ EXTENT_DELALLOC, 0, NULL))
break;
- unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
+ unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
if (ordered)
btrfs_put_ordered_extent(ordered);
btrfs_wait_ordered_range(src, off, len);
@@ -2536,7 +2561,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_release_path(path);
if (key.offset + datal <= off ||
- key.offset >= off+len)
+ key.offset >= off + len - 1)
goto next;
memcpy(&new_key, &key, sizeof(new_key));
@@ -2574,10 +2599,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
datal -= off - key.offset;
}
- ret = btrfs_drop_extents(trans, inode,
+ ret = btrfs_drop_extents(trans, root, inode,
new_key.offset,
new_key.offset + datal,
- &hint_byte, 1);
+ 1);
if (ret) {
btrfs_abort_transaction(trans, root,
ret);
@@ -2637,8 +2662,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
new_key.offset += skip;
}
- if (key.offset + datal > off+len)
- trim = key.offset + datal - (off+len);
+ if (key.offset + datal > off + len)
+ trim = key.offset + datal - (off + len);
if (comp && (skip || trim)) {
ret = -EINVAL;
@@ -2648,10 +2673,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
size -= skip + trim;
datal -= skip + trim;
- ret = btrfs_drop_extents(trans, inode,
+ ret = btrfs_drop_extents(trans, root, inode,
new_key.offset,
new_key.offset + datal,
- &hint_byte, 1);
+ 1);
if (ret) {
btrfs_abort_transaction(trans, root,
ret);
@@ -2715,7 +2740,7 @@ next:
ret = 0;
out:
btrfs_release_path(path);
- unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
+ unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
out_unlock:
mutex_unlock(&src->i_mutex);
mutex_unlock(&inode->i_mutex);
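
The off-by-one fixes above all restore the same convention: extent state
ranges are inclusive of their end offset. A sketch of locking a span of len
bytes under that convention (helper name illustrative):

static void lock_span_sketch(struct extent_io_tree *tree, u64 off, u64 len)
{
        /* [off, off + len - 1] covers exactly len bytes, end inclusive */
        lock_extent(tree, off, off + len - 1);
        /* ... operate on the span ... */
        unlock_extent(tree, off, off + len - 1);
}
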
@@ -2850,8 +2875,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
return 0;
}
-static void get_block_group_info(struct list_head *groups_list,
- struct btrfs_ioctl_space_info *space)
+void btrfs_get_block_group_info(struct list_head *groups_list,
+ struct btrfs_ioctl_space_info *space)
{
struct btrfs_block_group_cache *block_group;
@@ -2959,8 +2984,8 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
down_read(&info->groups_sem);
for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
if (!list_empty(&info->block_groups[c])) {
- get_block_group_info(&info->block_groups[c],
- &space);
+ btrfs_get_block_group_info(
+ &info->block_groups[c], &space);
memcpy(dest, &space, sizeof(space));
dest++;
space_args.total_spaces++;
@@ -3208,11 +3233,9 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
{
int ret = 0;
int size;
- u64 extent_item_pos;
struct btrfs_ioctl_logical_ino_args *loi;
struct btrfs_data_container *inodes = NULL;
struct btrfs_path *path = NULL;
- struct btrfs_key key;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3230,7 +3253,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
goto out;
}
- size = min_t(u32, loi->size, 4096);
+ size = min_t(u32, loi->size, 64 * 1024);
inodes = init_data_container(size);
if (IS_ERR(inodes)) {
ret = PTR_ERR(inodes);
@@ -3238,22 +3261,13 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
goto out;
}
- ret = extent_from_logical(root->fs_info, loi->logical, path, &key);
- btrfs_release_path(path);
-
- if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
+ build_ino_list, inodes);
+ if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
goto out;
- extent_item_pos = loi->logical - key.objectid;
- ret = iterate_extent_inodes(root->fs_info, key.objectid,
- extent_item_pos, 0, build_ino_list,
- inodes);
-
- if (ret < 0)
- goto out;
-
ret = copy_to_user((void *)(unsigned long)loi->inodes,
(void *)(unsigned long)inodes, size);
if (ret)
@@ -3261,7 +3275,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
out:
btrfs_free_path(path);
- kfree(inodes);
+ vfree(inodes);
kfree(loi);
return ret;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 051c7fe551dd..7772f02ba28e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -25,6 +25,8 @@
#include "btrfs_inode.h"
#include "extent_io.h"
+static struct kmem_cache *btrfs_ordered_extent_cache;
+
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
if (entry->file_offset + entry->len < entry->file_offset)
@@ -187,7 +189,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
struct btrfs_ordered_extent *entry;
tree = &BTRFS_I(inode)->ordered_tree;
- entry = kzalloc(sizeof(*entry), GFP_NOFS);
+ entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
if (!entry)
return -ENOMEM;
@@ -421,7 +423,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
list_del(&sum->list);
kfree(sum);
}
- kfree(entry);
+ kmem_cache_free(btrfs_ordered_extent_cache, entry);
}
}
@@ -466,8 +468,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
* wait for all the ordered extents in a root. This is done when balancing
* space between drives.
*/
-void btrfs_wait_ordered_extents(struct btrfs_root *root,
- int nocow_only, int delay_iput)
+void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
struct list_head splice;
struct list_head *cur;
@@ -482,15 +483,6 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root,
cur = splice.next;
ordered = list_entry(cur, struct btrfs_ordered_extent,
root_extent_list);
- if (nocow_only &&
- !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
- !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
- list_move(&ordered->root_extent_list,
- &root->fs_info->ordered_extents);
- cond_resched_lock(&root->fs_info->ordered_extent_lock);
- continue;
- }
-
list_del_init(&ordered->root_extent_list);
atomic_inc(&ordered->refs);
@@ -775,7 +767,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
u64 disk_i_size;
u64 new_i_size;
- u64 i_size_test;
u64 i_size = i_size_read(inode);
struct rb_node *node;
struct rb_node *prev = NULL;
@@ -835,55 +826,30 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
break;
if (test->file_offset >= i_size)
break;
- if (test->file_offset >= disk_i_size)
+ if (test->file_offset >= disk_i_size) {
+ /*
+ * We don't update disk_i_size now, so record this
+ * pending i_size update; otherwise we would lose track
+ * of the real i_size.
+ */
+ if (test->outstanding_isize < offset)
+ test->outstanding_isize = offset;
+ if (ordered &&
+ ordered->outstanding_isize >
+ test->outstanding_isize)
+ test->outstanding_isize =
+ ordered->outstanding_isize;
goto out;
- }
- new_i_size = min_t(u64, offset, i_size);
-
- /*
- * at this point, we know we can safely update i_size to at least
- * the offset from this ordered extent. But, we need to
- * walk forward and see if ios from higher up in the file have
- * finished.
- */
- if (ordered) {
- node = rb_next(&ordered->rb_node);
- } else {
- if (prev)
- node = rb_next(prev);
- else
- node = rb_first(&tree->tree);
- }
-
- /*
- * We are looking for an area between our current extent and the next
- * ordered extent to update the i_size to. There are 3 cases here
- *
- * 1) We don't actually have anything and we can update to i_size.
- * 2) We have stuff but they already did their i_size update so again we
- * can just update to i_size.
- * 3) We have an outstanding ordered extent so the most we can update
- * our disk_i_size to is the start of the next offset.
- */
- i_size_test = i_size;
- for (; node; node = rb_next(node)) {
- test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-
- if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
- continue;
- if (test->file_offset > offset) {
- i_size_test = test->file_offset;
- break;
}
}
+ new_i_size = min_t(u64, offset, i_size);
/*
- * i_size_test is the end of a region after this ordered
- * extent where there are no ordered extents, we can safely set
- * disk_i_size to this.
+ * Some ordered extents may have completed before the current one, and
+ * we hold the real i_size in ->outstanding_isize.
*/
- if (i_size_test > offset)
- new_i_size = min_t(u64, i_size_test, i_size);
+ if (ordered && ordered->outstanding_isize > new_i_size)
+ new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
BTRFS_I(inode)->disk_i_size = new_i_size;
ret = 0;
out:
@@ -984,3 +950,20 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
}
spin_unlock(&root->fs_info->ordered_extent_lock);
}
+
+int __init ordered_data_init(void)
+{
+ btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
+ sizeof(struct btrfs_ordered_extent), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ NULL);
+ if (!btrfs_ordered_extent_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void ordered_data_exit(void)
+{
+ if (btrfs_ordered_extent_cache)
+ kmem_cache_destroy(btrfs_ordered_extent_cache);
+}
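
The new init/exit pair is presumably wired into the filesystem's module
setup; a sketch under that assumption (the surrounding function names are
hypothetical):

static int __init example_module_init(void)
{
        int err;

        err = ordered_data_init();      /* creates the ordered extent cache */
        if (err)
                return err;
        /* ... remaining init steps ... */
        return 0;
}

static void __exit example_module_exit(void)
{
        /* ... remaining teardown ... */
        ordered_data_exit();            /* tolerates a NULL cache */
}
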
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index e03c560d2997..dd27a0b46a37 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -96,6 +96,13 @@ struct btrfs_ordered_extent {
/* number of bytes that still need writing */
u64 bytes_left;
+ /*
+ * the end of an ordered extent that completed behind this one
+ * but did not update disk_i_size; see the comment in
+ * btrfs_ordered_update_i_size().
+ */
+ u64 outstanding_isize;
+
/* flags (described above) */
unsigned long flags;
@@ -183,6 +190,7 @@ void btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
-void btrfs_wait_ordered_extents(struct btrfs_root *root,
- int nocow_only, int delay_iput);
+void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
+int __init ordered_data_init(void);
+void ordered_data_exit(void);
#endif
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index b65015581744..fe9d02c45f8e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -790,8 +790,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
}
path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ ret = -ENOMEM;
+ goto out_free_root;
+ }
key.objectid = 0;
key.type = BTRFS_QGROUP_STATUS_KEY;
@@ -800,7 +802,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
sizeof(*ptr));
if (ret)
- goto out;
+ goto out_free_path;
leaf = path->nodes[0];
ptr = btrfs_item_ptr(leaf, path->slots[0],
@@ -818,8 +820,15 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
fs_info->quota_root = quota_root;
fs_info->pending_quota_state = 1;
spin_unlock(&fs_info->qgroup_lock);
-out:
+out_free_path:
btrfs_free_path(path);
+out_free_root:
+ if (ret) {
+ free_extent_buffer(quota_root->node);
+ free_extent_buffer(quota_root->commit_root);
+ kfree(quota_root);
+ }
+out:
return ret;
}
@@ -1145,12 +1154,12 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
ulist_reinit(tmp);
/* XXX id not needed */
- ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+ ulist_add(tmp, qg->qgroupid, (u64)(uintptr_t)qg, GFP_ATOMIC);
ULIST_ITER_INIT(&tmp_uiter);
while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
struct btrfs_qgroup_list *glist;
- qg = (struct btrfs_qgroup *)tmp_unode->aux;
+ qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
if (qg->refcnt < seq)
qg->refcnt = seq + 1;
else
@@ -1158,7 +1167,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
list_for_each_entry(glist, &qg->groups, next_group) {
ulist_add(tmp, glist->group->qgroupid,
- (unsigned long)glist->group,
+ (u64)(uintptr_t)glist->group,
GFP_ATOMIC);
}
}
@@ -1168,13 +1177,13 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
* step 2: walk from the new root
*/
ulist_reinit(tmp);
- ulist_add(tmp, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(tmp, &uiter))) {
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
- qg = (struct btrfs_qgroup *)unode->aux;
+ qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
if (qg->refcnt < seq) {
/* not visited by step 1 */
qg->rfer += sgn * node->num_bytes;
@@ -1190,7 +1199,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
list_for_each_entry(glist, &qg->groups, next_group) {
ulist_add(tmp, glist->group->qgroupid,
- (unsigned long)glist->group, GFP_ATOMIC);
+ (uintptr_t)glist->group, GFP_ATOMIC);
}
}
@@ -1208,12 +1217,12 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
continue;
ulist_reinit(tmp);
- ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+ ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
ULIST_ITER_INIT(&tmp_uiter);
while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
struct btrfs_qgroup_list *glist;
- qg = (struct btrfs_qgroup *)tmp_unode->aux;
+ qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
if (qg->tag == seq)
continue;
@@ -1225,7 +1234,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
list_for_each_entry(glist, &qg->groups, next_group) {
ulist_add(tmp, glist->group->qgroupid,
- (unsigned long)glist->group,
+ (uintptr_t)glist->group,
GFP_ATOMIC);
}
}
@@ -1469,13 +1478,17 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
* be exceeded
*/
ulist = ulist_alloc(GFP_ATOMIC);
- ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ if (!ulist) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(ulist, &uiter))) {
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
- qg = (struct btrfs_qgroup *)unode->aux;
+ qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
qg->reserved + qg->rfer + num_bytes >
@@ -1489,7 +1502,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
list_for_each_entry(glist, &qg->groups, next_group) {
ulist_add(ulist, glist->group->qgroupid,
- (unsigned long)glist->group, GFP_ATOMIC);
+ (uintptr_t)glist->group, GFP_ATOMIC);
}
}
if (ret)
@@ -1502,7 +1515,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
while ((unode = ulist_next(ulist, &uiter))) {
struct btrfs_qgroup *qg;
- qg = (struct btrfs_qgroup *)unode->aux;
+ qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
qg->reserved += num_bytes;
}
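
The repeated (u64)(uintptr_t) and (struct ...*)(uintptr_t) casts exist
because the ulist aux field is now a u64 while a pointer may be only 32 bits
wide; laundering through uintptr_t keeps 32-bit builds warning-free. In
isolation (a sketch):

#include <linux/types.h>

static u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
        return (u64)(uintptr_t)qg;      /* pointer -> pointer-sized int -> u64 */
}

static struct btrfs_qgroup *aux_to_qgroup(u64 aux)
{
        return (struct btrfs_qgroup *)(uintptr_t)aux;   /* and back */
}
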
@@ -1541,19 +1554,23 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
goto out;
ulist = ulist_alloc(GFP_ATOMIC);
- ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ if (!ulist) {
+ btrfs_std_error(fs_info, -ENOMEM);
+ goto out;
+ }
+ ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(ulist, &uiter))) {
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
- qg = (struct btrfs_qgroup *)unode->aux;
+ qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
qg->reserved -= num_bytes;
list_for_each_entry(glist, &qg->groups, next_group) {
ulist_add(ulist, glist->group->qgroupid,
- (unsigned long)glist->group, GFP_ATOMIC);
+ (uintptr_t)glist->group, GFP_ATOMIC);
}
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 4da08652004d..776f0aa128fc 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3270,8 +3270,8 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
- if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
- if (inode && !IS_ERR(inode))
+ if (IS_ERR(inode) || is_bad_inode(inode)) {
+ if (!IS_ERR(inode))
iput(inode);
return -ENOENT;
}
@@ -3621,7 +3621,7 @@ next:
ret = find_first_extent_bit(&rc->processed_blocks,
key.objectid, &start, &end,
- EXTENT_DIRTY);
+ EXTENT_DIRTY, NULL);
if (ret == 0 && start <= key.objectid) {
btrfs_release_path(path);
@@ -3674,7 +3674,8 @@ int prepare_to_relocate(struct reloc_control *rc)
struct btrfs_trans_handle *trans;
int ret;
- rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root);
+ rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
+ BTRFS_BLOCK_RSV_TEMP);
if (!rc->block_rsv)
return -ENOMEM;
@@ -4057,7 +4058,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
(unsigned long long)rc->block_group->flags);
btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
- btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0);
+ btrfs_wait_ordered_extents(fs_info->tree_root, 0);
while (1) {
mutex_lock(&fs_info->cleaner_mutex);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 10d8e4d88071..eb923d087da7 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -141,8 +141,10 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
return -ENOMEM;
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- if (ret < 0)
- goto out_abort;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
@@ -166,16 +168,23 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, key, path,
-1, 1);
- if (ret < 0)
- goto out_abort;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
+
ret = btrfs_del_item(trans, root, path);
- if (ret < 0)
- goto out_abort;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path,
key, sizeof(*item));
- if (ret < 0)
- goto out_abort;
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
l = path->nodes[0];
slot = path->slots[0];
ptr = btrfs_item_ptr_offset(l, slot);
@@ -192,10 +201,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
out:
btrfs_free_path(path);
return ret;
-
-out_abort:
- btrfs_abort_transaction(trans, root, ret);
- goto out;
}
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b223620cd5a6..27892f67e69b 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -352,13 +352,14 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
struct extent_buffer *eb;
struct btrfs_extent_item *ei;
struct scrub_warning swarn;
- u32 item_size;
- int ret;
+ unsigned long ptr = 0;
+ u64 extent_item_pos;
+ u64 flags = 0;
u64 ref_root;
+ u32 item_size;
u8 ref_level;
- unsigned long ptr = 0;
const int bufsize = 4096;
- u64 extent_item_pos;
+ int ret;
path = btrfs_alloc_path();
@@ -375,7 +376,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
if (!path || !swarn.scratch_buf || !swarn.msg_buf)
goto out;
- ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
+ ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
+ &flags);
if (ret < 0)
goto out;
@@ -387,7 +389,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
item_size = btrfs_item_size_nr(eb, path->slots[0]);
btrfs_release_path(path);
- if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
do {
ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
&ref_root, &ref_level);
@@ -1029,6 +1031,7 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
spin_lock(&sdev->stat_lock);
sdev->stat.malloc_errors++;
spin_unlock(&sdev->stat_lock);
+ kfree(bbio);
return -ENOMEM;
}
sblock->page_count++;
@@ -1666,21 +1669,6 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
scrub_block_put(sblock);
}
- if (sbio->err) {
- /* what is this good for??? */
- sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
- sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
- sbio->bio->bi_phys_segments = 0;
- sbio->bio->bi_idx = 0;
-
- for (i = 0; i < sbio->page_count; i++) {
- struct bio_vec *bi;
- bi = &sbio->bio->bi_io_vec[i];
- bi->bv_offset = 0;
- bi->bv_len = PAGE_SIZE;
- }
- }
-
bio_put(sbio->bio);
sbio->bio = NULL;
spin_lock(&sdev->list_lock);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index fb5ffe95f869..e78b297b0b00 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -107,7 +107,6 @@ struct send_ctx {
int cur_inode_new;
int cur_inode_new_gen;
int cur_inode_deleted;
- int cur_inode_first_ref_orphan;
u64 cur_inode_size;
u64 cur_inode_mode;
@@ -126,7 +125,15 @@ struct send_ctx {
struct name_cache_entry {
struct list_head list;
- struct list_head use_list;
+ /*
+ * radix_tree has only 32bit entries but we need to handle 64bit inums.
+ * We use the lower 32bit of the 64bit inum to store it in the tree. If
+ * more then one inum would fall into the same entry, we use radix_list
+ * to store the additional entries. radix_list is also used to store
+ * entries where two entries have the same inum but different
+ * generations.
+ */
+ struct list_head radix_list;
u64 ino;
u64 gen;
u64 parent_ino;
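
A sketch of the lookup that the comment above implies (the helper name and
the list-head handling are assumptions patterned on the struct fields, not
quotes of this patch):

static struct name_cache_entry *name_cache_search_sketch(
                struct radix_tree_root *tree, u64 ino, u64 gen)
{
        unsigned long key = (unsigned long)(ino & 0xffffffffULL);
        struct list_head *head;
        struct name_cache_entry *nce;

        head = radix_tree_lookup(tree, key);    /* 32-bit key space */
        if (!head)
                return NULL;
        list_for_each_entry(nce, head, radix_list)
                if (nce->ino == ino && nce->gen == gen)
                        return nce;     /* full 64-bit ino/gen match */
        return NULL;
}
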
@@ -328,6 +335,7 @@ out:
return ret;
}
+#if 0
static void fs_path_remove(struct fs_path *p)
{
BUG_ON(p->reversed);
@@ -335,6 +343,7 @@ static void fs_path_remove(struct fs_path *p)
p->end--;
*p->end = 0;
}
+#endif
static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
@@ -377,7 +386,7 @@ static struct btrfs_path *alloc_path_for_send(void)
return path;
}
-static int write_buf(struct send_ctx *sctx, const void *buf, u32 len)
+int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
int ret;
mm_segment_t old_fs;
@@ -387,8 +396,7 @@ static int write_buf(struct send_ctx *sctx, const void *buf, u32 len)
set_fs(KERNEL_DS);
while (pos < len) {
- ret = vfs_write(sctx->send_filp, (char *)buf + pos, len - pos,
- &sctx->send_off);
+ ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
/* TODO handle that correctly */
/*if (ret == -ERESTARTSYS) {
continue;
@@ -544,7 +552,8 @@ static int send_header(struct send_ctx *sctx)
strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
- return write_buf(sctx, &hdr, sizeof(hdr));
+ return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
+ &sctx->send_off);
}
/*
@@ -581,7 +590,8 @@ static int send_cmd(struct send_ctx *sctx)
crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
hdr->crc = cpu_to_le32(crc);
- ret = write_buf(sctx, sctx->send_buf, sctx->send_size);
+ ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
+ &sctx->send_off);
sctx->total_send_size += sctx->send_size;
sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
@@ -687,7 +697,8 @@ out:
*/
static int get_inode_info(struct btrfs_root *root,
u64 ino, u64 *size, u64 *gen,
- u64 *mode, u64 *uid, u64 *gid)
+ u64 *mode, u64 *uid, u64 *gid,
+ u64 *rdev)
{
int ret;
struct btrfs_inode_item *ii;
@@ -721,6 +732,8 @@ static int get_inode_info(struct btrfs_root *root,
*uid = btrfs_inode_uid(path->nodes[0], ii);
if (gid)
*gid = btrfs_inode_gid(path->nodes[0], ii);
+ if (rdev)
+ *rdev = btrfs_inode_rdev(path->nodes[0], ii);
out:
btrfs_free_path(path);
@@ -732,31 +745,36 @@ typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
void *ctx);
/*
- * Helper function to iterate the entries in ONE btrfs_inode_ref.
+ * Helper function to iterate the entries in ONE btrfs_inode_ref or
+ * btrfs_inode_extref.
* The iterate callback may return a non zero value to stop iteration. This can
* be a negative value for error codes or 1 to simply stop it.
*
- * path must point to the INODE_REF when called.
+ * path must point to the INODE_REF or INODE_EXTREF when called.
*/
static int iterate_inode_ref(struct send_ctx *sctx,
struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *found_key, int resolve,
iterate_inode_ref_t iterate, void *ctx)
{
- struct extent_buffer *eb;
+ struct extent_buffer *eb = path->nodes[0];
struct btrfs_item *item;
struct btrfs_inode_ref *iref;
+ struct btrfs_inode_extref *extref;
struct btrfs_path *tmp_path;
struct fs_path *p;
- u32 cur;
- u32 len;
+ u32 cur = 0;
u32 total;
- int slot;
+ int slot = path->slots[0];
u32 name_len;
char *start;
int ret = 0;
- int num;
+ int num = 0;
int index;
+ u64 dir;
+ unsigned long name_off;
+ unsigned long elem_size;
+ unsigned long ptr;
p = fs_path_alloc_reversed(sctx);
if (!p)
@@ -768,24 +786,40 @@ static int iterate_inode_ref(struct send_ctx *sctx,
return -ENOMEM;
}
- eb = path->nodes[0];
- slot = path->slots[0];
- item = btrfs_item_nr(eb, slot);
- iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
- cur = 0;
- len = 0;
- total = btrfs_item_size(eb, item);
- num = 0;
+ if (found_key->type == BTRFS_INODE_REF_KEY) {
+ ptr = (unsigned long)btrfs_item_ptr(eb, slot,
+ struct btrfs_inode_ref);
+ item = btrfs_item_nr(eb, slot);
+ total = btrfs_item_size(eb, item);
+ elem_size = sizeof(*iref);
+ } else {
+ ptr = btrfs_item_ptr_offset(eb, slot);
+ total = btrfs_item_size_nr(eb, slot);
+ elem_size = sizeof(*extref);
+ }
+
while (cur < total) {
fs_path_reset(p);
- name_len = btrfs_inode_ref_name_len(eb, iref);
- index = btrfs_inode_ref_index(eb, iref);
+ if (found_key->type == BTRFS_INODE_REF_KEY) {
+ iref = (struct btrfs_inode_ref *)(ptr + cur);
+ name_len = btrfs_inode_ref_name_len(eb, iref);
+ name_off = (unsigned long)(iref + 1);
+ index = btrfs_inode_ref_index(eb, iref);
+ dir = found_key->offset;
+ } else {
+ extref = (struct btrfs_inode_extref *)(ptr + cur);
+ name_len = btrfs_inode_extref_name_len(eb, extref);
+ name_off = (unsigned long)&extref->name;
+ index = btrfs_inode_extref_index(eb, extref);
+ dir = btrfs_inode_extref_parent(eb, extref);
+ }
+
if (resolve) {
- start = btrfs_iref_to_path(root, tmp_path, iref, eb,
- found_key->offset, p->buf,
- p->buf_len);
+ start = btrfs_ref_to_path(root, tmp_path, name_len,
+ name_off, eb, dir,
+ p->buf, p->buf_len);
if (IS_ERR(start)) {
ret = PTR_ERR(start);
goto out;
@@ -796,9 +830,10 @@ static int iterate_inode_ref(struct send_ctx *sctx,
p->buf_len + p->buf - start);
if (ret < 0)
goto out;
- start = btrfs_iref_to_path(root, tmp_path, iref,
- eb, found_key->offset, p->buf,
- p->buf_len);
+ start = btrfs_ref_to_path(root, tmp_path,
+ name_len, name_off,
+ eb, dir,
+ p->buf, p->buf_len);
if (IS_ERR(start)) {
ret = PTR_ERR(start);
goto out;
@@ -807,21 +842,16 @@ static int iterate_inode_ref(struct send_ctx *sctx,
}
p->start = start;
} else {
- ret = fs_path_add_from_extent_buffer(p, eb,
- (unsigned long)(iref + 1), name_len);
+ ret = fs_path_add_from_extent_buffer(p, eb, name_off,
+ name_len);
if (ret < 0)
goto out;
}
-
- len = sizeof(*iref) + name_len;
- iref = (struct btrfs_inode_ref *)((char *)iref + len);
- cur += len;
-
- ret = iterate(num, found_key->offset, index, p, ctx);
+ cur += elem_size + name_len;
+ ret = iterate(num, dir, index, p, ctx);
if (ret)
goto out;
-
num++;
}
@@ -852,7 +882,6 @@ static int iterate_dir_item(struct send_ctx *sctx,
struct extent_buffer *eb;
struct btrfs_item *item;
struct btrfs_dir_item *di;
- struct btrfs_path *tmp_path = NULL;
struct btrfs_key di_key;
char *buf = NULL;
char *buf2 = NULL;
@@ -874,12 +903,6 @@ static int iterate_dir_item(struct send_ctx *sctx,
goto out;
}
- tmp_path = alloc_path_for_send();
- if (!tmp_path) {
- ret = -ENOMEM;
- goto out;
- }
-
eb = path->nodes[0];
slot = path->slots[0];
item = btrfs_item_nr(eb, slot);
@@ -941,7 +964,6 @@ static int iterate_dir_item(struct send_ctx *sctx,
}
out:
- btrfs_free_path(tmp_path);
if (buf_virtual)
vfree(buf);
else
@@ -993,7 +1015,8 @@ static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
}
btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
if (found_key.objectid != ino ||
- found_key.type != BTRFS_INODE_REF_KEY) {
+ (found_key.type != BTRFS_INODE_REF_KEY &&
+ found_key.type != BTRFS_INODE_EXTREF_KEY)) {
ret = -ENOENT;
goto out;
}
@@ -1026,12 +1049,12 @@ struct backref_ctx {
u64 extent_len;
/* Just to check for bugs in backref resolving */
- int found_in_send_root;
+ int found_itself;
};
static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
- u64 root = (u64)key;
+ u64 root = (u64)(uintptr_t)key;
struct clone_root *cr = (struct clone_root *)elt;
if (root < cr->root->objectid)
@@ -1055,6 +1078,7 @@ static int __clone_root_cmp_sort(const void *e1, const void *e2)
/*
* Called for every backref that is found for the current extent.
+ * Results are collected in sctx->clone_roots->ino/offset/found_refs
*/
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
@@ -1064,7 +1088,7 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
u64 i_size;
/* First check if the root is in the list of accepted clone sources */
- found = bsearch((void *)root, bctx->sctx->clone_roots,
+ found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
bctx->sctx->clone_roots_cnt,
sizeof(struct clone_root),
__clone_root_cmp_bsearch);
@@ -1074,14 +1098,15 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
if (found->root == bctx->sctx->send_root &&
ino == bctx->cur_objectid &&
offset == bctx->cur_offset) {
- bctx->found_in_send_root = 1;
+ bctx->found_itself = 1;
}
/*
- * There are inodes that have extents that lie behind it's i_size. Don't
+ * There are inodes that have extents that lie behind its i_size. Don't
* accept clones from these extents.
*/
- ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL);
+ ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
+ NULL);
if (ret < 0)
return ret;
@@ -1101,16 +1126,12 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
*/
if (ino >= bctx->cur_objectid)
return 0;
- /*if (ino > ctx->cur_objectid)
+#if 0
+ if (ino > bctx->cur_objectid)
return 0;
- if (offset + ctx->extent_len > ctx->cur_offset)
- return 0;*/
-
- bctx->found++;
- found->found_refs++;
- found->ino = ino;
- found->offset = offset;
- return 0;
+ if (offset + bctx->extent_len > bctx->cur_offset)
+ return 0;
+#endif
}
bctx->found++;
@@ -1130,6 +1151,12 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
}
/*
+ * Given an inode, offset and extent item, it finds a good clone for a clone
+ * instruction. Returns -ENOENT when none could be found. The function makes
+ * sure that the returned clone is usable at the point where sending
+ * currently is. This means that no clones are accepted which lie behind
+ * the current inode+offset.
+ *
* path must point to the extent item when called.
*/
static int find_extent_clone(struct send_ctx *sctx,
@@ -1141,20 +1168,29 @@ static int find_extent_clone(struct send_ctx *sctx,
int ret;
int extent_type;
u64 logical;
+ u64 disk_byte;
u64 num_bytes;
u64 extent_item_pos;
+ u64 flags = 0;
struct btrfs_file_extent_item *fi;
struct extent_buffer *eb = path->nodes[0];
- struct backref_ctx backref_ctx;
+ struct backref_ctx *backref_ctx = NULL;
struct clone_root *cur_clone_root;
struct btrfs_key found_key;
struct btrfs_path *tmp_path;
+ int compressed;
u32 i;
tmp_path = alloc_path_for_send();
if (!tmp_path)
return -ENOMEM;
+ backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
+ if (!backref_ctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
if (data_offset >= ino_size) {
/*
* There may be extents that lie behind the file's size.
@@ -1172,22 +1208,23 @@ static int find_extent_clone(struct send_ctx *sctx,
ret = -ENOENT;
goto out;
}
+ compressed = btrfs_file_extent_compression(eb, fi);
num_bytes = btrfs_file_extent_num_bytes(eb, fi);
- logical = btrfs_file_extent_disk_bytenr(eb, fi);
- if (logical == 0) {
+ disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+ if (disk_byte == 0) {
ret = -ENOENT;
goto out;
}
- logical += btrfs_file_extent_offset(eb, fi);
+ logical = disk_byte + btrfs_file_extent_offset(eb, fi);
- ret = extent_from_logical(sctx->send_root->fs_info,
- logical, tmp_path, &found_key);
+ ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
+ &found_key, &flags);
btrfs_release_path(tmp_path);
if (ret < 0)
goto out;
- if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
ret = -EIO;
goto out;
}
@@ -1202,12 +1239,12 @@ static int find_extent_clone(struct send_ctx *sctx,
cur_clone_root->found_refs = 0;
}
- backref_ctx.sctx = sctx;
- backref_ctx.found = 0;
- backref_ctx.cur_objectid = ino;
- backref_ctx.cur_offset = data_offset;
- backref_ctx.found_in_send_root = 0;
- backref_ctx.extent_len = num_bytes;
+ backref_ctx->sctx = sctx;
+ backref_ctx->found = 0;
+ backref_ctx->cur_objectid = ino;
+ backref_ctx->cur_offset = data_offset;
+ backref_ctx->found_itself = 0;
+ backref_ctx->extent_len = num_bytes;
/*
* The last extent of a file may be too large due to page alignment.
@@ -1215,25 +1252,31 @@ static int find_extent_clone(struct send_ctx *sctx,
* __iterate_backrefs work.
*/
if (data_offset + num_bytes >= ino_size)
- backref_ctx.extent_len = ino_size - data_offset;
+ backref_ctx->extent_len = ino_size - data_offset;
/*
* Now collect all backrefs.
*/
+ if (compressed == BTRFS_COMPRESS_NONE)
+ extent_item_pos = logical - found_key.objectid;
+ else
+ extent_item_pos = 0;
+
- extent_item_pos = logical - found_key.objectid;
ret = iterate_extent_inodes(sctx->send_root->fs_info,
found_key.objectid, extent_item_pos, 1,
- __iterate_backrefs, &backref_ctx);
+ __iterate_backrefs, backref_ctx);
+
if (ret < 0)
goto out;
- if (!backref_ctx.found_in_send_root) {
+ if (!backref_ctx->found_itself) {
/* found a bug in backref code? */
ret = -EIO;
printk(KERN_ERR "btrfs: ERROR did not find backref in "
"send_root. inode=%llu, offset=%llu, "
- "logical=%llu\n",
- ino, data_offset, logical);
+ "disk_byte=%llu found extent=%llu\n",
+ ino, data_offset, disk_byte, found_key.objectid);
goto out;
}
@@ -1242,7 +1285,7 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
"num_bytes=%llu, logical=%llu\n",
data_offset, ino, num_bytes, logical);
- if (!backref_ctx.found)
+ if (!backref_ctx->found)
verbose_printk("btrfs: no clones found\n");
cur_clone_root = NULL;
@@ -1253,7 +1296,6 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
else if (sctx->clone_roots[i].root == sctx->send_root)
/* prefer clones from send_root over others */
cur_clone_root = sctx->clone_roots + i;
- break;
}
}
@@ -1267,6 +1309,7 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
out:
btrfs_free_path(tmp_path);
+ kfree(backref_ctx);
return ret;
}
@@ -1307,8 +1350,6 @@ static int read_symlink(struct send_ctx *sctx,
len = btrfs_file_extent_inline_len(path->nodes[0], ei);
ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
- if (ret < 0)
- goto out;
out:
btrfs_free_path(path);
@@ -1404,7 +1445,7 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
u64 right_gen;
ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
- NULL);
+ NULL, NULL);
if (ret < 0 && ret != -ENOENT)
goto out;
left_ret = ret;
@@ -1413,16 +1454,16 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
right_ret = -ENOENT;
} else {
ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if (ret < 0 && ret != -ENOENT)
goto out;
right_ret = ret;
}
if (!left_ret && !right_ret) {
- if (left_gen == gen && right_gen == gen)
+ if (left_gen == gen && right_gen == gen) {
ret = inode_state_no_change;
- else if (left_gen == gen) {
+ } else if (left_gen == gen) {
if (ino < sctx->send_progress)
ret = inode_state_did_create;
else
@@ -1516,6 +1557,10 @@ out:
return ret;
}
+/*
+ * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
+ * generation of the parent dir and the name of the dir entry.
+ */
static int get_first_ref(struct send_ctx *sctx,
struct btrfs_root *root, u64 ino,
u64 *dir, u64 *dir_gen, struct fs_path *name)
@@ -1524,8 +1569,8 @@ static int get_first_ref(struct send_ctx *sctx,
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_path *path;
- struct btrfs_inode_ref *iref;
int len;
+ u64 parent_dir;
path = alloc_path_for_send();
if (!path)
@@ -1541,27 +1586,41 @@ static int get_first_ref(struct send_ctx *sctx,
if (!ret)
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
- if (ret || found_key.objectid != key.objectid ||
- found_key.type != key.type) {
+ if (ret || found_key.objectid != ino ||
+ (found_key.type != BTRFS_INODE_REF_KEY &&
+ found_key.type != BTRFS_INODE_EXTREF_KEY)) {
ret = -ENOENT;
goto out;
}
- iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_inode_ref);
- len = btrfs_inode_ref_name_len(path->nodes[0], iref);
- ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
- (unsigned long)(iref + 1), len);
+ if (found_key.type == BTRFS_INODE_REF_KEY) {
+ struct btrfs_inode_ref *iref;
+ iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_ref);
+ len = btrfs_inode_ref_name_len(path->nodes[0], iref);
+ ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+ (unsigned long)(iref + 1),
+ len);
+ parent_dir = found_key.offset;
+ } else {
+ struct btrfs_inode_extref *extref;
+ extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_extref);
+ len = btrfs_inode_extref_name_len(path->nodes[0], extref);
+ ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+ (unsigned long)&extref->name, len);
+ parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
+ }
if (ret < 0)
goto out;
btrfs_release_path(path);
- ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL,
- NULL);
+ ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
+ NULL, NULL);
if (ret < 0)
goto out;
- *dir = found_key.offset;
+ *dir = parent_dir;
out:
btrfs_free_path(path);
@@ -1586,22 +1645,28 @@ static int is_first_ref(struct send_ctx *sctx,
if (ret < 0)
goto out;
- if (name_len != fs_path_len(tmp_name)) {
+ if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
ret = 0;
goto out;
}
- ret = memcmp(tmp_name->start, name, name_len);
- if (ret)
- ret = 0;
- else
- ret = 1;
+ ret = !memcmp(tmp_name->start, name, name_len);
out:
fs_path_free(sctx, tmp_name);
return ret;
}
+/*
+ * Used by process_recorded_refs to determine if a new ref would overwrite an
+ * already existing ref. In case it detects an overwrite, it returns the
+ * inode/gen in who_ino/who_gen.
+ * When an overwrite is detected, process_recorded_refs does proper orphanizing
+ * to make sure later references to the overwritten inode are possible.
+ * Orphanizing is, however, only required for the first ref of an inode.
+ * process_recorded_refs does an additional is_first_ref check to see if
+ * orphanizing is really required.
+ */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
const char *name, int name_len,
u64 *who_ino, u64 *who_gen)
@@ -1626,9 +1691,14 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
goto out;
}
+ /*
+ * Check if the overwritten ref was already processed. If yes, the ref
+ * was already unlinked/moved, so we can safely assume that we will not
+ * overwrite anything at this point in time.
+ */
if (other_inode > sctx->send_progress) {
ret = get_inode_info(sctx->parent_root, other_inode, NULL,
- who_gen, NULL, NULL, NULL);
+ who_gen, NULL, NULL, NULL, NULL);
if (ret < 0)
goto out;
@@ -1642,6 +1712,13 @@ out:
return ret;
}
+/*
+ * Checks if the ref was overwritten by an already processed inode. This is
+ * used by __get_cur_name_and_parent to find out if the ref was orphanized and
+ * thus the orphan name needs to be used.
+ * process_recorded_refs also uses it to avoid unlinking of refs that were
+ * overwritten.
+ */
static int did_overwrite_ref(struct send_ctx *sctx,
u64 dir, u64 dir_gen,
u64 ino, u64 ino_gen,
@@ -1671,7 +1748,7 @@ static int did_overwrite_ref(struct send_ctx *sctx,
}
ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
- NULL);
+ NULL, NULL);
if (ret < 0)
goto out;
@@ -1690,6 +1767,11 @@ out:
return ret;
}
+/*
+ * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
+ * that got overwritten. This is used by process_recorded_refs to determine
+ * if it has to use the path as returned by get_cur_path or the orphan name.
+ */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
int ret = 0;
@@ -1710,39 +1792,40 @@ static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
name->start, fs_path_len(name));
- if (ret < 0)
- goto out;
out:
fs_path_free(sctx, name);
return ret;
}
+/*
+ * Insert a name cache entry. On 32-bit kernels the radix tree index is only
+ * 32 bits wide, so truncated ino values may clash. This function
+ * takes care of this with the help of name_cache_entry::radix_list.
+ * In case of error, nce is kfreed.
+ */
static int name_cache_insert(struct send_ctx *sctx,
struct name_cache_entry *nce)
{
int ret = 0;
- struct name_cache_entry **ncea;
-
- ncea = radix_tree_lookup(&sctx->name_cache, nce->ino);
- if (ncea) {
- if (!ncea[0])
- ncea[0] = nce;
- else if (!ncea[1])
- ncea[1] = nce;
- else
- BUG();
- } else {
- ncea = kmalloc(sizeof(void *) * 2, GFP_NOFS);
- if (!ncea)
+ struct list_head *nce_head;
+
+ nce_head = radix_tree_lookup(&sctx->name_cache,
+ (unsigned long)nce->ino);
+ if (!nce_head) {
+ nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
+ if (!nce_head)
return -ENOMEM;
+ INIT_LIST_HEAD(nce_head);
- ncea[0] = nce;
- ncea[1] = NULL;
- ret = radix_tree_insert(&sctx->name_cache, nce->ino, ncea);
- if (ret < 0)
+ ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
+ if (ret < 0) {
+ kfree(nce_head);
+ kfree(nce);
return ret;
+ }
}
+ list_add_tail(&nce->radix_list, nce_head);
list_add_tail(&nce->list, &sctx->name_cache_list);
sctx->name_cache_size++;
@@ -1752,50 +1835,52 @@ static int name_cache_insert(struct send_ctx *sctx,
static void name_cache_delete(struct send_ctx *sctx,
struct name_cache_entry *nce)
{
- struct name_cache_entry **ncea;
-
- ncea = radix_tree_lookup(&sctx->name_cache, nce->ino);
- BUG_ON(!ncea);
-
- if (ncea[0] == nce)
- ncea[0] = NULL;
- else if (ncea[1] == nce)
- ncea[1] = NULL;
- else
- BUG();
+ struct list_head *nce_head;
- if (!ncea[0] && !ncea[1]) {
- radix_tree_delete(&sctx->name_cache, nce->ino);
- kfree(ncea);
- }
+ nce_head = radix_tree_lookup(&sctx->name_cache,
+ (unsigned long)nce->ino);
+ BUG_ON(!nce_head);
+ list_del(&nce->radix_list);
list_del(&nce->list);
-
sctx->name_cache_size--;
+
+ if (list_empty(nce_head)) {
+ radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
+ kfree(nce_head);
+ }
}
static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
u64 ino, u64 gen)
{
- struct name_cache_entry **ncea;
+ struct list_head *nce_head;
+ struct name_cache_entry *cur;
- ncea = radix_tree_lookup(&sctx->name_cache, ino);
- if (!ncea)
+ nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
+ if (!nce_head)
return NULL;
- if (ncea[0] && ncea[0]->gen == gen)
- return ncea[0];
- else if (ncea[1] && ncea[1]->gen == gen)
- return ncea[1];
+ list_for_each_entry(cur, nce_head, radix_list) {
+ if (cur->ino == ino && cur->gen == gen)
+ return cur;
+ }
return NULL;
}
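The lookup side of this collision handling can be sketched in plain C: the index key is the truncated (unsigned long) ino, so the full ino/gen pair must be re-checked while walking the per-key chain. A minimal sketch, assuming a hand-rolled circular list in place of the kernel's list_head helpers:

#include <stddef.h>
#include <stdint.h>

struct list_head { struct list_head *next, *prev; };

struct name_cache_entry {
	struct list_head radix_list;	/* chains entries whose truncated keys clash */
	uint64_t ino;
	uint64_t gen;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* walk one collision chain and match on the full ino/gen pair */
static struct name_cache_entry *
search_chain(struct list_head *head, uint64_t ino, uint64_t gen)
{
	struct list_head *pos;

	for (pos = head->next; pos != head; pos = pos->next) {
		struct name_cache_entry *cur =
			container_of(pos, struct name_cache_entry, radix_list);
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;	/* the truncated key matched, the full key did not */
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct name_cache_entry a = { .ino = 1ULL << 33, .gen = 7 };

	/* link the single entry into the chain; on a 32-bit index,
	 * 1ULL << 33 truncates to 0, which is exactly the clash case */
	a.radix_list.next = a.radix_list.prev = &head;
	head.next = head.prev = &a.radix_list;

	return search_chain(&head, 1ULL << 33, 7) ? 0 : 1;
}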
+/*
+ * Removes the entry from the list and adds it back to the end. This marks the
+ * entry as recently used so that name_cache_clean_unused does not remove it.
+ */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
list_del(&nce->list);
list_add_tail(&nce->list, &sctx->name_cache_list);
}
+/*
+ * Remove some entries from the beginning of name_cache_list.
+ */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
struct name_cache_entry *nce;
@@ -1814,13 +1899,23 @@ static void name_cache_clean_unused(struct send_ctx *sctx)
static void name_cache_free(struct send_ctx *sctx)
{
struct name_cache_entry *nce;
- struct name_cache_entry *tmp;
- list_for_each_entry_safe(nce, tmp, &sctx->name_cache_list, list) {
+ while (!list_empty(&sctx->name_cache_list)) {
+ nce = list_entry(sctx->name_cache_list.next,
+ struct name_cache_entry, list);
name_cache_delete(sctx, nce);
+ kfree(nce);
}
}
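name_cache_free drains the list by repeatedly popping the head rather than using list_for_each_entry_safe, since name_cache_delete itself unlinks the entry and may free the shared per-key list head. A hedged userspace sketch of the pop-until-empty idiom on a simple singly-linked list:

#include <stdlib.h>

struct node { struct node *next; };

/* pop-until-empty: safe even when per-node teardown mutates the list
 * beyond the node currently being removed */
static void drain(struct node **head)
{
	while (*head) {
		struct node *n = *head;

		*head = n->next;	/* unlink first ... */
		free(n);		/* ... then release */
	}
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->next = head;
		head = n;
	}
	drain(&head);
	return head ? 1 : 0;
}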
+/*
+ * Used by get_cur_path for each ref up to the root.
+ * Returns 0 if it succeeded.
+ * Returns 1 if the inode does not exist or got overwritten. In that case, the
+ * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
+ * is returned, parent_ino/parent_gen are not guaranteed to be valid.
+ * Returns <0 in case of error.
+ */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
u64 ino, u64 gen,
u64 *parent_ino,
@@ -1832,6 +1927,11 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
struct btrfs_path *path = NULL;
struct name_cache_entry *nce = NULL;
+ /*
+ * First check if we already did a call to this function with the same
+ * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
+ * return the cached result.
+ */
nce = name_cache_search(sctx, ino, gen);
if (nce) {
if (ino < sctx->send_progress && nce->need_later_update) {
@@ -1854,6 +1954,11 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
if (!path)
return -ENOMEM;
+ /*
+ * If the inode does not exist yet, add the orphan name and return 1.
+ * This should only happen for the parent dir that we determine in
+ * __record_new_ref.
+ */
ret = is_inode_existent(sctx, ino, gen);
if (ret < 0)
goto out;
@@ -1866,6 +1971,10 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
goto out_cache;
}
+ /*
+ * Depending on whether the inode was already processed or not, use
+ * send_root or parent_root for ref lookup.
+ */
if (ino < sctx->send_progress)
ret = get_first_ref(sctx, sctx->send_root, ino,
parent_ino, parent_gen, dest);
@@ -1875,6 +1984,10 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
if (ret < 0)
goto out;
+ /*
+ * Check if the ref was overwritten by an inode's ref that was processed
+ * earlier. If yes, treat as orphan and return 1.
+ */
ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
dest->start, dest->end - dest->start);
if (ret < 0)
@@ -1888,6 +2001,9 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
}
out_cache:
+ /*
+ * Store the result of the lookup in the name cache.
+ */
nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
if (!nce) {
ret = -ENOMEM;
@@ -1901,7 +2017,6 @@ out_cache:
nce->name_len = fs_path_len(dest);
nce->ret = ret;
strcpy(nce->name, dest->start);
- memset(&nce->use_list, 0, sizeof(nce->use_list));
if (ino < sctx->send_progress)
nce->need_later_update = 0;
@@ -2107,9 +2222,6 @@ static int send_subvol_begin(struct send_ctx *sctx)
read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
btrfs_release_path(path);
- if (ret < 0)
- goto out;
-
if (parent_root) {
ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
if (ret < 0)
@@ -2276,7 +2388,7 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
btrfs_inode_mtime(ii));
TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
btrfs_inode_ctime(ii));
- /* TODO otime? */
+ /* TODO Add otime support once the otime patches are merged upstream */
ret = send_cmd(sctx);
@@ -2292,39 +2404,39 @@ out:
* a valid path yet because we did not process the refs yet. So, the inode
* is created as orphan.
*/
-static int send_create_inode(struct send_ctx *sctx, struct btrfs_path *path,
- struct btrfs_key *key)
+static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
int ret = 0;
- struct extent_buffer *eb = path->nodes[0];
- struct btrfs_inode_item *ii;
struct fs_path *p;
- int slot = path->slots[0];
int cmd;
+ u64 gen;
u64 mode;
+ u64 rdev;
-verbose_printk("btrfs: send_create_inode %llu\n", sctx->cur_ino);
+verbose_printk("btrfs: send_create_inode %llu\n", ino);
p = fs_path_alloc(sctx);
if (!p)
return -ENOMEM;
- ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
- mode = btrfs_inode_mode(eb, ii);
+ ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL,
+ NULL, &rdev);
+ if (ret < 0)
+ goto out;
- if (S_ISREG(mode))
+ if (S_ISREG(mode)) {
cmd = BTRFS_SEND_C_MKFILE;
- else if (S_ISDIR(mode))
+ } else if (S_ISDIR(mode)) {
cmd = BTRFS_SEND_C_MKDIR;
- else if (S_ISLNK(mode))
+ } else if (S_ISLNK(mode)) {
cmd = BTRFS_SEND_C_SYMLINK;
- else if (S_ISCHR(mode) || S_ISBLK(mode))
+ } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
cmd = BTRFS_SEND_C_MKNOD;
- else if (S_ISFIFO(mode))
+ } else if (S_ISFIFO(mode)) {
cmd = BTRFS_SEND_C_MKFIFO;
- else if (S_ISSOCK(mode))
+ } else if (S_ISSOCK(mode)) {
cmd = BTRFS_SEND_C_MKSOCK;
- else {
+ } else {
printk(KERN_WARNING "btrfs: unexpected inode type %o",
(int)(mode & S_IFMT));
ret = -ENOTSUPP;
@@ -2335,22 +2447,23 @@ verbose_printk("btrfs: send_create_inode %llu\n", sctx->cur_ino);
if (ret < 0)
goto out;
- ret = gen_unique_name(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ ret = gen_unique_name(sctx, ino, gen, p);
if (ret < 0)
goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
- TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, sctx->cur_ino);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
if (S_ISLNK(mode)) {
fs_path_reset(p);
- ret = read_symlink(sctx, sctx->send_root, sctx->cur_ino, p);
+ ret = read_symlink(sctx, sctx->send_root, ino, p);
if (ret < 0)
goto out;
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode)) {
- TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, btrfs_inode_rdev(eb, ii));
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
}
ret = send_cmd(sctx);
@@ -2364,6 +2477,92 @@ out:
return ret;
}
+/*
+ * We need some special handling for inodes that get processed before their
+ * parent directory is created. See process_recorded_refs for details.
+ * This function checks if we already created the dir out of order.
+ */
+static int did_create_dir(struct send_ctx *sctx, u64 dir)
+{
+ int ret = 0;
+ struct btrfs_path *path = NULL;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_key di_key;
+ struct extent_buffer *eb;
+ struct btrfs_dir_item *di;
+ int slot;
+
+ path = alloc_path_for_send();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key.objectid = dir;
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(sctx->send_root, &key, path,
+ 1, 0);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+ }
+ if (ret || found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = 0;
+ goto out;
+ }
+
+ di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
+ btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+
+ if (di_key.objectid < sctx->send_progress) {
+ ret = 1;
+ goto out;
+ }
+
+ key.offset = found_key.offset + 1;
+ btrfs_release_path(path);
+ }
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Only creates the inode if it is:
+ * 1. Not a directory
+ * 2. A directory that was not yet created due to out-of-order directory
+ *    creation. See did_create_dir and process_recorded_refs for details.
+ */
+static int send_create_inode_if_needed(struct send_ctx *sctx)
+{
+ int ret;
+
+ if (S_ISDIR(sctx->cur_inode_mode)) {
+ ret = did_create_dir(sctx, sctx->cur_ino);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = send_create_inode(sctx, sctx->cur_ino);
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
+}
+
struct recorded_ref {
struct list_head list;
char *dir_path;
@@ -2416,13 +2615,13 @@ static int record_ref(struct list_head *head, u64 dir,
static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head)
{
struct recorded_ref *cur;
- struct recorded_ref *tmp;
- list_for_each_entry_safe(cur, tmp, head, list) {
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct recorded_ref, list);
fs_path_free(sctx, cur->full_path);
+ list_del(&cur->list);
kfree(cur);
}
- INIT_LIST_HEAD(head);
}
static void free_recorded_refs(struct send_ctx *sctx)
@@ -2432,7 +2631,7 @@ static void free_recorded_refs(struct send_ctx *sctx)
}
/*
- * Renames/moves a file/dir to it's orphan name. Used when the first
+ * Renames/moves a file/dir to its orphan name. Used when the first
* ref of an unprocessed inode gets overwritten and for all non empty
* directories.
*/
@@ -2472,6 +2671,12 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress)
struct btrfs_key loc;
struct btrfs_dir_item *di;
+ /*
+ * Don't try to rmdir the top/root subvolume dir.
+ */
+ if (dir == BTRFS_FIRST_FREE_OBJECTID)
+ return 0;
+
path = alloc_path_for_send();
if (!path)
return -ENOMEM;
@@ -2513,160 +2718,6 @@ out:
return ret;
}
-struct finish_unordered_dir_ctx {
- struct send_ctx *sctx;
- struct fs_path *cur_path;
- struct fs_path *dir_path;
- u64 dir_ino;
- int need_delete;
- int delete_pass;
-};
-
-int __finish_unordered_dir(int num, struct btrfs_key *di_key,
- const char *name, int name_len,
- const char *data, int data_len,
- u8 type, void *ctx)
-{
- int ret = 0;
- struct finish_unordered_dir_ctx *fctx = ctx;
- struct send_ctx *sctx = fctx->sctx;
- u64 di_gen;
- u64 di_mode;
- int is_orphan = 0;
-
- if (di_key->objectid >= fctx->dir_ino)
- goto out;
-
- fs_path_reset(fctx->cur_path);
-
- ret = get_inode_info(sctx->send_root, di_key->objectid,
- NULL, &di_gen, &di_mode, NULL, NULL);
- if (ret < 0)
- goto out;
-
- ret = is_first_ref(sctx, sctx->send_root, di_key->objectid,
- fctx->dir_ino, name, name_len);
- if (ret < 0)
- goto out;
- if (ret) {
- is_orphan = 1;
- ret = gen_unique_name(sctx, di_key->objectid, di_gen,
- fctx->cur_path);
- } else {
- ret = get_cur_path(sctx, di_key->objectid, di_gen,
- fctx->cur_path);
- }
- if (ret < 0)
- goto out;
-
- ret = fs_path_add(fctx->dir_path, name, name_len);
- if (ret < 0)
- goto out;
-
- if (!fctx->delete_pass) {
- if (S_ISDIR(di_mode)) {
- ret = send_rename(sctx, fctx->cur_path,
- fctx->dir_path);
- } else {
- ret = send_link(sctx, fctx->dir_path,
- fctx->cur_path);
- if (is_orphan)
- fctx->need_delete = 1;
- }
- } else if (!S_ISDIR(di_mode)) {
- ret = send_unlink(sctx, fctx->cur_path);
- } else {
- ret = 0;
- }
-
- fs_path_remove(fctx->dir_path);
-
-out:
- return ret;
-}
-
-/*
- * Go through all dir items and see if we find refs which could not be created
- * in the past because the dir did not exist at that time.
- */
-static int finish_outoforder_dir(struct send_ctx *sctx, u64 dir, u64 dir_gen)
-{
- int ret = 0;
- struct btrfs_path *path = NULL;
- struct btrfs_key key;
- struct btrfs_key found_key;
- struct extent_buffer *eb;
- struct finish_unordered_dir_ctx fctx;
- int slot;
-
- path = alloc_path_for_send();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
- memset(&fctx, 0, sizeof(fctx));
- fctx.sctx = sctx;
- fctx.cur_path = fs_path_alloc(sctx);
- fctx.dir_path = fs_path_alloc(sctx);
- if (!fctx.cur_path || !fctx.dir_path) {
- ret = -ENOMEM;
- goto out;
- }
- fctx.dir_ino = dir;
-
- ret = get_cur_path(sctx, dir, dir_gen, fctx.dir_path);
- if (ret < 0)
- goto out;
-
- /*
- * We do two passes. The first links in the new refs and the second
- * deletes orphans if required. Deletion of orphans is not required for
- * directory inodes, as we always have only one ref and use rename
- * instead of link for those.
- */
-
-again:
- key.objectid = dir;
- key.type = BTRFS_DIR_ITEM_KEY;
- key.offset = 0;
- while (1) {
- ret = btrfs_search_slot_for_read(sctx->send_root, &key, path,
- 1, 0);
- if (ret < 0)
- goto out;
- eb = path->nodes[0];
- slot = path->slots[0];
- btrfs_item_key_to_cpu(eb, &found_key, slot);
-
- if (found_key.objectid != key.objectid ||
- found_key.type != key.type) {
- btrfs_release_path(path);
- break;
- }
-
- ret = iterate_dir_item(sctx, sctx->send_root, path,
- &found_key, __finish_unordered_dir,
- &fctx);
- if (ret < 0)
- goto out;
-
- key.offset = found_key.offset + 1;
- btrfs_release_path(path);
- }
-
- if (!fctx.delete_pass && fctx.need_delete) {
- fctx.delete_pass = 1;
- goto again;
- }
-
-out:
- btrfs_free_path(path);
- fs_path_free(sctx, fctx.cur_path);
- fs_path_free(sctx, fctx.dir_path);
- return ret;
-}
-
/*
* This does all the move/link/unlink/rmdir magic.
*/
@@ -2674,6 +2725,7 @@ static int process_recorded_refs(struct send_ctx *sctx)
{
int ret = 0;
struct recorded_ref *cur;
+ struct recorded_ref *cur2;
struct ulist *check_dirs = NULL;
struct ulist_iterator uit;
struct ulist_node *un;
@@ -2685,6 +2737,12 @@ static int process_recorded_refs(struct send_ctx *sctx)
verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
+ /*
+ * This should never happen as the root dir always has the same ref,
+ * which is always '..'.
+ */
+ BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
+
valid_path = fs_path_alloc(sctx);
if (!valid_path) {
ret = -ENOMEM;
@@ -2731,6 +2789,46 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
list_for_each_entry(cur, &sctx->new_refs, list) {
/*
+ * We may have refs where the parent directory does not exist
+ * yet. This happens if the parent directory's inum is higher
+ * than the current inum. To handle this case, we create the
+ * parent directory out of order. But we need to check if this
+ * already happened before due to other refs in the same dir.
+ */
+ ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
+ if (ret < 0)
+ goto out;
+ if (ret == inode_state_will_create) {
+ ret = 0;
+ /*
+ * First check if any of the current inodes refs did
+ * already create the dir.
+ */
+ list_for_each_entry(cur2, &sctx->new_refs, list) {
+ if (cur == cur2)
+ break;
+ if (cur2->dir == cur->dir) {
+ ret = 1;
+ break;
+ }
+ }
+
+ /*
+ * If that did not happen, check if a previous inode
+ * did already create the dir.
+ */
+ if (!ret)
+ ret = did_create_dir(sctx, cur->dir);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ ret = send_create_inode(sctx, cur->dir);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
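The inner list_for_each_entry scan above only inspects refs recorded before the current one; the same check on a plain array might look like the following (illustrative sketch only, with made-up types):

#include <stddef.h>

struct ref { unsigned long long dir; };

/* did any ref recorded before position cur already target the same dir? */
static int dir_seen_before(const struct ref *refs, size_t cur,
			   unsigned long long dir)
{
	size_t i;

	for (i = 0; i < cur; i++)
		if (refs[i].dir == dir)
			return 1;
	return 0;
}

int main(void)
{
	struct ref refs[] = { {256}, {260}, {260} };

	/* the third ref's dir was already seen at index 1 */
	return dir_seen_before(refs, 2, refs[2].dir) ? 0 : 1;
}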
+ /*
* Check if this new ref would overwrite the first ref of
* another unprocessed inode. If yes, orphanize the
* overwritten inode. If we find an overwritten ref that is
@@ -2764,7 +2862,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
* inode, move it and update valid_path. If not, link or move
* it depending on the inode mode.
*/
- if (is_orphan && !sctx->cur_inode_first_ref_orphan) {
+ if (is_orphan) {
ret = send_rename(sctx, valid_path, cur->full_path);
if (ret < 0)
goto out;
@@ -2827,6 +2925,17 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
if (ret < 0)
goto out;
}
+ } else if (S_ISDIR(sctx->cur_inode_mode) &&
+ !list_empty(&sctx->deleted_refs)) {
+ /*
* We have a moved dir. Add the old parent to check_dirs.
+ */
+ cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
+ list);
+ ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
+ GFP_NOFS);
+ if (ret < 0)
+ goto out;
} else if (!S_ISDIR(sctx->cur_inode_mode)) {
/*
* We have a non dir inode. Go through all deleted refs and
@@ -2840,35 +2949,9 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
if (ret < 0)
goto out;
if (!ret) {
- /*
- * In case the inode was moved to a directory
- * that was not created yet (see
- * __record_new_ref), we can not unlink the ref
- * as it will be needed later when the parent
- * directory is created, so that we can move in
- * the inode to the new dir.
- */
- if (!is_orphan &&
- sctx->cur_inode_first_ref_orphan) {
- ret = orphanize_inode(sctx,
- sctx->cur_ino,
- sctx->cur_inode_gen,
- cur->full_path);
- if (ret < 0)
- goto out;
- ret = gen_unique_name(sctx,
- sctx->cur_ino,
- sctx->cur_inode_gen,
- valid_path);
- if (ret < 0)
- goto out;
- is_orphan = 1;
-
- } else {
- ret = send_unlink(sctx, cur->full_path);
- if (ret < 0)
- goto out;
- }
+ ret = send_unlink(sctx, cur->full_path);
+ if (ret < 0)
+ goto out;
}
ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
GFP_NOFS);
@@ -2880,12 +2963,11 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
* If the inode is still orphan, unlink the orphan. This may
* happen when a previous inode did overwrite the first ref
* of this inode and no new refs were added for the current
- * inode.
- * We can however not delete the orphan in case the inode relies
- * in a directory that was not created yet (see
- * __record_new_ref)
+ * inode. Unlinking does not mean that the inode is deleted in
+ * all cases. There may still be links to this inode in other
+ * places.
*/
- if (is_orphan && !sctx->cur_inode_first_ref_orphan) {
+ if (is_orphan) {
ret = send_unlink(sctx, valid_path);
if (ret < 0)
goto out;
@@ -2900,6 +2982,11 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
*/
ULIST_ITER_INIT(&uit);
while ((un = ulist_next(check_dirs, &uit))) {
+ /*
+ * In case we had refs into dirs that were not processed yet,
+ * we don't need to do the utime and rmdir logic for these dirs.
+ * The dir will be processed later.
+ */
if (un->val > sctx->cur_ino)
continue;
@@ -2929,25 +3016,6 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
}
}
- /*
- * Current inode is now at it's new position, so we must increase
- * send_progress
- */
- sctx->send_progress = sctx->cur_ino + 1;
-
- /*
- * We may have a directory here that has pending refs which could not
- * be created before (because the dir did not exist before, see
- * __record_new_ref). finish_outoforder_dir will link/move the pending
- * refs.
- */
- if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_new) {
- ret = finish_outoforder_dir(sctx, sctx->cur_ino,
- sctx->cur_inode_gen);
- if (ret < 0)
- goto out;
- }
-
ret = 0;
out:
@@ -2971,34 +3039,9 @@ static int __record_new_ref(int num, u64 dir, int index,
return -ENOMEM;
ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL,
- NULL);
- if (ret < 0)
- goto out;
-
- /*
- * The parent may be non-existent at this point in time. This happens
- * if the ino of the parent dir is higher then the current ino. In this
- * case, we can not process this ref until the parent dir is finally
- * created. If we reach the parent dir later, process_recorded_refs
- * will go through all dir items and process the refs that could not be
- * processed before. In case this is the first ref, we set
- * cur_inode_first_ref_orphan to 1 to inform process_recorded_refs to
- * keep an orphan of the inode so that it later can be used for
- * link/move
- */
- ret = is_inode_existent(sctx, dir, gen);
+ NULL, NULL);
if (ret < 0)
goto out;
- if (!ret) {
- ret = is_first_ref(sctx, sctx->send_root, sctx->cur_ino, dir,
- name->start, fs_path_len(name));
- if (ret < 0)
- goto out;
- if (ret)
- sctx->cur_inode_first_ref_orphan = 1;
- ret = 0;
- goto out;
- }
ret = get_cur_path(sctx, dir, gen, p);
if (ret < 0)
@@ -3029,7 +3072,7 @@ static int __record_deleted_ref(int num, u64 dir, int index,
return -ENOMEM;
ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL,
- NULL);
+ NULL, NULL);
if (ret < 0)
goto out;
@@ -3206,33 +3249,29 @@ static int process_all_refs(struct send_ctx *sctx,
key.offset = 0;
while (1) {
ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
- if (ret < 0) {
- btrfs_release_path(path);
+ if (ret < 0)
goto out;
- }
- if (ret) {
- btrfs_release_path(path);
+ if (ret)
break;
- }
eb = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(eb, &found_key, slot);
if (found_key.objectid != key.objectid ||
- found_key.type != key.type) {
- btrfs_release_path(path);
+ (found_key.type != BTRFS_INODE_REF_KEY &&
+ found_key.type != BTRFS_INODE_EXTREF_KEY))
break;
- }
- ret = iterate_inode_ref(sctx, sctx->parent_root, path,
- &found_key, 0, cb, sctx);
+ ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb,
+ sctx);
btrfs_release_path(path);
if (ret < 0)
goto out;
key.offset = found_key.offset + 1;
}
+ btrfs_release_path(path);
ret = process_recorded_refs(sctx);
@@ -3555,7 +3594,7 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
int ret = 0;
struct fs_path *p;
loff_t pos = offset;
- int readed = 0;
+ int num_read = 0;
mm_segment_t old_fs;
p = fs_path_alloc(sctx);
@@ -3580,8 +3619,8 @@ verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos);
if (ret < 0)
goto out;
- readed = ret;
- if (!readed)
+ num_read = ret;
+ if (!num_read)
goto out;
ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
@@ -3594,7 +3633,7 @@ verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
- TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, readed);
+ TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
ret = send_cmd(sctx);
@@ -3604,7 +3643,7 @@ out:
set_fs(old_fs);
if (ret < 0)
return ret;
- return readed;
+ return num_read;
}
/*
@@ -3615,7 +3654,6 @@ static int send_clone(struct send_ctx *sctx,
struct clone_root *clone_root)
{
int ret = 0;
- struct btrfs_root *clone_root2 = clone_root->root;
struct fs_path *p;
u64 gen;
@@ -3640,22 +3678,23 @@ verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
- if (clone_root2 == sctx->send_root) {
+ if (clone_root->root == sctx->send_root) {
ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
- &gen, NULL, NULL, NULL);
+ &gen, NULL, NULL, NULL, NULL);
if (ret < 0)
goto out;
ret = get_cur_path(sctx, clone_root->ino, gen, p);
} else {
- ret = get_inode_path(sctx, clone_root2, clone_root->ino, p);
+ ret = get_inode_path(sctx, clone_root->root,
+ clone_root->ino, p);
}
if (ret < 0)
goto out;
TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
- clone_root2->root_item.uuid);
+ clone_root->root->root_item.uuid);
TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
- clone_root2->root_item.ctransid);
+ clone_root->root->root_item.ctransid);
TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
clone_root->offset);
@@ -3684,10 +3723,17 @@ static int send_write_or_clone(struct send_ctx *sctx,
ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_file_extent_item);
type = btrfs_file_extent_type(path->nodes[0], ei);
- if (type == BTRFS_FILE_EXTENT_INLINE)
+ if (type == BTRFS_FILE_EXTENT_INLINE) {
len = btrfs_file_extent_inline_len(path->nodes[0], ei);
- else
+ /*
+ * it is possible the inline item won't cover the whole page,
+ * but there may be items after this page. Make
+ * sure to send the whole thing
+ */
+ len = PAGE_CACHE_ALIGN(len);
+ } else {
len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
+ }
if (offset + len > sctx->cur_inode_size)
len = sctx->cur_inode_size - offset;
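PAGE_CACHE_ALIGN rounds the inline extent's length up to a full page so the whole page gets sent. The rounding is the usual power-of-two align-up; a minimal sketch, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* assumption: 4 KiB pages */

/* round len up to the next multiple of PAGE_SIZE (PAGE_SIZE must be 2^n) */
static uint64_t page_align(uint64_t len)
{
	return (len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	printf("%llu %llu %llu\n",
	       (unsigned long long)page_align(1),	/* 4096 */
	       (unsigned long long)page_align(4096),	/* 4096 */
	       (unsigned long long)page_align(4097));	/* 8192 */
	return 0;
}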
@@ -3735,6 +3781,8 @@ static int is_extent_unchanged(struct send_ctx *sctx,
u64 left_offset_fixed;
u64 left_len;
u64 right_len;
+ u64 left_gen;
+ u64 right_gen;
u8 left_type;
u8 right_type;
@@ -3744,17 +3792,17 @@ static int is_extent_unchanged(struct send_ctx *sctx,
eb = left_path->nodes[0];
slot = left_path->slots[0];
-
ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
left_type = btrfs_file_extent_type(eb, ei);
- left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
- left_len = btrfs_file_extent_num_bytes(eb, ei);
- left_offset = btrfs_file_extent_offset(eb, ei);
if (left_type != BTRFS_FILE_EXTENT_REG) {
ret = 0;
goto out;
}
+ left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+ left_len = btrfs_file_extent_num_bytes(eb, ei);
+ left_offset = btrfs_file_extent_offset(eb, ei);
+ left_gen = btrfs_file_extent_generation(eb, ei);
/*
* Following comments will refer to these graphics. L is the left
@@ -3810,6 +3858,7 @@ static int is_extent_unchanged(struct send_ctx *sctx,
right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
right_len = btrfs_file_extent_num_bytes(eb, ei);
right_offset = btrfs_file_extent_offset(eb, ei);
+ right_gen = btrfs_file_extent_generation(eb, ei);
if (right_type != BTRFS_FILE_EXTENT_REG) {
ret = 0;
@@ -3820,7 +3869,7 @@ static int is_extent_unchanged(struct send_ctx *sctx,
* Are we at extent 8? If yes, we know the extent is changed.
* This may only happen on the first iteration.
*/
- if (found_key.offset + right_len < ekey->offset) {
+ if (found_key.offset + right_len <= ekey->offset) {
ret = 0;
goto out;
}
@@ -3837,8 +3886,9 @@ static int is_extent_unchanged(struct send_ctx *sctx,
/*
* Check if we have the same extent.
*/
- if (left_disknr + left_offset_fixed !=
- right_disknr + right_offset) {
+ if (left_disknr != right_disknr ||
+ left_offset_fixed != right_offset ||
+ left_gen != right_gen) {
ret = 0;
goto out;
}
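The replaced check compared the sums left_disknr + left_offset_fixed and right_disknr + right_offset, which distinct (disknr, offset) pairs can satisfy by coincidence; the new code compares each field and additionally the extent generation. A small userspace demonstration of the false match the sum-based test allows (the values are made up):

#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t disknr, offset, gen; };

static int same_extent(const struct ext *l, const struct ext *r)
{
	/* compare fields individually; a sum comparison would
	 * wrongly match e.g. (100, 8) against (104, 4) */
	return l->disknr == r->disknr &&
	       l->offset == r->offset &&
	       l->gen == r->gen;
}

int main(void)
{
	struct ext l = { 100, 8, 1 }, r = { 104, 4, 1 };

	printf("sum test: %d, field test: %d\n",
	       l.disknr + l.offset == r.disknr + r.offset,	/* 1: false match */
	       same_extent(&l, &r));				/* 0: correct */
	return 0;
}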
@@ -3971,12 +4021,21 @@ static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
if (sctx->cur_ino == 0)
goto out;
if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
- sctx->cmp_key->type <= BTRFS_INODE_REF_KEY)
+ sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
goto out;
if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
goto out;
ret = process_recorded_refs(sctx);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * We have processed the refs and thus need to advance send_progress.
+ * Now, calls to get_cur_xxx will take the updated refs of the current
+ * inode into account.
+ */
+ sctx->send_progress = sctx->cur_ino + 1;
out:
return ret;
@@ -4004,26 +4063,25 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
goto out;
ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
- &left_mode, &left_uid, &left_gid);
+ &left_mode, &left_uid, &left_gid, NULL);
if (ret < 0)
goto out;
- if (!S_ISLNK(sctx->cur_inode_mode)) {
- if (!sctx->parent_root || sctx->cur_inode_new) {
+ if (!sctx->parent_root || sctx->cur_inode_new) {
+ need_chown = 1;
+ if (!S_ISLNK(sctx->cur_inode_mode))
need_chmod = 1;
- need_chown = 1;
- } else {
- ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
- NULL, NULL, &right_mode, &right_uid,
- &right_gid);
- if (ret < 0)
- goto out;
+ } else {
+ ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
+ NULL, NULL, &right_mode, &right_uid,
+ &right_gid, NULL);
+ if (ret < 0)
+ goto out;
- if (left_uid != right_uid || left_gid != right_gid)
- need_chown = 1;
- if (left_mode != right_mode)
- need_chmod = 1;
- }
+ if (left_uid != right_uid || left_gid != right_gid)
+ need_chown = 1;
+ if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
+ need_chmod = 1;
}
if (S_ISREG(sctx->cur_inode_mode)) {
@@ -4074,7 +4132,12 @@ static int changed_inode(struct send_ctx *sctx,
sctx->cur_ino = key->objectid;
sctx->cur_inode_new_gen = 0;
- sctx->cur_inode_first_ref_orphan = 0;
+
+ /*
+ * Set send_progress to current inode. This will tell all get_cur_xxx
+ * functions that the current inode's refs are not updated yet. Later,
+ * when process_recorded_refs is finished, it is set to cur_ino + 1.
+ */
sctx->send_progress = sctx->cur_ino;
if (result == BTRFS_COMPARE_TREE_NEW ||
@@ -4098,7 +4161,14 @@ static int changed_inode(struct send_ctx *sctx,
right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
right_ii);
- if (left_gen != right_gen)
+
+ /*
+ * The cur_ino = root dir case is special here. We can't treat
+ * the inode as deleted+reused because it would generate a
+ * stream that tries to delete/mkdir the root dir.
+ */
+ if (left_gen != right_gen &&
+ sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
sctx->cur_inode_new_gen = 1;
}
@@ -4111,8 +4181,7 @@ static int changed_inode(struct send_ctx *sctx,
sctx->cur_inode_mode = btrfs_inode_mode(
sctx->left_path->nodes[0], left_ii);
if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
- ret = send_create_inode(sctx, sctx->left_path,
- sctx->cmp_key);
+ ret = send_create_inode_if_needed(sctx);
} else if (result == BTRFS_COMPARE_TREE_DELETED) {
sctx->cur_inode_gen = right_gen;
sctx->cur_inode_new = 0;
@@ -4122,7 +4191,17 @@ static int changed_inode(struct send_ctx *sctx,
sctx->cur_inode_mode = btrfs_inode_mode(
sctx->right_path->nodes[0], right_ii);
} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
+ /*
+ * We need to do some special handling in case the inode was
+ * reported as changed with a changed generation number. This
+ * means that the original inode was deleted and new inode
+ * reused the same inum. So we have to treat the old inode as
+ * deleted and the new one as new.
+ */
if (sctx->cur_inode_new_gen) {
+ /*
+ * First, process the inode as if it was deleted.
+ */
sctx->cur_inode_gen = right_gen;
sctx->cur_inode_new = 0;
sctx->cur_inode_deleted = 1;
@@ -4135,6 +4214,9 @@ static int changed_inode(struct send_ctx *sctx,
if (ret < 0)
goto out;
+ /*
+ * Now process the inode as if it was new.
+ */
sctx->cur_inode_gen = left_gen;
sctx->cur_inode_new = 1;
sctx->cur_inode_deleted = 0;
@@ -4142,14 +4224,23 @@ static int changed_inode(struct send_ctx *sctx,
sctx->left_path->nodes[0], left_ii);
sctx->cur_inode_mode = btrfs_inode_mode(
sctx->left_path->nodes[0], left_ii);
- ret = send_create_inode(sctx, sctx->left_path,
- sctx->cmp_key);
+ ret = send_create_inode_if_needed(sctx);
if (ret < 0)
goto out;
ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
if (ret < 0)
goto out;
+ /*
+ * Advance send_progress now as we did not get into
+ * process_recorded_refs_if_needed in the new_gen case.
+ */
+ sctx->send_progress = sctx->cur_ino + 1;
+
+ /*
+ * Now process all extents and xattrs of the inode as if
+ * they were all new.
+ */
ret = process_all_extents(sctx);
if (ret < 0)
goto out;
@@ -4172,6 +4263,16 @@ out:
return ret;
}
+/*
+ * We have to process new refs before deleted refs, but compare_trees gives us
+ * the new and deleted refs mixed. To fix this, we record the new/deleted refs
+ * first and later process them in process_recorded_refs.
+ * For the cur_inode_new_gen case, we skip recording completely because
+ * changed_inode already initiated processing of refs. The reason for this is
+ * that in this case, compare_tree actually compares the refs of two different
+ * inodes. To fix this, process_all_refs is used in changed_inode to handle all
+ * refs of the right tree as deleted and all refs of the left tree as new.
+ */
static int changed_ref(struct send_ctx *sctx,
enum btrfs_compare_tree_result result)
{
@@ -4192,6 +4293,11 @@ static int changed_ref(struct send_ctx *sctx,
return ret;
}
+/*
+ * Process new/deleted/changed xattrs. We skip processing in the
+ * cur_inode_new_gen case because changed_inode already initiated processing
+ * of xattrs. The reason is the same as in changed_ref.
+ */
static int changed_xattr(struct send_ctx *sctx,
enum btrfs_compare_tree_result result)
{
@@ -4211,6 +4317,11 @@ static int changed_xattr(struct send_ctx *sctx,
return ret;
}
+/*
+ * Process new/deleted/changed extents. We skip processing in the
+ * cur_inode_new_gen case because changed_inode already initiated processing
+ * of extents. The reason is the same as in changed_ref.
+ */
static int changed_extent(struct send_ctx *sctx,
enum btrfs_compare_tree_result result)
{
@@ -4227,7 +4338,10 @@ static int changed_extent(struct send_ctx *sctx,
return ret;
}
-
+/*
+ * Updates compare-related fields in sctx and simply forwards to the actual
+ * changed_xxx functions.
+ */
static int changed_cb(struct btrfs_root *left_root,
struct btrfs_root *right_root,
struct btrfs_path *left_path,
@@ -4247,9 +4361,15 @@ static int changed_cb(struct btrfs_root *left_root,
if (ret < 0)
goto out;
+ /* Ignore non-FS objects */
+ if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
+ key->objectid == BTRFS_FREE_SPACE_OBJECTID)
+ goto out;
+
if (key->type == BTRFS_INODE_ITEM_KEY)
ret = changed_inode(sctx, result);
- else if (key->type == BTRFS_INODE_REF_KEY)
+ else if (key->type == BTRFS_INODE_REF_KEY ||
+ key->type == BTRFS_INODE_EXTREF_KEY)
ret = changed_ref(sctx, result);
else if (key->type == BTRFS_XATTR_ITEM_KEY)
ret = changed_xattr(sctx, result);
@@ -4299,7 +4419,8 @@ join_trans:
}
/*
- * Make sure the tree has not changed
+ * Make sure the tree has not changed after re-joining. We detect this
+ * by comparing start_ctransid and ctransid. They should always match.
*/
spin_lock(&send_root->root_times_lock);
ctransid = btrfs_root_ctransid(&send_root->root_item);
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index 9934e948e57f..1bf4f32fd4ef 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -130,4 +130,5 @@ enum {
#ifdef __KERNEL__
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg);
+int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off);
#endif
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 83d6f9f9c220..915ac14c2064 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -243,12 +243,18 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *function,
unsigned int line, int errno)
{
- WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted");
+ WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted\n");
trans->aborted = errno;
/* Nothing used. The other threads that have joined this
* transaction may be able to continue. */
if (!trans->blocks_used) {
- btrfs_printk(root->fs_info, "Aborting unused transaction.\n");
+ char nbuf[16];
+ const char *errstr;
+
+ errstr = btrfs_decode_error(root->fs_info, errno, nbuf);
+ btrfs_printk(root->fs_info,
+ "%s:%d: Aborting unused transaction(%s).\n",
+ function, line, errstr);
return;
}
trans->transaction->aborted = errno;
@@ -407,7 +413,15 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_set_opt(info->mount_opt, NODATASUM);
break;
case Opt_nodatacow:
- printk(KERN_INFO "btrfs: setting nodatacow\n");
+ if (btrfs_test_opt(root, COMPRESS) ||
+     btrfs_test_opt(root, FORCE_COMPRESS)) {
+ printk(KERN_INFO "btrfs: setting nodatacow, compression disabled\n");
+ } else {
+ printk(KERN_INFO "btrfs: setting nodatacow\n");
+ }
+ info->compress_type = BTRFS_COMPRESS_NONE;
+ btrfs_clear_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
btrfs_set_opt(info->mount_opt, NODATACOW);
btrfs_set_opt(info->mount_opt, NODATASUM);
break;
@@ -422,10 +436,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
compress_type = "zlib";
info->compress_type = BTRFS_COMPRESS_ZLIB;
btrfs_set_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, NODATACOW);
+ btrfs_clear_opt(info->mount_opt, NODATASUM);
} else if (strcmp(args[0].from, "lzo") == 0) {
compress_type = "lzo";
info->compress_type = BTRFS_COMPRESS_LZO;
btrfs_set_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, NODATACOW);
+ btrfs_clear_opt(info->mount_opt, NODATASUM);
btrfs_set_fs_incompat(info, COMPRESS_LZO);
} else if (strncmp(args[0].from, "no", 2) == 0) {
compress_type = "no";
@@ -543,11 +561,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
break;
case Opt_defrag:
- printk(KERN_INFO "btrfs: enabling auto defrag");
+ printk(KERN_INFO "btrfs: enabling auto defrag\n");
btrfs_set_opt(info->mount_opt, AUTO_DEFRAG);
break;
case Opt_recovery:
- printk(KERN_INFO "btrfs: enabling auto recovery");
+ printk(KERN_INFO "btrfs: enabling auto recovery\n");
btrfs_set_opt(info->mount_opt, RECOVERY);
break;
case Opt_skip_balance:
@@ -846,18 +864,15 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
return 0;
}
- btrfs_wait_ordered_extents(root, 0, 0);
-
- spin_lock(&fs_info->trans_lock);
- if (!fs_info->running_transaction) {
- spin_unlock(&fs_info->trans_lock);
- return 0;
- }
- spin_unlock(&fs_info->trans_lock);
+ btrfs_wait_ordered_extents(root, 0);
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
+ trans = btrfs_attach_transaction(root);
+ if (IS_ERR(trans)) {
+ /* no transaction, don't bother */
+ if (PTR_ERR(trans) == -ENOENT)
+ return 0;
return PTR_ERR(trans);
+ }
return btrfs_commit_transaction(trans, root);
}
@@ -1508,17 +1523,21 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
static int btrfs_freeze(struct super_block *sb)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- mutex_lock(&fs_info->transaction_kthread_mutex);
- mutex_lock(&fs_info->cleaner_mutex);
- return 0;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = btrfs_sb(sb)->tree_root;
+
+ trans = btrfs_attach_transaction(root);
+ if (IS_ERR(trans)) {
+ /* no transaction, don't bother */
+ if (PTR_ERR(trans) == -ENOENT)
+ return 0;
+ return PTR_ERR(trans);
+ }
+ return btrfs_commit_transaction(trans, root);
}
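Both btrfs_sync_fs and btrfs_freeze now share the attach-then-commit idiom: btrfs_attach_transaction returns an ERR_PTR-encoded pointer, and -ENOENT specifically means there is no running transaction to commit. A userspace imitation of that error-pointer idiom (the ERR_PTR/PTR_ERR/IS_ERR stand-ins below are simplified assumptions, not the kernel macros):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* simplified userspace stand-ins for the kernel's error-pointer macros */
static void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static int IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }

/* pretend attach: hand back a handle only if a transaction is running */
static void *attach_transaction(int have_trans)
{
	static int dummy;

	return have_trans ? (void *)&dummy : ERR_PTR(-ENOENT);
}

static int freeze(int have_trans)
{
	void *trans = attach_transaction(have_trans);

	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) == -ENOENT)
			return 0;	/* no transaction, don't bother */
		return (int)PTR_ERR(trans);
	}
	return 1;			/* a real fs would commit here */
}

int main(void)
{
	printf("%d %d\n", freeze(0), freeze(1));	/* prints: 0 1 */
	return 0;
}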
static int btrfs_unfreeze(struct super_block *sb)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- mutex_unlock(&fs_info->cleaner_mutex);
- mutex_unlock(&fs_info->transaction_kthread_mutex);
return 0;
}
@@ -1595,7 +1614,7 @@ static int btrfs_interface_init(void)
static void btrfs_interface_exit(void)
{
if (misc_deregister(&btrfs_misc) < 0)
- printk(KERN_INFO "misc_deregister failed for control device");
+ printk(KERN_INFO "btrfs: misc_deregister failed for control device\n");
}
static int __init init_btrfs_fs(void)
@@ -1620,10 +1639,14 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_extent_io;
- err = btrfs_delayed_inode_init();
+ err = ordered_data_init();
if (err)
goto free_extent_map;
+ err = btrfs_delayed_inode_init();
+ if (err)
+ goto free_ordered_data;
+
err = btrfs_interface_init();
if (err)
goto free_delayed_inode;
@@ -1641,6 +1664,8 @@ unregister_ioctl:
btrfs_interface_exit();
free_delayed_inode:
btrfs_delayed_inode_exit();
+free_ordered_data:
+ ordered_data_exit();
free_extent_map:
extent_map_exit();
free_extent_io:
@@ -1657,6 +1682,7 @@ static void __exit exit_btrfs_fs(void)
{
btrfs_destroy_cachep();
btrfs_delayed_inode_exit();
+ ordered_data_exit();
extent_map_exit();
extent_io_exit();
btrfs_interface_exit();
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 27c26004e050..04bbfb1052eb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -53,7 +53,7 @@ static noinline void switch_commit_root(struct btrfs_root *root)
/*
* either allocate a new transaction or hop into the existing one
*/
-static noinline int join_transaction(struct btrfs_root *root, int nofail)
+static noinline int join_transaction(struct btrfs_root *root, int type)
{
struct btrfs_transaction *cur_trans;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -67,7 +67,13 @@ loop:
}
if (fs_info->trans_no_join) {
- if (!nofail) {
+ /*
+ * If we are JOIN_NOLOCK we're already committing a current
+ * transaction; we just need a handle to deal with things like
+ * the inode cache and space cache when committing. It is a
+ * special case.
+ */
+ if (type != TRANS_JOIN_NOLOCK) {
spin_unlock(&fs_info->trans_lock);
return -EBUSY;
}
@@ -87,6 +93,13 @@ loop:
}
spin_unlock(&fs_info->trans_lock);
+ /*
+ * If we are ATTACH, we just want to catch the current transaction
+ * and commit it. If there is no transaction, just return -ENOENT.
+ */
+ if (type == TRANS_ATTACH)
+ return -ENOENT;
+
cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
if (!cur_trans)
return -ENOMEM;
@@ -267,13 +280,6 @@ static void wait_current_trans(struct btrfs_root *root)
}
}
-enum btrfs_trans_type {
- TRANS_START,
- TRANS_JOIN,
- TRANS_USERSPACE,
- TRANS_JOIN_NOLOCK,
-};
-
static int may_wait_transaction(struct btrfs_root *root, int type)
{
if (root->fs_info->log_root_recovering)
@@ -290,7 +296,8 @@ static int may_wait_transaction(struct btrfs_root *root, int type)
}
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
- u64 num_items, int type)
+ u64 num_items, int type,
+ int noflush)
{
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
@@ -324,9 +331,14 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
}
num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
- ret = btrfs_block_rsv_add(root,
- &root->fs_info->trans_block_rsv,
- num_bytes);
+ if (noflush)
+ ret = btrfs_block_rsv_add_noflush(root,
+ &root->fs_info->trans_block_rsv,
+ num_bytes);
+ else
+ ret = btrfs_block_rsv_add(root,
+ &root->fs_info->trans_block_rsv,
+ num_bytes);
if (ret)
return ERR_PTR(ret);
}
@@ -335,19 +347,34 @@ again:
if (!h)
return ERR_PTR(-ENOMEM);
- sb_start_intwrite(root->fs_info->sb);
+ /*
+ * If we are JOIN_NOLOCK we're already committing a transaction and
+ * waiting on this guy, so we don't need to do the sb_start_intwrite
+ * because we're already holding a ref. We need this because we could
+ * have raced in and done an fsync() on a file which can kick a commit
+ * and then we deadlock with somebody doing a freeze.
+ *
+ * If we are ATTACH, it means we just want to catch the current
+ * transaction and commit it, so we needn't do sb_start_intwrite().
+ */
+ if (type < TRANS_JOIN_NOLOCK)
+ sb_start_intwrite(root->fs_info->sb);
if (may_wait_transaction(root, type))
wait_current_trans(root);
do {
- ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
+ ret = join_transaction(root, type);
if (ret == -EBUSY)
wait_current_trans(root);
} while (ret == -EBUSY);
if (ret < 0) {
- sb_end_intwrite(root->fs_info->sb);
+ /* We must get the transaction if we are JOIN_NOLOCK. */
+ BUG_ON(type == TRANS_JOIN_NOLOCK);
+
+ if (type < TRANS_JOIN_NOLOCK)
+ sb_end_intwrite(root->fs_info->sb);
kmem_cache_free(btrfs_trans_handle_cachep, h);
return ERR_PTR(ret);
}
@@ -367,7 +394,9 @@ again:
h->aborted = 0;
h->qgroup_reserved = qgroup_reserved;
h->delayed_ref_elem.seq = 0;
+ h->type = type;
INIT_LIST_HEAD(&h->qgroup_ref_list);
+ INIT_LIST_HEAD(&h->new_bgs);
smp_mb();
if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -393,21 +422,33 @@ got_it:
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
int num_items)
{
- return start_transaction(root, num_items, TRANS_START);
+ return start_transaction(root, num_items, TRANS_START, 0);
+}
+
+struct btrfs_trans_handle *btrfs_start_transaction_noflush(
+ struct btrfs_root *root, int num_items)
+{
+ return start_transaction(root, num_items, TRANS_START, 1);
}
+
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
- return start_transaction(root, 0, TRANS_JOIN);
+ return start_transaction(root, 0, TRANS_JOIN, 0);
}
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
- return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
+ return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
- return start_transaction(root, 0, TRANS_USERSPACE);
+ return start_transaction(root, 0, TRANS_USERSPACE, 0);
+}
+
+struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
+{
+ return start_transaction(root, 0, TRANS_ATTACH, 0);
}
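With TRANS_ATTACH added, the handle type also acts as an ordering: the type < TRANS_JOIN_NOLOCK checks in start_transaction and __btrfs_end_transaction rely on START/JOIN/USERSPACE sorting below JOIN_NOLOCK and ATTACH. A sketch of that convention (the exact enum values here are an assumption consistent with the comparisons in this diff):

#include <stdio.h>

/* order matters: the < TRANS_JOIN_NOLOCK comparisons rely on it */
enum trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
	TRANS_ATTACH,
};

static int takes_intwrite(enum trans_type type)
{
	return type < TRANS_JOIN_NOLOCK;
}

int main(void)
{
	printf("START=%d JOIN_NOLOCK=%d ATTACH=%d\n",
	       takes_intwrite(TRANS_START),		/* 1 */
	       takes_intwrite(TRANS_JOIN_NOLOCK),	/* 0 */
	       takes_intwrite(TRANS_ATTACH));		/* 0 */
	return 0;
}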
/* wait for a transaction commit to be fully complete */
@@ -506,11 +547,12 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
}
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int throttle, int lock)
+ struct btrfs_root *root, int throttle)
{
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *info = root->fs_info;
int count = 0;
+ int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;
if (--trans->use_count) {
@@ -536,6 +578,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
trans->qgroup_reserved = 0;
}
+ if (!list_empty(&trans->new_bgs))
+ btrfs_create_pending_block_groups(trans, root);
+
while (count < 2) {
unsigned long cur = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
@@ -551,7 +596,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
- sb_end_intwrite(root->fs_info->sb);
+ if (!list_empty(&trans->new_bgs))
+ btrfs_create_pending_block_groups(trans, root);
if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
should_end_transaction(trans, root)) {
@@ -573,6 +619,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
}
}
+ if (trans->type < TRANS_JOIN_NOLOCK)
+ sb_end_intwrite(root->fs_info->sb);
+
WARN_ON(cur_trans != info->running_transaction);
WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
atomic_dec(&cur_trans->num_writers);
@@ -604,7 +653,7 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans,
{
int ret;
- ret = __btrfs_end_transaction(trans, root, 0, 1);
+ ret = __btrfs_end_transaction(trans, root, 0);
if (ret)
return ret;
return 0;
@@ -615,18 +664,7 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
{
int ret;
- ret = __btrfs_end_transaction(trans, root, 1, 1);
- if (ret)
- return ret;
- return 0;
-}
-
-int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
-{
- int ret;
-
- ret = __btrfs_end_transaction(trans, root, 0, 0);
+ ret = __btrfs_end_transaction(trans, root, 1);
if (ret)
return ret;
return 0;
@@ -635,7 +673,7 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- return __btrfs_end_transaction(trans, root, 1, 1);
+ return __btrfs_end_transaction(trans, root, 1);
}
/*
@@ -649,13 +687,15 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
int err = 0;
int werr = 0;
struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
+ struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
- mark)) {
- convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
- GFP_NOFS);
+ mark, &cached_state)) {
+ convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
+ mark, &cached_state, GFP_NOFS);
+ cached_state = NULL;
err = filemap_fdatawrite_range(mapping, start, end);
if (err)
werr = err;
@@ -679,12 +719,14 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
int err = 0;
int werr = 0;
struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
+ struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
- EXTENT_NEED_WAIT)) {
- clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
+ EXTENT_NEED_WAIT, &cached_state)) {
+ clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
+ 0, 0, &cached_state, GFP_NOFS);
err = filemap_fdatawait_range(mapping, start, end);
if (err)
werr = err;
@@ -955,6 +997,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_root *parent_root;
struct btrfs_block_rsv *rsv;
struct inode *parent_inode;
+ struct btrfs_path *path;
+ struct btrfs_dir_item *dir_item;
struct dentry *parent;
struct dentry *dentry;
struct extent_buffer *tmp;
@@ -967,18 +1011,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
u64 root_flags;
uuid_le new_uuid;
- rsv = trans->block_rsv;
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = pending->error = -ENOMEM;
+ goto path_alloc_fail;
+ }
new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
if (!new_root_item) {
ret = pending->error = -ENOMEM;
- goto fail;
+ goto root_item_alloc_fail;
}
ret = btrfs_find_free_objectid(tree_root, &objectid);
if (ret) {
pending->error = ret;
- goto fail;
+ goto no_free_objectid;
}
btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
@@ -988,22 +1036,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
to_reserve);
if (ret) {
pending->error = ret;
- goto fail;
+ goto no_free_objectid;
}
}
ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
objectid, pending->inherit);
- kfree(pending->inherit);
if (ret) {
pending->error = ret;
- goto fail;
+ goto no_free_objectid;
}
key.objectid = objectid;
key.offset = (u64)-1;
key.type = BTRFS_ROOT_ITEM_KEY;
+ rsv = trans->block_rsv;
trans->block_rsv = &pending->block_rsv;
dentry = pending->dentry;
@@ -1017,24 +1065,21 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
*/
ret = btrfs_set_inode_index(parent_inode, &index);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_insert_dir_item(trans, parent_root,
- dentry->d_name.name, dentry->d_name.len,
- parent_inode, &key,
- BTRFS_FT_DIR, index);
- if (ret == -EEXIST) {
+
+ /* check if there is a file/dir which has the same name. */
+ dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
+ btrfs_ino(parent_inode),
+ dentry->d_name.name,
+ dentry->d_name.len, 0);
+ if (dir_item != NULL && !IS_ERR(dir_item)) {
pending->error = -EEXIST;
- dput(parent);
goto fail;
- } else if (ret) {
- goto abort_trans_dput;
+ } else if (IS_ERR(dir_item)) {
+ ret = PTR_ERR(dir_item);
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
}
-
- btrfs_i_size_write(parent_inode, parent_inode->i_size +
- dentry->d_name.len * 2);
- parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
- ret = btrfs_update_inode(trans, parent_root, parent_inode);
- if (ret)
- goto abort_trans_dput;
+ btrfs_release_path(path);
/*
* pull in the delayed directory update
@@ -1043,8 +1088,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* snapshot
*/
ret = btrfs_run_delayed_items(trans, root);
- if (ret) { /* Transaction aborted */
- dput(parent);
+ if (ret) { /* Transaction aborted */
+ btrfs_abort_transaction(trans, root, ret);
goto fail;
}
@@ -1079,7 +1124,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
if (ret) {
btrfs_tree_unlock(old);
free_extent_buffer(old);
- goto abort_trans_dput;
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
}
btrfs_set_lock_blocking(old);
@@ -1088,8 +1134,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
/* clean up in any case */
btrfs_tree_unlock(old);
free_extent_buffer(old);
- if (ret)
- goto abort_trans_dput;
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
+ }
/* see comments in should_cow_block() */
root->force_cow = 1;
@@ -1101,8 +1149,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
btrfs_tree_unlock(tmp);
free_extent_buffer(tmp);
- if (ret)
- goto abort_trans_dput;
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
+ }
/*
* insert root back/forward references
@@ -1111,32 +1161,58 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
parent_root->root_key.objectid,
btrfs_ino(parent_inode), index,
dentry->d_name.name, dentry->d_name.len);
- dput(parent);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
goto fail;
+ }
key.offset = (u64)-1;
pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
- goto abort_trans;
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
}
ret = btrfs_reloc_post_snapshot(trans, pending);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
+ }
+
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
+ }
+
+ ret = btrfs_insert_dir_item(trans, parent_root,
+ dentry->d_name.name, dentry->d_name.len,
+ parent_inode, &key,
+ BTRFS_FT_DIR, index);
+ /* We have checked the name at the beginning, so a duplicate is impossible. */
+ BUG_ON(ret == -EEXIST);
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto fail;
+ }
+
+ btrfs_i_size_write(parent_inode, parent_inode->i_size +
+ dentry->d_name.len * 2);
+ parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
+ ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
if (ret)
- goto abort_trans;
- ret = 0;
+ btrfs_abort_transaction(trans, root, ret);
fail:
- kfree(new_root_item);
+ dput(parent);
trans->block_rsv = rsv;
+no_free_objectid:
+ kfree(new_root_item);
+root_item_alloc_fail:
+ btrfs_free_path(path);
+path_alloc_fail:
btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
return ret;
-
-abort_trans_dput:
- dput(parent);
-abort_trans:
- btrfs_abort_transaction(trans, root, ret);
- goto fail;
}
/*
@@ -1229,6 +1305,16 @@ static void do_async_commit(struct work_struct *work)
struct btrfs_async_commit *ac =
container_of(work, struct btrfs_async_commit, work.work);
+ /*
+ * We've got freeze protection passed with the transaction.
+ * Tell lockdep about it.
+ */
+ rwsem_acquire_read(
+ &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 0, 1, _THIS_IP_);
+
+ current->journal_info = ac->newtrans;
+
btrfs_commit_transaction(ac->newtrans, ac->root);
kfree(ac);
}
@@ -1258,6 +1344,14 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
atomic_inc(&cur_trans->use_count);
btrfs_end_transaction(trans, root);
+
+ /*
+ * Tell lockdep we've released the freeze rwsem, since the
+ * async commit thread will be the one to unlock it.
+ */
+ rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 1, _THIS_IP_);
+
schedule_delayed_work(&ac->work, 0);
/* wait for transaction to start and unblock */
@@ -1348,6 +1442,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/
cur_trans->delayed_refs.flushing = 1;
+ if (!list_empty(&trans->new_bgs))
+ btrfs_create_pending_block_groups(trans, root);
+
ret = btrfs_run_delayed_refs(trans, root, 0);
if (ret)
goto cleanup_transaction;
@@ -1403,7 +1500,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
if (flush_on_commit || snap_pending) {
btrfs_start_delalloc_inodes(root, 1);
- btrfs_wait_ordered_extents(root, 0, 1);
+ btrfs_wait_ordered_extents(root, 1);
}
ret = btrfs_run_delayed_items(trans, root);
@@ -1456,13 +1553,28 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/
mutex_lock(&root->fs_info->reloc_mutex);
- ret = btrfs_run_delayed_items(trans, root);
+ /*
+ * We needn't worry about the delayed items because we will
+ * deal with them in create_pending_snapshot(), which is the
+ * core function of snapshot creation.
+ */
+ ret = create_pending_snapshots(trans, root->fs_info);
if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex);
goto cleanup_transaction;
}
- ret = create_pending_snapshots(trans, root->fs_info);
+ /*
+ * We insert the dir indexes of the snapshots and update the inodes
+ * of the snapshots' parents after creating the snapshots, so there
+ * are some delayed items which are not dealt with. Now deal with
+ * them.
+ *
+ * We needn't worry that this operation will corrupt the snapshots,
+ * because all the trees which are snapshotted will be forced to COW
+ * the nodes and leaves.
+ */
+ ret = btrfs_run_delayed_items(trans, root);
if (ret) {
mutex_unlock(&root->fs_info->reloc_mutex);
goto cleanup_transaction;
@@ -1584,7 +1696,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
put_transaction(cur_trans);
put_transaction(cur_trans);
- sb_end_intwrite(root->fs_info->sb);
+ if (trans->type < TRANS_JOIN_NOLOCK)
+ sb_end_intwrite(root->fs_info->sb);
trace_btrfs_transaction_commit(root);
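
The transaction.c changes above fold the old per-call lock argument into the handle itself: __btrfs_end_transaction() now derives the locking behaviour from trans->type, so callers can no longer pass a flag that disagrees with how the transaction was started. A minimal userspace sketch of that pattern (names illustrative, not kernel code):

    /* Userspace sketch: derive the locking behaviour from a type stored
     * in the handle instead of a flag threaded through every caller. */
    #include <stdio.h>

    enum trans_type { TRANS_START, TRANS_JOIN, TRANS_USERSPACE,
                      TRANS_JOIN_NOLOCK, TRANS_ATTACH };

    struct trans_handle { enum trans_type type; };

    static void end_transaction(struct trans_handle *h)
    {
            /* one source of truth: the handle's type */
            int lock = (h->type != TRANS_JOIN_NOLOCK);
            printf("type=%d lock=%d\n", (int)h->type, lock);
    }

    int main(void)
    {
            struct trans_handle a = { TRANS_START };
            struct trans_handle b = { TRANS_JOIN_NOLOCK };
            end_transaction(&a);    /* lock=1 */
            end_transaction(&b);    /* lock=0 */
            return 0;
    }
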
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index e8b8416c688b..80961947a6b2 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -47,6 +47,14 @@ struct btrfs_transaction {
int aborted;
};
+enum btrfs_trans_type {
+ TRANS_START,
+ TRANS_JOIN,
+ TRANS_USERSPACE,
+ TRANS_JOIN_NOLOCK,
+ TRANS_ATTACH,
+};
+
struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
@@ -58,8 +66,9 @@ struct btrfs_trans_handle {
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
- int aborted;
- int adding_csums;
+ short aborted;
+ short adding_csums;
+ enum btrfs_trans_type type;
/*
* this root is only needed to validate that the root passed to
* start_transaction is the same as the one passed to end_transaction.
@@ -68,6 +77,7 @@ struct btrfs_trans_handle {
struct btrfs_root *root;
struct seq_list delayed_ref_elem;
struct list_head qgroup_ref_list;
+ struct list_head new_bgs;
};
struct btrfs_pending_snapshot {
@@ -88,16 +98,18 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
{
BTRFS_I(inode)->last_trans = trans->transaction->transid;
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
+ BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
- struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
int num_items);
+struct btrfs_trans_handle *btrfs_start_transaction_noflush(
+ struct btrfs_root *root, int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
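
Note that the placement of TRANS_JOIN_NOLOCK and TRANS_ATTACH in the new enum is load-bearing: transaction.c tests trans->type < TRANS_JOIN_NOLOCK to decide whether sb_end_intwrite() must be called, so only the first three types are assumed to hold freeze protection. A small sketch of that ordering dependency (illustrative only):

    /* Sketch of the ordering dependency: every type declared before
     * TRANS_JOIN_NOLOCK is assumed to hold sb-level freeze protection. */
    #include <assert.h>

    enum trans_type { TRANS_START, TRANS_JOIN, TRANS_USERSPACE,
                      TRANS_JOIN_NOLOCK, TRANS_ATTACH };

    static int holds_intwrite(enum trans_type t)
    {
            return t < TRANS_JOIN_NOLOCK;   /* relies on declaration order */
    }

    int main(void)
    {
            assert(holds_intwrite(TRANS_START));
            assert(holds_intwrite(TRANS_USERSPACE));
            assert(!holds_intwrite(TRANS_JOIN_NOLOCK));
            assert(!holds_intwrite(TRANS_ATTACH));
            return 0;
    }
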
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c86670f4f285..81e407d9677a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -18,13 +18,16 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/list_sort.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
+#include "backref.h"
#include "compat.h"
#include "tree-log.h"
+#include "hash.h"
/* magic values for the inode_only field in btrfs_log_inode:
*
@@ -146,7 +149,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
root->log_multiple_pids = true;
}
- root->log_batch++;
+ atomic_inc(&root->log_batch);
atomic_inc(&root->log_writers);
mutex_unlock(&root->log_mutex);
return 0;
@@ -165,7 +168,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
err = ret;
}
mutex_unlock(&root->fs_info->tree_log_mutex);
- root->log_batch++;
+ atomic_inc(&root->log_batch);
atomic_inc(&root->log_writers);
mutex_unlock(&root->log_mutex);
return err;
@@ -484,7 +487,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
int found_type;
u64 mask = root->sectorsize - 1;
u64 extent_end;
- u64 alloc_hint;
u64 start = key->offset;
u64 saved_nbytes;
struct btrfs_file_extent_item *item;
@@ -550,8 +552,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
- ret = btrfs_drop_extents(trans, inode, start, extent_end,
- &alloc_hint, 1);
+ ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
BUG_ON(ret);
if (found_type == BTRFS_FILE_EXTENT_REG ||
@@ -744,6 +745,7 @@ out:
*/
static noinline int backref_in_log(struct btrfs_root *log,
struct btrfs_key *key,
+ u64 ref_objectid,
char *name, int namelen)
{
struct btrfs_path *path;
@@ -764,8 +766,17 @@ static noinline int backref_in_log(struct btrfs_root *log,
if (ret != 0)
goto out;
- item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
+
+ if (key->type == BTRFS_INODE_EXTREF_KEY) {
+ if (btrfs_find_name_in_ext_backref(path, ref_objectid,
+ name, namelen, NULL))
+ match = 1;
+
+ goto out;
+ }
+
+ item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
ref = (struct btrfs_inode_ref *)ptr;
@@ -786,91 +797,42 @@ out:
return match;
}
-
-/*
- * replay one inode back reference item found in the log tree.
- * eb, slot and key refer to the buffer and key found in the log tree.
- * root is the destination we are replaying into, and path is for temp
- * use by this function. (it should be released on return).
- */
-static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct btrfs_root *log,
struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+ struct btrfs_root *log_root,
+ struct inode *dir, struct inode *inode,
+ struct extent_buffer *eb,
+ u64 inode_objectid, u64 parent_objectid,
+ u64 ref_index, char *name, int namelen,
+ int *search_done)
{
- struct btrfs_inode_ref *ref;
- struct btrfs_dir_item *di;
- struct inode *dir;
- struct inode *inode;
- unsigned long ref_ptr;
- unsigned long ref_end;
- char *name;
- int namelen;
int ret;
- int search_done = 0;
-
- /*
- * it is possible that we didn't log all the parent directories
- * for a given inode. If we don't find the dir, just don't
- * copy the back ref in. The link count fixup code will take
- * care of the rest
- */
- dir = read_one_inode(root, key->offset);
- if (!dir)
- return -ENOENT;
-
- inode = read_one_inode(root, key->objectid);
- if (!inode) {
- iput(dir);
- return -EIO;
- }
-
- ref_ptr = btrfs_item_ptr_offset(eb, slot);
- ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
+ char *victim_name;
+ int victim_name_len;
+ struct extent_buffer *leaf;
+ struct btrfs_dir_item *di;
+ struct btrfs_key search_key;
+ struct btrfs_inode_extref *extref;
again:
- ref = (struct btrfs_inode_ref *)ref_ptr;
-
- namelen = btrfs_inode_ref_name_len(eb, ref);
- name = kmalloc(namelen, GFP_NOFS);
- BUG_ON(!name);
-
- read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
-
- /* if we already have a perfect match, we're done */
- if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
- btrfs_inode_ref_index(eb, ref),
- name, namelen)) {
- goto out;
- }
-
- /*
- * look for a conflicting back reference in the metadata.
- * if we find one we have to unlink that name of the file
- * before we add our new link. Later on, we overwrite any
- * existing back reference, and we don't want to create
- * dangling pointers in the directory.
- */
-
- if (search_done)
- goto insert;
-
- ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ /* Search old style refs */
+ search_key.objectid = inode_objectid;
+ search_key.type = BTRFS_INODE_REF_KEY;
+ search_key.offset = parent_objectid;
+ ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret == 0) {
- char *victim_name;
- int victim_name_len;
struct btrfs_inode_ref *victim_ref;
unsigned long ptr;
unsigned long ptr_end;
- struct extent_buffer *leaf = path->nodes[0];
+
+ leaf = path->nodes[0];
/* are we trying to overwrite a back ref for the root directory
* if so, just jump out, we're done
*/
- if (key->objectid == key->offset)
- goto out_nowrite;
+ if (search_key.objectid == search_key.offset)
+ return 1;
/* check all the names in this back reference to see
* if they are in the log. if so, we allow them to stay
@@ -889,7 +851,9 @@ again:
(unsigned long)(victim_ref + 1),
victim_name_len);
- if (!backref_in_log(log, key, victim_name,
+ if (!backref_in_log(log_root, &search_key,
+ parent_objectid,
+ victim_name,
victim_name_len)) {
btrfs_inc_nlink(inode);
btrfs_release_path(path);
@@ -897,9 +861,14 @@ again:
ret = btrfs_unlink_inode(trans, root, dir,
inode, victim_name,
victim_name_len);
+ BUG_ON(ret);
btrfs_run_delayed_items(trans, root);
+ kfree(victim_name);
+ *search_done = 1;
+ goto again;
}
kfree(victim_name);
+
ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
}
BUG_ON(ret);
@@ -908,14 +877,78 @@ again:
* NOTE: we have searched the root tree and checked the
* corresponding ref, so it does not need to be checked again.
*/
- search_done = 1;
+ *search_done = 1;
+ }
+ btrfs_release_path(path);
+
+ /* Same search but for extended refs */
+ extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
+ inode_objectid, parent_objectid, 0,
+ 0);
+ if (!IS_ERR_OR_NULL(extref)) {
+ u32 item_size;
+ u32 cur_offset = 0;
+ unsigned long base;
+ struct inode *victim_parent;
+
+ leaf = path->nodes[0];
+
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ base = btrfs_item_ptr_offset(leaf, path->slots[0]);
+
+ while (cur_offset < item_size) {
+ extref = (struct btrfs_inode_extref *)(base + cur_offset);
+
+ victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
+
+ if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
+ goto next;
+
+ victim_name = kmalloc(victim_name_len, GFP_NOFS);
+ read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
+ victim_name_len);
+
+ search_key.objectid = inode_objectid;
+ search_key.type = BTRFS_INODE_EXTREF_KEY;
+ search_key.offset = btrfs_extref_hash(parent_objectid,
+ victim_name,
+ victim_name_len);
+ ret = 0;
+ if (!backref_in_log(log_root, &search_key,
+ parent_objectid, victim_name,
+ victim_name_len)) {
+ ret = -ENOENT;
+ victim_parent = read_one_inode(root,
+ parent_objectid);
+ if (victim_parent) {
+ btrfs_inc_nlink(inode);
+ btrfs_release_path(path);
+
+ ret = btrfs_unlink_inode(trans, root,
+ victim_parent,
+ inode,
+ victim_name,
+ victim_name_len);
+ btrfs_run_delayed_items(trans, root);
+ }
+ BUG_ON(ret);
+ iput(victim_parent);
+ kfree(victim_name);
+ *search_done = 1;
+ goto again;
+ }
+ kfree(victim_name);
+ BUG_ON(ret);
+next:
+ cur_offset += victim_name_len + sizeof(*extref);
+ }
+ *search_done = 1;
}
btrfs_release_path(path);
/* look for a conflicting sequence number */
di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
- btrfs_inode_ref_index(eb, ref),
- name, namelen, 0);
+ ref_index, name, namelen, 0);
if (di && !IS_ERR(di)) {
ret = drop_one_dir_item(trans, root, path, dir, di);
BUG_ON(ret);
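
Extended inode refs are keyed by a hash of the parent objectid and the name stored in the key offset, which is why the conflict search above recomputes btrfs_extref_hash() before probing the log tree. A userspace sketch of building such a search key; the kernel's hash (added in fs/btrfs/hash.h) is crc32c seeded with the parent objectid, and the djb2-style hash below is only a stand-in:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct key { uint64_t objectid; uint8_t type; uint64_t offset; };
    #define INODE_EXTREF_KEY 13     /* stand-in for BTRFS_INODE_EXTREF_KEY */

    /* stand-in hash: the kernel uses crc32c(parent_objectid, name, len) */
    static uint64_t extref_hash(uint64_t parent, const char *name, size_t len)
    {
            uint64_t h = parent;            /* seed with the parent ino */
            for (size_t i = 0; i < len; i++)
                    h = h * 33 + (uint8_t)name[i];
            return h;
    }

    int main(void)
    {
            const char *name = "victim";
            struct key k = {
                    .objectid = 257,        /* inode whose refs we replay */
                    .type = INODE_EXTREF_KEY,
                    .offset = extref_hash(256, name, strlen(name)),
            };
            printf("search key (%llu, %u, 0x%llx)\n",
                   (unsigned long long)k.objectid, (unsigned)k.type,
                   (unsigned long long)k.offset);
            return 0;
    }
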
@@ -931,25 +964,173 @@ again:
}
btrfs_release_path(path);
-insert:
- /* insert our name */
- ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
- btrfs_inode_ref_index(eb, ref));
- BUG_ON(ret);
+ return 0;
+}
- btrfs_update_inode(trans, root, inode);
+static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ u32 *namelen, char **name, u64 *index,
+ u64 *parent_objectid)
+{
+ struct btrfs_inode_extref *extref;
-out:
- ref_ptr = (unsigned long)(ref + 1) + namelen;
- kfree(name);
- if (ref_ptr < ref_end)
- goto again;
+ extref = (struct btrfs_inode_extref *)ref_ptr;
+
+ *namelen = btrfs_inode_extref_name_len(eb, extref);
+ *name = kmalloc(*namelen, GFP_NOFS);
+ if (*name == NULL)
+ return -ENOMEM;
+
+ read_extent_buffer(eb, *name, (unsigned long)&extref->name,
+ *namelen);
+
+ *index = btrfs_inode_extref_index(eb, extref);
+ if (parent_objectid)
+ *parent_objectid = btrfs_inode_extref_parent(eb, extref);
+
+ return 0;
+}
+
+static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ u32 *namelen, char **name, u64 *index)
+{
+ struct btrfs_inode_ref *ref;
+
+ ref = (struct btrfs_inode_ref *)ref_ptr;
+
+ *namelen = btrfs_inode_ref_name_len(eb, ref);
+ *name = kmalloc(*namelen, GFP_NOFS);
+ if (*name == NULL)
+ return -ENOMEM;
+
+ read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
+
+ *index = btrfs_inode_ref_index(eb, ref);
+
+ return 0;
+}
+
+/*
+ * replay one inode back reference item found in the log tree.
+ * eb, slot and key refer to the buffer and key found in the log tree.
+ * root is the destination we are replaying into, and path is for temp
+ * use by this function. (it should be released on return).
+ */
+static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_root *log,
+ struct btrfs_path *path,
+ struct extent_buffer *eb, int slot,
+ struct btrfs_key *key)
+{
+ struct inode *dir;
+ struct inode *inode;
+ unsigned long ref_ptr;
+ unsigned long ref_end;
+ char *name;
+ int namelen;
+ int ret;
+ int search_done = 0;
+ int log_ref_ver = 0;
+ u64 parent_objectid;
+ u64 inode_objectid;
+ u64 ref_index = 0;
+ int ref_struct_size;
+
+ ref_ptr = btrfs_item_ptr_offset(eb, slot);
+ ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
+
+ if (key->type == BTRFS_INODE_EXTREF_KEY) {
+ struct btrfs_inode_extref *r;
+
+ ref_struct_size = sizeof(struct btrfs_inode_extref);
+ log_ref_ver = 1;
+ r = (struct btrfs_inode_extref *)ref_ptr;
+ parent_objectid = btrfs_inode_extref_parent(eb, r);
+ } else {
+ ref_struct_size = sizeof(struct btrfs_inode_ref);
+ parent_objectid = key->offset;
+ }
+ inode_objectid = key->objectid;
+
+ /*
+ * it is possible that we didn't log all the parent directories
+ * for a given inode. If we don't find the dir, just don't
+ * copy the back ref in. The link count fixup code will take
+ * care of the rest
+ */
+ dir = read_one_inode(root, parent_objectid);
+ if (!dir)
+ return -ENOENT;
+
+ inode = read_one_inode(root, inode_objectid);
+ if (!inode) {
+ iput(dir);
+ return -EIO;
+ }
+
+ while (ref_ptr < ref_end) {
+ if (log_ref_ver) {
+ ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
+ &ref_index, &parent_objectid);
+ /*
+ * parent object can change from one array
+ * item to another.
+ */
+ if (!dir)
+ dir = read_one_inode(root, parent_objectid);
+ if (!dir)
+ return -ENOENT;
+ } else {
+ ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
+ &ref_index);
+ }
+ if (ret)
+ return ret;
+
+ /* if we already have a perfect match, we're done */
+ if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
+ ref_index, name, namelen)) {
+ /*
+ * look for a conflicting back reference in the
+ * metadata. if we find one we have to unlink that name
+ * of the file before we add our new link. Later on, we
+ * overwrite any existing back reference, and we don't
+ * want to create dangling pointers in the directory.
+ */
+
+ if (!search_done) {
+ ret = __add_inode_ref(trans, root, path, log,
+ dir, inode, eb,
+ inode_objectid,
+ parent_objectid,
+ ref_index, name, namelen,
+ &search_done);
+ if (ret == 1)
+ goto out;
+ BUG_ON(ret);
+ }
+
+ /* insert our name */
+ ret = btrfs_add_link(trans, dir, inode, name, namelen,
+ 0, ref_index);
+ BUG_ON(ret);
+
+ btrfs_update_inode(trans, root, inode);
+ }
+
+ ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
+ kfree(name);
+ if (log_ref_ver) {
+ iput(dir);
+ dir = NULL;
+ }
+ }
/* finally write the back reference in the inode */
ret = overwrite_item(trans, root, path, eb, slot, key);
BUG_ON(ret);
-out_nowrite:
+out:
btrfs_release_path(path);
iput(dir);
iput(inode);
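
Both ref flavours pack several variable-length records into a single item, so the rewritten add_inode_ref() walks the payload record by record: read a fixed header, pull out the name bytes, then advance by header size plus name length. A self-contained sketch of that walk (the two-byte header is illustrative, not the on-disk layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ref_hdr { uint16_t name_len; }; /* illustrative header only */

    int main(void)
    {
            uint8_t item[64];
            size_t end = 0;
            const char *names[] = { "foo", "barbaz" };

            /* pack two records back to back: header, then the name bytes */
            for (int i = 0; i < 2; i++) {
                    struct ref_hdr h = { (uint16_t)strlen(names[i]) };
                    memcpy(item + end, &h, sizeof(h));
                    memcpy(item + end + sizeof(h), names[i], h.name_len);
                    end += sizeof(h) + h.name_len;
            }

            /* the walk: advance by header size plus name length each round */
            for (size_t ptr = 0; ptr < end; ) {
                    struct ref_hdr h;

                    memcpy(&h, item + ptr, sizeof(h));
                    printf("%.*s\n", (int)h.name_len,
                           (char *)(item + ptr + sizeof(h)));
                    ptr += sizeof(h) + h.name_len;
            }
            return 0;
    }
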
@@ -966,25 +1147,55 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans,
return ret;
}
+static int count_inode_extrefs(struct btrfs_root *root,
+ struct inode *inode, struct btrfs_path *path)
+{
+ int ret = 0;
+ int name_len;
+ unsigned int nlink = 0;
+ u32 item_size;
+ u32 cur_offset = 0;
+ u64 inode_objectid = btrfs_ino(inode);
+ u64 offset = 0;
+ unsigned long ptr;
+ struct btrfs_inode_extref *extref;
+ struct extent_buffer *leaf;
-/*
- * There are a few corners where the link count of the file can't
- * be properly maintained during replay. So, instead of adding
- * lots of complexity to the log code, we just scan the backrefs
- * for any file that has been through replay.
- *
- * The scan will update the link count on the inode to reflect the
- * number of back refs found. If it goes down to zero, the iput
- * will free the inode.
- */
-static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode)
+ while (1) {
+ ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
+ &extref, &offset);
+ if (ret)
+ break;
+
+ leaf = path->nodes[0];
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+
+ while (cur_offset < item_size) {
+ extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
+ name_len = btrfs_inode_extref_name_len(leaf, extref);
+
+ nlink++;
+
+ cur_offset += name_len + sizeof(*extref);
+ }
+
+ offset++;
+ btrfs_release_path(path);
+ }
+ btrfs_release_path(path);
+
+ if (ret < 0)
+ return ret;
+ return nlink;
+}
+
+static int count_inode_refs(struct btrfs_root *root,
+ struct inode *inode, struct btrfs_path *path)
{
- struct btrfs_path *path;
int ret;
struct btrfs_key key;
- u64 nlink = 0;
+ unsigned int nlink = 0;
unsigned long ptr;
unsigned long ptr_end;
int name_len;
@@ -994,10 +1205,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -1031,6 +1238,50 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
}
btrfs_release_path(path);
+
+ return nlink;
+}
+
+/*
+ * There are a few corners where the link count of the file can't
+ * be properly maintained during replay. So, instead of adding
+ * lots of complexity to the log code, we just scan the backrefs
+ * for any file that has been through replay.
+ *
+ * The scan will update the link count on the inode to reflect the
+ * number of back refs found. If it goes down to zero, the iput
+ * will free the inode.
+ */
+static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode)
+{
+ struct btrfs_path *path;
+ int ret;
+ u64 nlink = 0;
+ u64 ino = btrfs_ino(inode);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = count_inode_refs(root, inode, path);
+ if (ret < 0)
+ goto out;
+
+ nlink = ret;
+
+ ret = count_inode_extrefs(root, inode, path);
+ if (ret == -ENOENT)
+ ret = 0;
+
+ if (ret < 0)
+ goto out;
+
+ nlink += ret;
+
+ ret = 0;
+
if (nlink != inode->i_nlink) {
set_nlink(inode, nlink);
btrfs_update_inode(trans, root, inode);
@@ -1046,9 +1297,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
ret = insert_orphan_item(trans, root, ino);
BUG_ON(ret);
}
- btrfs_free_path(path);
- return 0;
+out:
+ btrfs_free_path(path);
+ return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
@@ -1695,6 +1947,10 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
ret = add_inode_ref(wc->trans, root, log, path,
eb, i, &key);
BUG_ON(ret && ret != -ENOENT);
+ } else if (key.type == BTRFS_INODE_EXTREF_KEY) {
+ ret = add_inode_ref(wc->trans, root, log, path,
+ eb, i, &key);
+ BUG_ON(ret && ret != -ENOENT);
} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
ret = replay_one_extent(wc->trans, root, path,
eb, i, &key);
@@ -2037,7 +2293,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
wait_log_commit(trans, root, root->log_transid - 1);
while (1) {
- unsigned long batch = root->log_batch;
+ int batch = atomic_read(&root->log_batch);
/* when we're on an ssd, just kick the log commit out */
if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
mutex_unlock(&root->log_mutex);
@@ -2045,7 +2301,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_lock(&root->log_mutex);
}
wait_for_writer(trans, root);
- if (batch == root->log_batch)
+ if (batch == atomic_read(&root->log_batch))
break;
}
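
With log_batch now an atomic_t, writers can bump it without holding the log mutex, and the commit path uses a sample/wait/re-sample loop: only when the counter is unchanged across the wait has the log stopped growing. A userspace sketch of the pattern, with C11 atomics standing in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int log_batch;

    static void writer_joins(void)
    {
            atomic_fetch_add(&log_batch, 1);        /* no mutex needed */
    }

    static int log_stopped_growing(void)
    {
            int batch = atomic_load(&log_batch);
            /* ... wait_for_writer() would sleep here ... */
            return batch == atomic_load(&log_batch);
    }

    int main(void)
    {
            writer_joins();
            printf("quiesced: %d\n", log_stopped_growing());
            return 0;
    }
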
@@ -2074,7 +2330,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_root_node(&log->root_item, log->node);
- root->log_batch = 0;
root->log_transid++;
log->log_transid = root->log_transid;
root->log_start_pid = 0;
@@ -2087,7 +2342,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&root->log_mutex);
mutex_lock(&log_root_tree->log_mutex);
- log_root_tree->log_batch++;
+ atomic_inc(&log_root_tree->log_batch);
atomic_inc(&log_root_tree->log_writers);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2157,7 +2412,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
btrfs_header_level(log_root_tree->node));
- log_root_tree->log_batch = 0;
log_root_tree->log_transid++;
smp_mb();
@@ -2171,9 +2425,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* in and cause problems either.
*/
btrfs_scrub_pause_super(root);
- write_ctree_super(trans, root->fs_info->tree_root, 1);
+ ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
btrfs_scrub_continue_super(root);
- ret = 0;
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_wake_log_root;
+ }
mutex_lock(&root->log_mutex);
if (root->last_log_commit < log_transid)
@@ -2209,7 +2466,8 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
- 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
+ 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
+ NULL);
if (ret)
break;
@@ -2646,6 +2904,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
int ret;
struct btrfs_key key;
struct btrfs_key found_key;
+ int start_slot;
key.objectid = objectid;
key.type = max_key_type;
@@ -2667,8 +2926,18 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
if (found_key.objectid != objectid)
break;
- ret = btrfs_del_item(trans, log, path);
- if (ret)
+ found_key.offset = 0;
+ found_key.type = 0;
+ ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
+ &start_slot);
+
+ ret = btrfs_del_items(trans, log, path, start_slot,
+ path->slots[0] - start_slot + 1);
+ /*
+ * If start_slot isn't 0 then we don't need to re-search; we've
+ * found the last item with this objectid in the tree.
+ */
+ if (ret || start_slot != 0)
break;
btrfs_release_path(path);
}
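
The drop_objectid_items() change above batches deletions: since items sharing an objectid are contiguous in a sorted leaf, one bin search finds the first matching slot and btrfs_del_items() removes the whole run; if that run did not begin at slot 0, no earlier leaf can hold the objectid and the loop stops. A sketch of the batching idea over a plain sorted array:

    /* Sketch: in a sorted "leaf", items sharing an objectid are
     * contiguous, so find the first match and delete the run at once. */
    #include <stdio.h>

    int main(void)
    {
            int objectids[] = { 5, 7, 7, 7, 9 };
            int n = 5, target = 7;

            int start = 0;
            while (start < n && objectids[start] < target)
                    start++;                /* stand-in for the bin search */
            int count = 0;
            while (start + count < n && objectids[start + count] == target)
                    count++;

            printf("delete %d items starting at slot %d\n", count, start);
            /* start != 0 here, so by the same reasoning as the patch no
             * earlier leaf can hold this objectid: stop re-searching */
            return 0;
    }
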
@@ -2678,14 +2947,64 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
return ret;
}
+static void fill_inode_item(struct btrfs_trans_handle *trans,
+ struct extent_buffer *leaf,
+ struct btrfs_inode_item *item,
+ struct inode *inode, int log_inode_only)
+{
+ btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
+ btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
+ btrfs_set_inode_mode(leaf, item, inode->i_mode);
+ btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+
+ btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_sec);
+ btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_nsec);
+
+ btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_sec);
+ btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_nsec);
+
+ btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_sec);
+ btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_nsec);
+
+ btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
+
+ btrfs_set_inode_sequence(leaf, item, inode->i_version);
+ btrfs_set_inode_transid(leaf, item, trans->transid);
+ btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
+ btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
+ btrfs_set_inode_block_group(leaf, item, 0);
+
+ if (log_inode_only) {
+ /* set the generation to zero so the recovery code
+ * can tell the difference between a log entry made
+ * just to say 'this inode exists' and one made to
+ * say 'update this inode with these values'
+ */
+ btrfs_set_inode_generation(leaf, item, 0);
+ btrfs_set_inode_size(leaf, item, 0);
+ } else {
+ btrfs_set_inode_generation(leaf, item,
+ BTRFS_I(inode)->generation);
+ btrfs_set_inode_size(leaf, item, inode->i_size);
+ }
+
+}
+
static noinline int copy_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *log,
+ struct inode *inode,
struct btrfs_path *dst_path,
struct extent_buffer *src,
int start_slot, int nr, int inode_only)
{
unsigned long src_offset;
unsigned long dst_offset;
+ struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
struct btrfs_file_extent_item *extent;
struct btrfs_inode_item *inode_item;
int ret;
@@ -2694,6 +3013,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
char *ins_data;
int i;
struct list_head ordered_sums;
+ int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
INIT_LIST_HEAD(&ordered_sums);
@@ -2722,29 +3042,23 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
src_offset = btrfs_item_ptr_offset(src, start_slot + i);
- copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
- src_offset, ins_sizes[i]);
-
- if (inode_only == LOG_INODE_EXISTS &&
- ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
+ if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
inode_item = btrfs_item_ptr(dst_path->nodes[0],
dst_path->slots[0],
struct btrfs_inode_item);
- btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);
-
- /* set the generation to zero so the recover code
- * can tell the difference between an logging
- * just to say 'this inode exists' and a logging
- * to say 'update this inode with these values'
- */
- btrfs_set_inode_generation(dst_path->nodes[0],
- inode_item, 0);
+ fill_inode_item(trans, dst_path->nodes[0], inode_item,
+ inode, inode_only == LOG_INODE_EXISTS);
+ } else {
+ copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
+ src_offset, ins_sizes[i]);
}
+
/* take a reference on file data extents so that truncates
* or deletes of this inode don't have to relog the inode
* again
*/
- if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
+ if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
+ !skip_csum) {
int found_type;
extent = btrfs_item_ptr(src, start_slot + i,
struct btrfs_file_extent_item);
@@ -2753,8 +3067,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
continue;
found_type = btrfs_file_extent_type(src, extent);
- if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ if (found_type == BTRFS_FILE_EXTENT_REG) {
u64 ds, dl, cs, cl;
ds = btrfs_file_extent_disk_bytenr(src,
extent);
@@ -2803,6 +3116,239 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
return ret;
}
+static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct extent_map *em1, *em2;
+
+ em1 = list_entry(a, struct extent_map, list);
+ em2 = list_entry(b, struct extent_map, list);
+
+ if (em1->start < em2->start)
+ return -1;
+ else if (em1->start > em2->start)
+ return 1;
+ return 0;
+}
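
extent_cmp() above is a comparator for list_sort(): it only has to return a negative, zero, or positive value ordering extent maps by their start offset. A userspace sketch of the same comparator contract, with qsort() standing in for the kernel's list_sort():

    #include <stdio.h>
    #include <stdlib.h>

    struct extent_map { unsigned long long start; };

    static int extent_cmp(const void *a, const void *b)
    {
            const struct extent_map *em1 = a, *em2 = b;

            if (em1->start < em2->start)
                    return -1;
            if (em1->start > em2->start)
                    return 1;
            return 0;
    }

    int main(void)
    {
            struct extent_map ems[] = { {4096}, {0}, {8192} };

            qsort(ems, 3, sizeof(ems[0]), extent_cmp);
            for (int i = 0; i < 3; i++)
                    printf("%llu\n", ems[i].start);  /* 0 4096 8192 */
            return 0;
    }
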
+
+struct log_args {
+ struct extent_buffer *src;
+ u64 next_offset;
+ int start_slot;
+ int nr;
+};
+
+static int log_one_extent(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct btrfs_root *root,
+ struct extent_map *em, struct btrfs_path *path,
+ struct btrfs_path *dst_path, struct log_args *args)
+{
+ struct btrfs_root *log = root->log_root;
+ struct btrfs_file_extent_item *fi;
+ struct btrfs_key key;
+ u64 start = em->mod_start;
+ u64 search_start = start;
+ u64 len = em->mod_len;
+ u64 num_bytes;
+ int nritems;
+ int ret;
+
+ if (BTRFS_I(inode)->logged_trans == trans->transid) {
+ ret = __btrfs_drop_extents(trans, log, inode, dst_path, start,
+ start + len, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
+ while (len) {
+ if (args->nr)
+ goto next_slot;
+again:
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = search_start;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ /*
+ * A rare case where we can have an em for a section of a
+ * larger extent so we need to make sure that this em
+ * falls within the extent we've found. If not we just
+ * bail and go back to ye-olde way of doing things but
+ * it happens often enough in testing that we need to do
+ * this dance to make sure.
+ */
+ do {
+ if (path->slots[0] == 0) {
+ btrfs_release_path(path);
+ if (search_start == 0)
+ return -ENOENT;
+ search_start--;
+ goto again;
+ }
+
+ path->slots[0]--;
+ btrfs_item_key_to_cpu(path->nodes[0], &key,
+ path->slots[0]);
+ if (key.objectid != btrfs_ino(inode) ||
+ key.type != BTRFS_EXTENT_DATA_KEY) {
+ btrfs_release_path(path);
+ return -ENOENT;
+ }
+ } while (key.offset > start);
+
+ fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ num_bytes = btrfs_file_extent_num_bytes(path->nodes[0],
+ fi);
+ if (key.offset + num_bytes <= start) {
+ btrfs_release_path(path);
+ return -ENOENT;
+ }
+ }
+ args->src = path->nodes[0];
+next_slot:
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ fi = btrfs_item_ptr(args->src, path->slots[0],
+ struct btrfs_file_extent_item);
+ if (args->nr &&
+ args->start_slot + args->nr == path->slots[0]) {
+ args->nr++;
+ } else if (args->nr) {
+ ret = copy_items(trans, inode, dst_path, args->src,
+ args->start_slot, args->nr,
+ LOG_INODE_ALL);
+ if (ret)
+ return ret;
+ args->nr = 1;
+ args->start_slot = path->slots[0];
+ } else if (!args->nr) {
+ args->nr = 1;
+ args->start_slot = path->slots[0];
+ }
+ nritems = btrfs_header_nritems(path->nodes[0]);
+ path->slots[0]++;
+ num_bytes = btrfs_file_extent_num_bytes(args->src, fi);
+ if (len < num_bytes) {
+ /* I _think_ this is ok, envision we write to a
+ * preallocated space that is adjacent to a previously
+ * written preallocated space that gets merged when we
+ * mark this preallocated space written. If we do not
+ * have the adjacent extent in cache then when we copy
+ * this extent it could end up being larger than our EM
+ * thinks it is, which is a-ok, so just set len to 0.
+ */
+ len = 0;
+ } else {
+ len -= num_bytes;
+ }
+ start = key.offset + num_bytes;
+ args->next_offset = start;
+ search_start = start;
+
+ if (path->slots[0] < nritems) {
+ if (len)
+ goto next_slot;
+ break;
+ }
+
+ if (args->nr) {
+ ret = copy_items(trans, inode, dst_path, args->src,
+ args->start_slot, args->nr,
+ LOG_INODE_ALL);
+ if (ret)
+ return ret;
+ args->nr = 0;
+ btrfs_release_path(path);
+ }
+ }
+
+ return 0;
+}
+
+static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode,
+ struct btrfs_path *path,
+ struct btrfs_path *dst_path)
+{
+ struct log_args args;
+ struct extent_map *em, *n;
+ struct list_head extents;
+ struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
+ u64 test_gen;
+ int ret = 0;
+
+ INIT_LIST_HEAD(&extents);
+
+ memset(&args, 0, sizeof(args));
+
+ write_lock(&tree->lock);
+ test_gen = root->fs_info->last_trans_committed;
+
+ list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
+ list_del_init(&em->list);
+ if (em->generation <= test_gen)
+ continue;
+ /* Need a ref to keep it from getting evicted from cache */
+ atomic_inc(&em->refs);
+ set_bit(EXTENT_FLAG_LOGGING, &em->flags);
+ list_add_tail(&em->list, &extents);
+ }
+
+ list_sort(NULL, &extents, extent_cmp);
+
+ while (!list_empty(&extents)) {
+ em = list_entry(extents.next, struct extent_map, list);
+
+ list_del_init(&em->list);
+ clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
+
+ /*
+ * If we had an error we just need to delete everybody from our
+ * private list.
+ */
+ if (ret) {
+ free_extent_map(em);
+ continue;
+ }
+
+ write_unlock(&tree->lock);
+
+ /*
+ * If the previous EM and the last extent we left off on aren't
+ * sequential then we need to copy the items we have and redo
+ * our search
+ */
+ if (args.nr && em->mod_start != args.next_offset) {
+ ret = copy_items(trans, inode, dst_path, args.src,
+ args.start_slot, args.nr,
+ LOG_INODE_ALL);
+ if (ret) {
+ free_extent_map(em);
+ write_lock(&tree->lock);
+ continue;
+ }
+ btrfs_release_path(path);
+ args.nr = 0;
+ }
+
+ ret = log_one_extent(trans, inode, root, em, path, dst_path, &args);
+ free_extent_map(em);
+ write_lock(&tree->lock);
+ }
+ WARN_ON(!list_empty(&extents));
+ write_unlock(&tree->lock);
+
+ if (!ret && args.nr)
+ ret = copy_items(trans, inode, dst_path, args.src,
+ args.start_slot, args.nr, LOG_INODE_ALL);
+ btrfs_release_path(path);
+ return ret;
+}
+
/* log a single inode in the tree log.
* At least one parent directory for this inode must exist in the tree
* or be logged already.
@@ -2832,6 +3378,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
int nritems;
int ins_start_slot = 0;
int ins_nr;
+ bool fast_search = false;
u64 ino = btrfs_ino(inode);
log = root->log_root;
@@ -2851,21 +3398,23 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
max_key.objectid = ino;
- /* today the code can only do partial logging of directories */
- if (!S_ISDIR(inode->i_mode))
- inode_only = LOG_INODE_ALL;
+ /* today the code can only do partial logging of directories */
if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
max_key.type = BTRFS_XATTR_ITEM_KEY;
else
max_key.type = (u8)-1;
max_key.offset = (u64)-1;
- ret = btrfs_commit_inode_delayed_items(trans, inode);
- if (ret) {
- btrfs_free_path(path);
- btrfs_free_path(dst_path);
- return ret;
+ /* Only run delayed items if we are a dir or a new file */
+ if (S_ISDIR(inode->i_mode) ||
+ BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
+ ret = btrfs_commit_inode_delayed_items(trans, inode);
+ if (ret) {
+ btrfs_free_path(path);
+ btrfs_free_path(dst_path);
+ return ret;
+ }
}
mutex_lock(&BTRFS_I(inode)->log_mutex);
@@ -2881,7 +3430,16 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
max_key_type = BTRFS_XATTR_ITEM_KEY;
ret = drop_objectid_items(trans, log, path, ino, max_key_type);
} else {
- ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
+ if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags)) {
+ ret = btrfs_truncate_inode_items(trans, log,
+ inode, 0, 0);
+ } else {
+ fast_search = true;
+ max_key.type = BTRFS_XATTR_ITEM_KEY;
+ ret = drop_objectid_items(trans, log, path, ino,
+ BTRFS_XATTR_ITEM_KEY);
+ }
}
if (ret) {
err = ret;
@@ -2912,7 +3470,7 @@ again:
goto next_slot;
}
- ret = copy_items(trans, log, dst_path, src, ins_start_slot,
+ ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
ins_nr, inode_only);
if (ret) {
err = ret;
@@ -2930,7 +3488,7 @@ next_slot:
goto again;
}
if (ins_nr) {
- ret = copy_items(trans, log, dst_path, src,
+ ret = copy_items(trans, inode, dst_path, src,
ins_start_slot,
ins_nr, inode_only);
if (ret) {
@@ -2951,8 +3509,7 @@ next_slot:
break;
}
if (ins_nr) {
- ret = copy_items(trans, log, dst_path, src,
- ins_start_slot,
+ ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
ins_nr, inode_only);
if (ret) {
err = ret;
@@ -2960,7 +3517,24 @@ next_slot:
}
ins_nr = 0;
}
- WARN_ON(ins_nr);
+
+ if (fast_search) {
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
+ ret = btrfs_log_changed_extents(trans, root, inode, path,
+ dst_path);
+ if (ret) {
+ err = ret;
+ goto out_unlock;
+ }
+ } else {
+ struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map *em, *n;
+
+ list_for_each_entry_safe(em, n, &tree->modified_extents, list)
+ list_del_init(&em->list);
+ }
+
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
btrfs_release_path(path);
btrfs_release_path(dst_path);
@@ -2971,6 +3545,7 @@ next_slot:
}
}
BTRFS_I(inode)->logged_trans = trans->transid;
+ BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
mutex_unlock(&BTRFS_I(inode)->log_mutex);
@@ -3138,7 +3713,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans:
dput(old_parent);
if (ret < 0) {
- BUG_ON(ret != -ENOSPC);
+ WARN_ON(ret != -ENOSPC);
root->fs_info->last_trans_log_full_commit = trans->transid;
ret = 1;
}
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index ab942f46b3dd..99be4c138db6 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -143,14 +143,13 @@ EXPORT_SYMBOL(ulist_free);
* In case of allocation failure -ENOMEM is returned and the ulist stays
* unaltered.
*/
-int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
- gfp_t gfp_mask)
+int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}
-int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
- unsigned long *old_aux, gfp_t gfp_mask)
+int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
+ u64 *old_aux, gfp_t gfp_mask)
{
int i;
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index 21bdc8ec8130..21a1963439c3 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -33,7 +33,7 @@ struct ulist_iterator {
*/
struct ulist_node {
u64 val; /* value to store */
- unsigned long aux; /* auxiliary value saved along with the val */
+ u64 aux; /* auxiliary value saved along with the val */
};
struct ulist {
@@ -65,10 +65,9 @@ void ulist_fini(struct ulist *ulist);
void ulist_reinit(struct ulist *ulist);
struct ulist *ulist_alloc(gfp_t gfp_mask);
void ulist_free(struct ulist *ulist);
-int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
- gfp_t gfp_mask);
-int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
- unsigned long *old_aux, gfp_t gfp_mask);
+int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
+int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
+ u64 *old_aux, gfp_t gfp_mask);
struct ulist_node *ulist_next(struct ulist *ulist,
struct ulist_iterator *uiter);
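
Widening aux from unsigned long to u64 matters on 32-bit hosts, where unsigned long is four bytes and a 64-bit bytenr stored through it would be silently truncated. A short sketch of the failure mode being avoided:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bytenr = 0x100000000ULL;       /* past 4 GiB */
            uint32_t as_ulong32 = (uint32_t)bytenr; /* 32-bit ulong keeps this */
            uint64_t as_u64 = bytenr;               /* the new field keeps this */

            printf("truncated: 0x%x  widened: 0x%llx\n",
                   as_ulong32, (unsigned long long)as_u64);
            return 0;
    }
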
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 88b969aeeb71..0f5ebb72a5ea 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -639,7 +639,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
bdev = blkdev_get_by_path(device->name->str, flags, holder);
if (IS_ERR(bdev)) {
- printk(KERN_INFO "open %s failed\n", device->name->str);
+ printk(KERN_INFO "btrfs: open %s failed\n", device->name->str);
goto error;
}
filemap_write_and_wait(bdev->bd_inode->i_mapping);
@@ -1475,6 +1475,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
free_fs_devices(cur_devices);
}
+ root->fs_info->num_tolerated_disk_barrier_failures =
+ btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
+
/*
* at this point, the device is zero sized. We want to
* remove it from the devices list and zero out the old super
@@ -1775,15 +1778,21 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if (seeding_dev) {
ret = init_first_rw_device(trans, root, device);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
goto error_trans;
+ }
ret = btrfs_finish_sprout(trans, root);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
goto error_trans;
+ }
} else {
ret = btrfs_add_device(trans, root, device);
- if (ret)
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
goto error_trans;
+ }
}
/*
@@ -1793,6 +1802,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
btrfs_clear_space_info_full(root->fs_info);
unlock_chunks(root);
+ root->fs_info->num_tolerated_disk_barrier_failures =
+ btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
ret = btrfs_commit_transaction(trans, root);
if (seeding_dev) {
@@ -1808,13 +1819,19 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
"Failed to relocate sys chunks after "
"device initialization. This can be fixed "
"using the \"btrfs balance\" command.");
+ trans = btrfs_attach_transaction(root);
+ if (IS_ERR(trans)) {
+ if (PTR_ERR(trans) == -ENOENT)
+ return 0;
+ return PTR_ERR(trans);
+ }
+ ret = btrfs_commit_transaction(trans, root);
}
return ret;
error_trans:
unlock_chunks(root);
- btrfs_abort_transaction(trans, root, ret);
btrfs_end_transaction(trans, root);
rcu_string_free(device->name);
kfree(device);
@@ -2804,6 +2821,26 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
}
}
+ if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ int num_tolerated_disk_barrier_failures;
+ u64 target = bctl->sys.target;
+
+ num_tolerated_disk_barrier_failures =
+ btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
+ if (num_tolerated_disk_barrier_failures > 0 &&
+ (target &
+ (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
+ num_tolerated_disk_barrier_failures = 0;
+ else if (num_tolerated_disk_barrier_failures > 1 &&
+ (target &
+ (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
+ num_tolerated_disk_barrier_failures = 1;
+
+ fs_info->num_tolerated_disk_barrier_failures =
+ num_tolerated_disk_barrier_failures;
+ }
+
ret = insert_balance_item(fs_info->tree_root, bctl);
if (ret && ret != -EEXIST)
goto out;
@@ -2836,6 +2873,11 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
__cancel_balance(fs_info);
}
+ if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ fs_info->num_tolerated_disk_barrier_failures =
+ btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
+ }
+
wake_up(&fs_info->balance_wait_q);
return ret;
@@ -3608,12 +3650,16 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
&sys_chunk_size, &sys_stripe_size,
sys_chunk_offset, alloc_profile);
- if (ret)
- goto abort;
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
ret = btrfs_add_device(trans, fs_info->chunk_root, device);
- if (ret)
- goto abort;
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
/*
* Modifying chunk tree needs allocating new blocks from both
@@ -3623,19 +3669,19 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
*/
ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
chunk_size, stripe_size);
- if (ret)
- goto abort;
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
+ }
ret = __finish_chunk_alloc(trans, extent_root, sys_map,
sys_chunk_offset, sys_chunk_size,
sys_stripe_size);
if (ret)
- goto abort;
+ btrfs_abort_transaction(trans, root, ret);
- return 0;
+out:
-abort:
- btrfs_abort_transaction(trans, root, ret);
return ret;
}
@@ -3760,7 +3806,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
read_unlock(&em_tree->lock);
if (!em) {
- printk(KERN_CRIT "unable to find logical %llu len %llu\n",
+ printk(KERN_CRIT "btrfs: unable to find logical %llu len %llu\n",
(unsigned long long)logical,
(unsigned long long)*length);
BUG();
@@ -4217,7 +4263,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
total_devs = bbio->num_stripes;
if (map_length < length) {
- printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
+ printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
"len %llu\n", (unsigned long long)logical,
(unsigned long long)length,
(unsigned long long)map_length);
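
The balance path above pre-computes how many disk barrier failures the converted system chunks can survive: profiles without redundancy (single, RAID0, DUP) tolerate none, while mirrored profiles (RAID1, RAID10) tolerate at most one. A sketch of that clamp; the flag values are illustrative, only the mapping mirrors the patch:

    #include <stdio.h>

    #define BG_SINGLE 0x1
    #define BG_DUP    0x2
    #define BG_RAID0  0x4
    #define BG_RAID1  0x8
    #define BG_RAID10 0x10

    static int clamp_tolerance(int current, unsigned target)
    {
            if (current > 0 && (target & (BG_DUP | BG_RAID0 | BG_SINGLE)))
                    return 0;       /* no redundancy: no failure survivable */
            if (current > 1 && (target & (BG_RAID1 | BG_RAID10)))
                    return 1;       /* mirrored: one barrier failure at most */
            return current;
    }

    int main(void)
    {
            printf("%d %d\n", clamp_tolerance(2, BG_RAID0),
                              clamp_tolerance(2, BG_RAID1));  /* 0 1 */
            return 0;
    }
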
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 92c20654cc55..9acb846c3e7f 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -97,7 +97,7 @@ static int zlib_compress_pages(struct list_head *ws,
*total_in = 0;
if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) {
- printk(KERN_WARNING "deflateInit failed\n");
+ printk(KERN_WARNING "btrfs: deflateInit failed\n");
ret = -1;
goto out;
}
@@ -125,7 +125,7 @@ static int zlib_compress_pages(struct list_head *ws,
while (workspace->def_strm.total_in < len) {
ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH);
if (ret != Z_OK) {
- printk(KERN_DEBUG "btrfs deflate in loop returned %d\n",
+ printk(KERN_DEBUG "btrfs: deflate in loop returned %d\n",
ret);
zlib_deflateEnd(&workspace->def_strm);
ret = -1;
@@ -252,7 +252,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
}
if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
- printk(KERN_WARNING "inflateInit failed\n");
+ printk(KERN_WARNING "btrfs: inflateInit failed\n");
return -1;
}
while (workspace->inf_strm.total_in < srclen) {
@@ -336,7 +336,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
}
if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) {
- printk(KERN_WARNING "inflateInit failed\n");
+ printk(KERN_WARNING "btrfs: inflateInit failed\n");
return -1;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index 58e2e7b77372..b5f044283edb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2312,12 +2312,6 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
loff_t size;
int ret;
- /*
- * Update file times before taking page lock. We may end up failing the
- * fault so this update may be superfluous but who really cares...
- */
- file_update_time(vma->vm_file);
-
lock_page(page);
size = i_size_read(inode);
if ((page->mapping != inode->i_mapping) ||
@@ -2355,6 +2349,13 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
sb_start_pagefault(sb);
+
+ /*
+ * Update file times before taking page lock. We may end up failing the
+ * fault so this update may be superfluous but who really cares...
+ */
+ file_update_time(vma->vm_file);
+
ret = __block_page_mkwrite(vma, vmf, get_block);
sb_end_pagefault(sb);
return block_page_mkwrite_return(ret);
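
Moving file_update_time() out of __block_page_mkwrite() and below sb_start_pagefault() enforces an ordering rule: nothing may dirty the filesystem before freeze protection is held, otherwise a concurrent freeze could catch a dirty inode in flight. A sketch of the required ordering, with stand-in functions:

    #include <stdio.h>

    static void sb_start_pagefault(void) { puts("freeze protection taken"); }
    static void sb_end_pagefault(void)   { puts("freeze protection dropped"); }
    static void file_update_time(void)   { puts("inode times dirtied"); }
    static int  do_mkwrite(void)         { puts("page made writable"); return 0; }

    int main(void)
    {
            sb_start_pagefault();
            file_update_time();     /* safe: protection is already held */
            int ret = do_mkwrite();
            sb_end_pagefault();
            return ret;
    }
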
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 452e71a1b753..6690269f5dde 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -205,7 +205,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
- page->index << PAGE_CACHE_SHIFT, &len,
+ (u64) page_offset(page), &len,
ci->i_truncate_seq, ci->i_truncate_size,
&page, 1, 0);
if (err == -ENOENT)
@@ -286,7 +286,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
int nr_pages = 0;
int ret;
- off = page->index << PAGE_CACHE_SHIFT;
+ off = (u64) page_offset(page);
/* count pages */
next_index = page->index;
@@ -308,8 +308,8 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
NULL, 0,
ci->i_truncate_seq, ci->i_truncate_size,
NULL, false, 1, 0);
- if (!req)
- return -ENOMEM;
+ if (IS_ERR(req))
+ return PTR_ERR(req);
/* build page vector */
nr_pages = len >> PAGE_CACHE_SHIFT;
@@ -426,7 +426,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
struct ceph_inode_info *ci;
struct ceph_fs_client *fsc;
struct ceph_osd_client *osdc;
- loff_t page_off = page->index << PAGE_CACHE_SHIFT;
+ loff_t page_off = page_offset(page);
int len = PAGE_CACHE_SIZE;
loff_t i_size;
int err = 0;
@@ -817,8 +817,7 @@ get_more_pages:
/* ok */
if (locked_pages == 0) {
/* prepare async write request */
- offset = (unsigned long long)page->index
- << PAGE_CACHE_SHIFT;
+ offset = (u64) page_offset(page);
len = wsize;
req = ceph_osdc_new_request(&fsc->client->osdc,
&ci->i_layout,
@@ -832,8 +831,8 @@ get_more_pages:
ci->i_truncate_size,
&inode->i_mtime, true, 1, 0);
- if (!req) {
- rc = -ENOMEM;
+ if (IS_ERR(req)) {
+ rc = PTR_ERR(req);
unlock_page(page);
break;
}
@@ -1180,7 +1179,7 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct page *page = vmf->page;
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
- loff_t off = page->index << PAGE_CACHE_SHIFT;
+ loff_t off = page_offset(page);
loff_t size, len;
int ret;
@@ -1225,6 +1224,7 @@ out:
static struct vm_operations_struct ceph_vmops = {
.fault = filemap_fault,
.page_mkwrite = ceph_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1235,6 +1235,5 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &ceph_vmops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
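
The ceph conversions above replace the open-coded page->index << PAGE_CACHE_SHIFT with page_offset(). The helper widens the index to loff_t before shifting, so offsets past 4 GiB are not truncated on 32-bit kernels, where pgoff_t is only 32 bits wide. A sketch mirroring the in-tree definition from include/linux/pagemap.h:

#include <linux/pagemap.h>

/* page_offset() promotes page->index to loff_t before the shift;
 * the open-coded form shifted a 32-bit pgoff_t and could truncate. */
static inline loff_t example_page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
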
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 620daad201db..3251e9cc6401 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1005,7 +1005,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
head = msg->front.iov_base;
- head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
+ le32_add_cpu(&head->num, 1);
item = msg->front.iov_base + msg->front.iov_len;
item->ino = cpu_to_le64(ino);
item->cap_id = cpu_to_le64(cap_id);
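
le32_add_cpu() collapses the read-modify-write above into one helper; it lives in include/linux/byteorder/generic.h and is equivalent to the line it replaces:

#include <linux/byteorder/generic.h>

/* What le32_add_cpu(&head->num, 1) expands to, shown for reference: */
static inline void example_le32_add_cpu(__le32 *var, u32 val)
{
        *var = cpu_to_le32(le32_to_cpu(*var) + val);
}
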
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 8e1b60e557b6..9349bb37a2fe 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -90,6 +90,8 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
*max_len = handle_length;
type = 255;
}
+ if (dentry)
+ dput(dentry);
return type;
}
@@ -99,7 +101,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
* FIXME: we should try harder by querying the mds for the ino.
*/
static struct dentry *__fh_to_dentry(struct super_block *sb,
- struct ceph_nfs_fh *fh)
+ struct ceph_nfs_fh *fh, int fh_len)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
struct inode *inode;
@@ -107,6 +109,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
struct ceph_vino vino;
int err;
+ if (fh_len < sizeof(*fh) / 4)
+ return ERR_PTR(-ESTALE);
+
dout("__fh_to_dentry %llx\n", fh->ino);
vino.ino = fh->ino;
vino.snap = CEPH_NOSNAP;
@@ -150,7 +155,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
* convert connectable fh to dentry
*/
static struct dentry *__cfh_to_dentry(struct super_block *sb,
- struct ceph_nfs_confh *cfh)
+ struct ceph_nfs_confh *cfh, int fh_len)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
struct inode *inode;
@@ -158,6 +163,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
struct ceph_vino vino;
int err;
+ if (fh_len < sizeof(*cfh) / 4)
+ return ERR_PTR(-ESTALE);
+
dout("__cfh_to_dentry %llx (%llx/%x)\n",
cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
@@ -207,9 +215,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
if (fh_type == 1)
- return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
+ return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
+ fh_len);
else
- return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
+ return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
+ fh_len);
}
/*
@@ -230,6 +240,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
if (fh_type == 1)
return ERR_PTR(-ESTALE);
+ if (fh_len < sizeof(*cfh) / 4)
+ return ERR_PTR(-ESTALE);
pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
cfh->parent_name_hash);
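
The fh_len guards added to ceph/export.c follow the exportfs convention that handle lengths are counted in 4-byte words, hence the sizeof(*fh) / 4 comparisons: a handle shorter than the structure it is cast to must be rejected as stale before any field is read. A minimal sketch of the check, using a hypothetical handle type:

#include <linux/errno.h>
#include <linux/types.h>

struct example_fh {             /* hypothetical; stands in for ceph_nfs_fh */
        __u64 ino;
};

static int example_validate_fh(int fh_len)
{
        if (fh_len < sizeof(struct example_fh) / 4)
                return -ESTALE; /* too short to contain the handle */
        return 0;
}
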
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ecebbc09bfc7..5840d2aaed15 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -536,8 +536,8 @@ more:
do_sync,
ci->i_truncate_seq, ci->i_truncate_size,
&mtime, false, 2, page_align);
- if (!req)
- return -ENOMEM;
+ if (IS_ERR(req))
+ return PTR_ERR(req);
if (file->f_flags & O_DIRECT) {
pages = ceph_get_direct_page_vector(data, num_pages, false);
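
ceph_osdc_new_request() was changed to return ERR_PTR() values rather than NULL, so its callers here switch from the !req / -ENOMEM guess to IS_ERR()/PTR_ERR() and propagate the real errno. The consumer-side idiom, sketched with a hypothetical allocator:

#include <linux/err.h>

struct some_req;                /* opaque, for illustration only */

static long example_caller(struct some_req *(*alloc_req)(void))
{
        struct some_req *req = alloc_req();     /* hypothetical allocator */

        if (IS_ERR(req))
                return PTR_ERR(req);    /* -ENOMEM, -EINVAL, ... as set */
        /* ... submit req ... */
        return 0;
}
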
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 1396ceb46797..36549a46e311 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -187,14 +187,18 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
u64 tmp;
struct ceph_object_layout ol;
struct ceph_pg pgid;
+ int r;
/* copy and validate */
if (copy_from_user(&dl, arg, sizeof(dl)))
return -EFAULT;
down_read(&osdc->map_sem);
- ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
- &dl.object_no, &dl.object_offset, &olen);
+ r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
+ &dl.object_no, &dl.object_offset,
+ &olen);
+ if (r < 0) {
+ up_read(&osdc->map_sem);
+ return -EIO;
+ }
dl.file_offset -= dl.object_offset;
dl.object_size = ceph_file_layout_object_size(ci->i_layout);
dl.block_size = ceph_file_layout_su(ci->i_layout);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a5a735422aa7..1bcf712655d9 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2625,7 +2625,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
session_state_name(s->s_state));
- if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
+ if (i >= newmap->m_max_mds ||
+ memcmp(ceph_mdsmap_get_addr(oldmap, i),
ceph_mdsmap_get_addr(newmap, i),
sizeof(struct ceph_entity_addr))) {
if (s->s_state == CEPH_MDS_SESSION_OPENING) {
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 3a42d9326378..2eb43f211325 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -307,7 +307,10 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
{
struct ceph_mount_options *fsopt;
const char *dev_name_end;
- int err = -ENOMEM;
+ int err;
+
+ if (!dev_name || !*dev_name)
+ return -EINVAL;
fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
if (!fsopt)
@@ -328,21 +331,33 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
fsopt->congestion_kb = default_congestion_kb();
- /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
+ /*
+ * Distinguish the server list from the path in "dev_name".
+ * Internally we do not include the leading '/' in the path.
+ *
+ * "dev_name" will look like:
+ * <server_spec>[,<server_spec>...]:[<path>]
+ * where
+ * <server_spec> is <ip>[:<port>]
+ * <path> is optional, but if present must begin with '/'
+ */
+ dev_name_end = strchr(dev_name, '/');
+ if (dev_name_end) {
+ /* skip over leading '/' for path */
+ *path = dev_name_end + 1;
+ } else {
+ /* path is empty */
+ dev_name_end = dev_name + strlen(dev_name);
+ *path = dev_name_end;
+ }
err = -EINVAL;
- if (!dev_name)
- goto out;
- *path = strstr(dev_name, ":/");
- if (*path == NULL) {
- pr_err("device name is missing path (no :/ in %s)\n",
+ dev_name_end--; /* back up to ':' separator */
+ if (*dev_name_end != ':') {
+ pr_err("device name is missing path (no : separator in %s)\n",
dev_name);
goto out;
}
- dev_name_end = *path;
dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
-
- /* path on server */
- *path += 2;
dout("server path '%s'\n", *path);
*popt = ceph_parse_options(options, dev_name, dev_name_end,
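
The rewritten parser no longer requires a literal ":/": it finds the first '/', treats everything after it as the path, and then requires the byte just before to be the ':' separator; with no '/' at all the path is simply empty. A userspace sketch of the same rule:

#include <stdio.h>
#include <string.h>

/* Sketch of the split described in the comment block above:
 * <server_spec>[,<server_spec>...]:[<path>] */
static int split_dev_name(const char *dev_name, const char **path,
                          size_t *server_len)
{
        const char *end = strchr(dev_name, '/');

        if (end)
                *path = end + 1;                /* skip the leading '/' */
        else
                end = *path = dev_name + strlen(dev_name);

        if (end == dev_name || end[-1] != ':')
                return -1;                      /* no ':' separator */
        *server_len = (size_t)(end - 1 - dev_name);
        return 0;
}

int main(void)
{
        const char *dev = "1.2.3.4:6789,5.6.7.8:/a/b";
        const char *path;
        size_t n;

        if (split_dev_name(dev, &path, &n) == 0)
                printf("servers='%.*s' path='%s'\n", (int)n, dev, path);
        return 0;
}
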
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 3f152b92a94a..afc2bb691780 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -471,9 +471,19 @@ static int exact_lock(dev_t dev, void *data)
*/
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
+ int error;
+
p->dev = dev;
p->count = count;
- return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
+
+ error = kobj_map(cdev_map, dev, count, NULL,
+ exact_match, exact_lock, p);
+ if (error)
+ return error;
+
+ kobject_get(p->kobj.parent);
+
+ return 0;
}
static void cdev_unmap(dev_t dev, unsigned count)
@@ -498,14 +508,20 @@ void cdev_del(struct cdev *p)
static void cdev_default_release(struct kobject *kobj)
{
struct cdev *p = container_of(kobj, struct cdev, kobj);
+ struct kobject *parent = kobj->parent;
+
cdev_purge(p);
+ kobject_put(parent);
}
static void cdev_dynamic_release(struct kobject *kobj)
{
struct cdev *p = container_of(kobj, struct cdev, kobj);
+ struct kobject *parent = kobj->parent;
+
cdev_purge(p);
kfree(p);
+ kobject_put(parent);
}
static struct kobj_type ktype_cdev_default = {
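
cdev_add() now pins the parent kobject for the lifetime of the cdev, and both release paths drop that reference after cdev_purge(); a cdev embedded in a driver's per-device structure can therefore no longer outlive its parent. A hypothetical driver-side sketch:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kobject.h>

struct example_dev {                    /* hypothetical per-device struct */
        struct kobject kobj;
        struct cdev cdev;
};

static int example_register(struct example_dev *d, dev_t devt,
                            const struct file_operations *fops)
{
        cdev_init(&d->cdev, fops);
        d->cdev.kobj.parent = &d->kobj; /* reference taken by cdev_add() */
        return cdev_add(&d->cdev, devt, 1);
}
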
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index e622863b292f..086f381d6489 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -31,18 +31,18 @@
/* create a new cifs key */
static int
-cifs_spnego_key_instantiate(struct key *key, const void *data, size_t datalen)
+cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
char *payload;
int ret;
ret = -ENOMEM;
- payload = kmalloc(datalen, GFP_KERNEL);
+ payload = kmalloc(prep->datalen, GFP_KERNEL);
if (!payload)
goto error;
/* attach the data */
- memcpy(payload, data, datalen);
+ memcpy(payload, prep->data, prep->datalen);
key->payload.data = payload;
ret = 0;
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 53cf2aabce87..71d5d0a5f6b2 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -203,6 +203,27 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len,
int i;
wchar_t wchar_to; /* needed to quiet sparse */
+ /* special-case utf8 so chars outside plane 0 are handled */
+ if (!strcmp(codepage->charset, "utf8")) {
+ /*
+ * Convert utf8 -> utf16; we assume there is enough space, as the
+ * caller should have checked that the conversion cannot overflow
+ * the destination. len is the length in wchar_t units (16 bits).
+ */
+ i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
+ (wchar_t *) to, len);
+
+ /* on success, terminate the string and exit */
+ if (i >= 0)
+ goto success;
+ /*
+ * If it fails, fall back to UCS encoding, as this
+ * function should not return negative values.
+ * Currently it can fail only if the source contains
+ * invalidly encoded characters.
+ */
+ }
+
for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
charlen = codepage->char2uni(from, len, &wchar_to);
if (charlen < 1) {
@@ -215,6 +236,7 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len,
put_unaligned_le16(wchar_to, &to[i]);
}
+success:
put_unaligned_le16(0, &to[i]);
return i;
}
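
utf8s_to_utf16s() returns the number of 16-bit units written, or a negative errno when the source is not valid UTF-8, which is exactly the failure the fallback loop handles. A sketch of the fast path in isolation, assuming the destination has room for len units plus the terminator, as documented above:

#include <linux/nls.h>
#include <asm/unaligned.h>

static int example_utf8_to_utf16le(__le16 *dst, const char *src, int len)
{
        int n = utf8s_to_utf16s((const u8 *)src, len, UTF16_LITTLE_ENDIAN,
                                (wchar_t *)dst, len);

        if (n < 0)
                return n;       /* invalid UTF-8: caller falls back */
        put_unaligned_le16(0, &dst[n]);
        return n;
}
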
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 2ee5c54797fa..fc783e264420 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -167,17 +167,17 @@ static struct shrinker cifs_shrinker = {
};
static int
-cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
+cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
char *payload;
- payload = kmalloc(datalen, GFP_KERNEL);
+ payload = kmalloc(prep->datalen, GFP_KERNEL);
if (!payload)
return -ENOMEM;
- memcpy(payload, data, datalen);
+ memcpy(payload, prep->data, prep->datalen);
key->payload.data = payload;
- key->datalen = datalen;
+ key->datalen = prep->datalen;
return 0;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2fdbe08a7a23..5c670b998ffb 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -67,6 +67,7 @@ enum {
/* Mount options that take no arguments */
Opt_user_xattr, Opt_nouser_xattr,
Opt_forceuid, Opt_noforceuid,
+ Opt_forcegid, Opt_noforcegid,
Opt_noblocksend, Opt_noautotune,
Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
Opt_mapchars, Opt_nomapchars, Opt_sfu,
@@ -117,6 +118,8 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_nouser_xattr, "nouser_xattr" },
{ Opt_forceuid, "forceuid" },
{ Opt_noforceuid, "noforceuid" },
+ { Opt_forcegid, "forcegid" },
+ { Opt_noforcegid, "noforcegid" },
{ Opt_noblocksend, "noblocksend" },
{ Opt_noautotune, "noautotune" },
{ Opt_hard, "hard" },
@@ -1195,6 +1198,12 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_noforceuid:
override_uid = 0;
break;
+ case Opt_forcegid:
+ override_gid = 1;
+ break;
+ case Opt_noforcegid:
+ override_gid = 0;
+ break;
case Opt_noblocksend:
vol->noblocksnd = 1;
break;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7d7bbdc4c8e7..edb25b4bbb95 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3003,6 +3003,7 @@ cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
static struct vm_operations_struct cifs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = cifs_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 2126ab185045..76d974c952fe 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -183,6 +183,12 @@ smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
n_vec - first_vec, remaining);
if (rc == -ENOSPC || rc == -EAGAIN) {
+ /*
+ * Catch if a low level driver returns -ENOSPC. This
+ * WARN_ON will be removed by 3.10 if no one reports
+ * seeing this.
+ */
+ WARN_ON_ONCE(rc == -ENOSPC);
i++;
if (i >= 14 || (!server->noblocksnd && (i > 2))) {
cERROR(1, "sends on sock %p stuck for 15 "
diff --git a/fs/compat.c b/fs/compat.c
index b7a24d0ca30d..015e1e1f87c6 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -776,16 +776,16 @@ asmlinkage long compat_sys_mount(const char __user * dev_name,
char *kernel_type;
unsigned long data_page;
char *kernel_dev;
- char *dir_page;
+ struct filename *dir;
int retval;
retval = copy_mount_string(type, &kernel_type);
if (retval < 0)
goto out;
- dir_page = getname(dir_name);
- retval = PTR_ERR(dir_page);
- if (IS_ERR(dir_page))
+ dir = getname(dir_name);
+ retval = PTR_ERR(dir);
+ if (IS_ERR(dir))
goto out1;
retval = copy_mount_string(dev_name, &kernel_dev);
@@ -807,7 +807,7 @@ asmlinkage long compat_sys_mount(const char __user * dev_name,
}
}
- retval = do_mount(kernel_dev, dir_page, kernel_type,
+ retval = do_mount(kernel_dev, dir->name, kernel_type,
flags, (void*)data_page);
out4:
@@ -815,7 +815,7 @@ asmlinkage long compat_sys_mount(const char __user * dev_name,
out3:
kfree(kernel_dev);
out2:
- putname(dir_page);
+ putname(dir);
out1:
kfree(kernel_type);
out:
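
getname() now returns a struct filename rather than a bare string; callers keep the IS_ERR()/PTR_ERR() error handling, read the text through ->name, and still release with putname(). The pattern in isolation:

#include <linux/fs.h>
#include <linux/err.h>

static long example_take_user_path(const char __user *upath)
{
        struct filename *name = getname(upath);

        if (IS_ERR(name))
                return PTR_ERR(name);
        /* ... use name->name as the kernel-side C string ... */
        putname(name);
        return 0;
}
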
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 112e45a17e99..a81147e2e4ef 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -38,6 +38,13 @@
#define elf_addr_t Elf32_Addr
/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t compat_long_t
+#define user_siginfo_t compat_siginfo_t
+#define copy_siginfo_to_user copy_siginfo_to_user32
+
+/*
* The machine-dependent core note format types are defined in elfcore-compat.h,
* which requires asm/elf.h to define compat_elf_gregset_t et al.
*/
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index f5054025f9da..4c6285fff598 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
err = get_user(palp, &up->palette);
err |= get_user(length, &up->length);
+ if (err)
+ return -EFAULT;
up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
err = put_user(compat_ptr(palp), &up_native->palette);
diff --git a/fs/coredump.c b/fs/coredump.c
index f045bbad6822..ce47379bfa61 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -14,6 +14,7 @@
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
+#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
@@ -39,6 +40,7 @@
#include <trace/events/task.h>
#include "internal.h"
+#include "coredump.h"
#include <trace/events/sched.h>
@@ -147,7 +149,7 @@ put_exe_file:
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
*/
-static int format_corename(struct core_name *cn, long signr)
+static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
const struct cred *cred = current_cred();
const char *pat_ptr = core_pattern;
@@ -192,9 +194,13 @@ static int format_corename(struct core_name *cn, long signr)
case 'g':
err = cn_printf(cn, "%d", cred->gid);
break;
+ case 'd':
+ err = cn_printf(cn, "%d",
+ __get_dumpable(cprm->mm_flags));
+ break;
/* signal that caused the coredump */
case 's':
- err = cn_printf(cn, "%ld", signr);
+ err = cn_printf(cn, "%ld", cprm->siginfo->si_signo);
break;
/* UNIX time of coredump */
case 't': {
@@ -444,14 +450,15 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
cp->file = files[1];
- replace_fd(0, files[0], 0);
+ err = replace_fd(0, files[0], 0);
+ fput(files[0]);
/* and disallow core files too */
current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
- return 0;
+ return err;
}
-void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
{
struct core_state core_state;
struct core_name cn;
@@ -466,7 +473,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
bool need_nonrelative = false;
static atomic_t core_dump_count = ATOMIC_INIT(0);
struct coredump_params cprm = {
- .signr = signr,
+ .siginfo = siginfo,
.regs = regs,
.limit = rlimit(RLIMIT_CORE),
/*
@@ -477,7 +484,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
.mm_flags = mm->flags,
};
- audit_core_dumps(signr);
+ audit_core_dumps(siginfo->si_signo);
binfmt = mm->binfmt;
if (!binfmt || !binfmt->core_dump)
@@ -501,7 +508,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
need_nonrelative = true;
}
- retval = coredump_wait(exit_code, &core_state);
+ retval = coredump_wait(siginfo->si_signo, &core_state);
if (retval < 0)
goto fail_creds;
@@ -513,7 +520,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
*/
clear_thread_flag(TIF_SIGPENDING);
- ispipe = format_corename(&cn, signr);
+ ispipe = format_corename(&cn, &cprm);
if (ispipe) {
int dump_count;
diff --git a/fs/coredump.h b/fs/coredump.h
new file mode 100644
index 000000000000..e39ff072110d
--- /dev/null
+++ b/fs/coredump.h
@@ -0,0 +1,6 @@
+#ifndef _FS_COREDUMP_H
+#define _FS_COREDUMP_H
+
+extern int __get_dumpable(unsigned long mm_flags);
+
+#endif
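
The new %d specifier in core_pattern expands to the crashing task's dumpable value (as returned by __get_dumpable()), so a collector can tell ordinary dumps apart from suid or otherwise protected ones. A userspace sketch of configuring it, assuming the usual procfs path:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");

        if (!f)
                return 1;
        /* %e = comm, %p = pid, %d = dumpable flag added above */
        fputs("/var/crash/core.%e.%p.%d", f);
        return fclose(f) ? 1 : 0;
}
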
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index cd96649bfe62..da72250ddc1c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -346,7 +346,7 @@ static inline struct epitem *ep_item_from_epqueue(poll_table *p)
/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
- return op != EPOLL_CTL_DEL;
+ return op == EPOLL_CTL_ADD || op == EPOLL_CTL_MOD;
}
/* Initialize the poll safe wake up structure */
@@ -676,6 +676,34 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
return 0;
}
+/*
+ * Disables a "struct epitem" in the eventpoll set. Returns -EBUSY if the item
+ * had no event flags set, indicating that another thread may be currently
+ * handling that item's events (in the case that EPOLLONESHOT was being
+ * used). Otherwise a zero result indicates that the item has been disabled
+ * from receiving events. A disabled item may be re-enabled via
+ * EPOLL_CTL_MOD. Must be called with "mtx" held.
+ */
+static int ep_disable(struct eventpoll *ep, struct epitem *epi)
+{
+ int result = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ if (epi->event.events & ~EP_PRIVATE_BITS) {
+ if (ep_is_linked(&epi->rdllink))
+ list_del_init(&epi->rdllink);
+ /* Ensure ep_poll_callback will not add epi back onto ready
+ list: */
+ epi->event.events &= EP_PRIVATE_BITS;
+ } else
+ result = -EBUSY;
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ return result;
+}
+
static void ep_free(struct eventpoll *ep)
{
struct rb_node *rbp;
@@ -1020,8 +1048,6 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
rb_insert_color(&epi->rbn, &ep->rbr);
}
-
-
#define PATH_ARR_SIZE 5
/*
* These are the number paths of length 1 to 5, that we are allowing to emanate
@@ -1787,6 +1813,12 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
} else
error = -ENOENT;
break;
+ case EPOLL_CTL_DISABLE:
+ if (epi)
+ error = ep_disable(ep, epi);
+ else
+ error = -ENOENT;
+ break;
}
mutex_unlock(&ep->mtx);
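
EPOLL_CTL_DISABLE, as added here, lets one thread take an item out of service and learn from the return value whether another thread might still be delivering its event: 0 means the item is disabled and safe to tear down, -EBUSY means an EPOLLONESHOT delivery may be in flight. A userspace sketch, with the opcode value assumed from this patch's uapi addition:

#include <sys/epoll.h>
#include <errno.h>

#ifndef EPOLL_CTL_DISABLE
#define EPOLL_CTL_DISABLE 4     /* assumed value from this patch series */
#endif

/* Returns 1 when the fd is disabled and safe to free, 0 when an event
 * may still be in flight, -1 on other errors (e.g. ENOENT). */
static int example_try_disable(int epfd, int fd)
{
        if (epoll_ctl(epfd, EPOLL_CTL_DISABLE, fd, NULL) == 0)
                return 1;
        return (errno == EBUSY) ? 0 : -1;
}
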
diff --git a/fs/exec.c b/fs/exec.c
index 48fb26ef8a1b..0039055b1fc6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -59,10 +59,10 @@
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
-#include <asm/exec.h>
#include <trace/events/task.h>
#include "internal.h"
+#include "coredump.h"
#include <trace/events/sched.h>
@@ -105,7 +105,7 @@ static inline void put_binfmt(struct linux_binfmt * fmt)
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
struct file *file;
- char *tmp = getname(library);
+ struct filename *tmp = getname(library);
int error = PTR_ERR(tmp);
static const struct open_flags uselib_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
@@ -391,7 +391,7 @@ struct user_arg_ptr {
union {
const char __user *const __user *native;
#ifdef CONFIG_COMPAT
- compat_uptr_t __user *compat;
+ const compat_uptr_t __user *compat;
#endif
} ptr;
};
@@ -602,7 +602,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
* process cleanup to remove whatever mess we made.
*/
if (length != move_page_tables(vma, old_start,
- vma, new_start, length))
+ vma, new_start, length, false))
return -ENOMEM;
lru_add_drain();
@@ -751,13 +751,14 @@ struct file *open_exec(const char *name)
{
struct file *file;
int err;
+ struct filename tmp = { .name = name };
static const struct open_flags open_exec_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_EXEC | MAY_OPEN,
.intent = LOOKUP_OPEN
};
- file = do_filp_open(AT_FDCWD, name, &open_exec_flags, LOOKUP_FOLLOW);
+ file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags, LOOKUP_FOLLOW);
if (IS_ERR(file))
goto out;
@@ -877,9 +878,11 @@ static int de_thread(struct task_struct *tsk)
sig->notify_count--;
while (sig->notify_count) {
- __set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_KILLABLE);
spin_unlock_irq(lock);
schedule();
+ if (unlikely(__fatal_signal_pending(tsk)))
+ goto killed;
spin_lock_irq(lock);
}
spin_unlock_irq(lock);
@@ -897,9 +900,11 @@ static int de_thread(struct task_struct *tsk)
write_lock_irq(&tasklist_lock);
if (likely(leader->exit_state))
break;
- __set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_KILLABLE);
write_unlock_irq(&tasklist_lock);
schedule();
+ if (unlikely(__fatal_signal_pending(tsk)))
+ goto killed;
}
/*
@@ -993,6 +998,14 @@ no_thread_group:
BUG_ON(!thread_group_leader(tsk));
return 0;
+
+killed:
+ /* protects against exit_notify() and __exit_signal() */
+ read_lock(&tasklist_lock);
+ sig->group_exit_task = NULL;
+ sig->notify_count = 0;
+ read_unlock(&tasklist_lock);
+ return -EAGAIN;
}
char *get_task_comm(char *buf, struct task_struct *tsk)
@@ -1070,7 +1083,8 @@ int flush_old_exec(struct linux_binprm * bprm)
bprm->mm = NULL; /* We're using it now */
set_fs(USER_DS);
- current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD);
+ current->flags &=
+ ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE);
flush_thread();
current->personality &= ~bprm->per_clear;
@@ -1096,7 +1110,7 @@ void setup_new_exec(struct linux_binprm * bprm)
current->sas_ss_sp = current->sas_ss_size = 0;
if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
- set_dumpable(current->mm, 1);
+ set_dumpable(current->mm, SUID_DUMPABLE_ENABLED);
else
set_dumpable(current->mm, suid_dumpable);
@@ -1561,9 +1575,9 @@ int do_execve(const char *filename,
}
#ifdef CONFIG_COMPAT
-int compat_do_execve(char *filename,
- compat_uptr_t __user *__argv,
- compat_uptr_t __user *__envp,
+int compat_do_execve(const char *filename,
+ const compat_uptr_t __user *__argv,
+ const compat_uptr_t __user *__envp,
struct pt_regs *regs)
{
struct user_arg_ptr argv = {
@@ -1645,3 +1659,56 @@ int get_dumpable(struct mm_struct *mm)
{
return __get_dumpable(mm->flags);
}
+
+#ifdef __ARCH_WANT_SYS_EXECVE
+SYSCALL_DEFINE3(execve,
+ const char __user *, filename,
+ const char __user *const __user *, argv,
+ const char __user *const __user *, envp)
+{
+ struct filename *path = getname(filename);
+ int error = PTR_ERR(path);
+ if (!IS_ERR(path)) {
+ error = do_execve(path->name, argv, envp, current_pt_regs());
+ putname(path);
+ }
+ return error;
+}
+#ifdef CONFIG_COMPAT
+asmlinkage long compat_sys_execve(const char __user * filename,
+ const compat_uptr_t __user * argv,
+ const compat_uptr_t __user * envp)
+{
+ struct filename *path = getname(filename);
+ int error = PTR_ERR(path);
+ if (!IS_ERR(path)) {
+ error = compat_do_execve(path->name, argv, envp,
+ current_pt_regs());
+ putname(path);
+ }
+ return error;
+}
+#endif
+#endif
+
+#ifdef __ARCH_WANT_KERNEL_EXECVE
+int kernel_execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[])
+{
+ struct pt_regs *p = current_pt_regs();
+ int ret;
+
+ ret = do_execve(filename,
+ (const char __user *const __user *)argv,
+ (const char __user *const __user *)envp, p);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We were successful. We won't be returning to our caller, but
+ * instead to user space by manipulating the kernel stack.
+ */
+ ret_from_kernel_execve(p);
+}
+#endif
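
The de_thread() hunks above replace TASK_UNINTERRUPTIBLE waits with TASK_KILLABLE plus a __fatal_signal_pending() check, so an exec racing with a core dump or exit can be killed instead of hanging in D state. The generic shape of that wait, sketched outside its locking context:

#include <linux/sched.h>

/* Killable-wait sketch: sleep until cond() holds or a fatal signal
 * arrives, in which case the caller unwinds as de_thread() now does. */
static int example_wait_killable(int (*cond)(void *), void *arg)
{
        for (;;) {
                __set_current_state(TASK_KILLABLE);
                if (cond(arg))
                        break;
                schedule();
                if (__fatal_signal_pending(current)) {
                        __set_current_state(TASK_RUNNING);
                        return -EAGAIN;
                }
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
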
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 1585db1aa365..f936cb50dc0d 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -814,8 +814,8 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
struct bio *bio;
if (per_dev != master_dev) {
- bio = bio_kmalloc(GFP_KERNEL,
- master_dev->bio->bi_max_vecs);
+ bio = bio_clone_kmalloc(master_dev->bio,
+ GFP_KERNEL);
if (unlikely(!bio)) {
ORE_DBGMSG(
"Failed to allocate BIO size=%u\n",
@@ -824,7 +824,6 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
goto out;
}
- __bio_clone(bio, master_dev->bio);
bio->bi_bdev = NULL;
bio->bi_next = NULL;
per_dev->offset = master_dev->offset;
diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
index 5f376d14fdcc..b963f38ac298 100644
--- a/fs/exofs/ore_raid.c
+++ b/fs/exofs/ore_raid.c
@@ -203,7 +203,7 @@ static unsigned _sp2d_min_pg(struct __stripe_pages_2d *sp2d)
static unsigned _sp2d_max_pg(struct __stripe_pages_2d *sp2d)
{
- unsigned p;
+ int p;
for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 59e3bbfac0b1..5e59280d42d7 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -389,8 +389,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait)
if (unlikely(ret))
goto out;
- lock_super(sb);
-
ios->length = offsetof(struct exofs_fscb, s_dev_table_oid);
memset(fscb, 0, ios->length);
fscb->s_nextid = cpu_to_le64(sbi->s_nextid);
@@ -406,8 +404,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait)
if (unlikely(ret))
EXOFS_ERR("%s: ore_write failed.\n", __func__);
-
- unlock_super(sb);
out:
EXOFS_DBGMSG("s_nextid=0x%llx ret=%d\n", _LLU(sbi->s_nextid), ret);
ore_put_io_state(ios);
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
index 5a7b691e748b..1b4f2f95fc37 100644
--- a/fs/exofs/sys.c
+++ b/fs/exofs/sys.c
@@ -80,8 +80,13 @@ static ssize_t uri_show(struct exofs_dev *edp, char *buf)
static ssize_t uri_store(struct exofs_dev *edp, const char *buf, size_t len)
{
+ uint8_t *new_uri;
+
edp->urilen = strlen(buf) + 1;
- edp->uri = krealloc(edp->uri, edp->urilen, GFP_KERNEL);
+ new_uri = krealloc(edp->uri, edp->urilen, GFP_KERNEL);
+ if (new_uri == NULL)
+ return -ENOMEM;
+ edp->uri = new_uri;
strncpy(edp->uri, buf, edp->urilen);
return edp->urilen;
}
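
uri_store() previously assigned krealloc()'s result straight back to edp->uri, losing the old buffer (and leaking it) when the allocation failed; staging the result in a temporary keeps the original pointer valid on failure. The idiom in general form:

#include <linux/slab.h>
#include <linux/errno.h>

/* Safe krealloc(): on failure *bufp is untouched and still owned. */
static int example_grow_buffer(char **bufp, size_t newlen)
{
        char *tmp = krealloc(*bufp, newlen, GFP_KERNEL);

        if (!tmp)
                return -ENOMEM;
        *bufp = tmp;
        return 0;
}
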
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 6c205d0c565b..fa04d023177e 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -469,7 +469,7 @@ static int parse_options(char *options, struct super_block *sb)
uid = make_kuid(current_user_ns(), option);
if (!uid_valid(uid)) {
ext2_msg(sb, KERN_ERR, "Invalid uid value %d", option);
- return -1;
+ return 0;
}
sbi->s_resuid = uid;
@@ -480,7 +480,7 @@ static int parse_options(char *options, struct super_block *sb)
gid = make_kgid(current_user_ns(), option);
if (!gid_valid(gid)) {
ext2_msg(sb, KERN_ERR, "Invalid gid value %d", option);
- return -1;
+ return 0;
}
sbi->s_resgid = gid;
break;
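
The ext2 (and matching ext3) parse_options() routines are boolean: callers test the result with if (!parse_options(...)), so 1 means success and 0 means a bad option. Returning -1 was nonzero and therefore read as success, which is why the fix is 0 rather than an errno. A sketch of the file-local caller convention this implies:

#include <linux/fs.h>
#include <linux/errno.h>

/* Hypothetical mount-time caller, showing why -1 was misread: */
static int example_fill_super(struct super_block *sb, void *data)
{
        if (!parse_options(data, sb))   /* 0 = failure, nonzero = ok */
                return -EINVAL;
        return 0;
}
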
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 8f4fddac01a6..890b8947c546 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -46,8 +46,7 @@ static struct buffer_head *ext3_append(handle_t *handle,
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
- bh = ext3_bread(handle, inode, *block, 1, err);
- if (bh) {
+ if ((bh = ext3_dir_bread(handle, inode, *block, 1, err))) {
inode->i_size += inode->i_sb->s_blocksize;
EXT3_I(inode)->i_disksize = inode->i_size;
*err = ext3_journal_get_write_access(handle, bh);
@@ -339,8 +338,10 @@ dx_probe(struct qstr *entry, struct inode *dir,
u32 hash;
frame->bh = NULL;
- if (!(bh = ext3_bread (NULL,dir, 0, 0, err)))
+ if (!(bh = ext3_dir_bread(NULL, dir, 0, 0, err))) {
+ *err = ERR_BAD_DX_DIR;
goto fail;
+ }
root = (struct dx_root *) bh->b_data;
if (root->info.hash_version != DX_HASH_TEA &&
root->info.hash_version != DX_HASH_HALF_MD4 &&
@@ -436,8 +437,10 @@ dx_probe(struct qstr *entry, struct inode *dir,
frame->entries = entries;
frame->at = at;
if (!indirect--) return frame;
- if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
+ if (!(bh = ext3_dir_bread(NULL, dir, dx_get_block(at), 0, err))) {
+ *err = ERR_BAD_DX_DIR;
goto fail2;
+ }
at = entries = ((struct dx_node *) bh->b_data)->entries;
if (dx_get_limit(entries) != dx_node_limit (dir)) {
ext3_warning(dir->i_sb, __func__,
@@ -535,8 +538,8 @@ static int ext3_htree_next_block(struct inode *dir, __u32 hash,
* block so no check is necessary
*/
while (num_frames--) {
- if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at),
- 0, &err)))
+ if (!(bh = ext3_dir_bread(NULL, dir, dx_get_block(p->at),
+ 0, &err)))
return err; /* Failure */
p++;
brelse (p->bh);
@@ -559,10 +562,11 @@ static int htree_dirblock_to_tree(struct file *dir_file,
{
struct buffer_head *bh;
struct ext3_dir_entry_2 *de, *top;
- int err, count = 0;
+ int err = 0, count = 0;
dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
- if (!(bh = ext3_bread (NULL, dir, block, 0, &err)))
+
+ if (!(bh = ext3_dir_bread(NULL, dir, block, 0, &err)))
return err;
de = (struct ext3_dir_entry_2 *) bh->b_data;
@@ -976,7 +980,7 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
return NULL;
do {
block = dx_get_block(frame->at);
- if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
+ if (!(bh = ext3_dir_bread (NULL, dir, block, 0, err)))
goto errout;
retval = search_dirblock(bh, dir, entry,
@@ -1458,9 +1462,9 @@ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
}
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0; block < blocks; block++) {
- bh = ext3_bread(handle, dir, block, 0, &retval);
- if(!bh)
+ if (!(bh = ext3_dir_bread(handle, dir, block, 0, &retval)))
return retval;
+
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
if (retval != -ENOSPC)
return retval;
@@ -1500,7 +1504,7 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
entries = frame->entries;
at = frame->at;
- if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
+ if (!(bh = ext3_dir_bread(handle, dir, dx_get_block(frame->at), 0, &err)))
goto cleanup;
BUFFER_TRACE(bh, "get_write_access");
@@ -1790,8 +1794,7 @@ retry:
inode->i_op = &ext3_dir_inode_operations;
inode->i_fop = &ext3_dir_operations;
inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
- dir_block = ext3_bread (handle, inode, 0, 1, &err);
- if (!dir_block)
+ if (!(dir_block = ext3_dir_bread(handle, inode, 0, 1, &err)))
goto out_clear_inode;
BUFFER_TRACE(dir_block, "get_write_access");
@@ -1859,7 +1862,7 @@ static int empty_dir (struct inode * inode)
sb = inode->i_sb;
if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
- !(bh = ext3_bread (NULL, inode, 0, 0, &err))) {
+ !(bh = ext3_dir_bread(NULL, inode, 0, 0, &err))) {
if (err)
ext3_error(inode->i_sb, __func__,
"error %d reading directory #%lu offset 0",
@@ -1890,9 +1893,8 @@ static int empty_dir (struct inode * inode)
(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
err = 0;
brelse (bh);
- bh = ext3_bread (NULL, inode,
- offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err);
- if (!bh) {
+ if (!(bh = ext3_dir_bread (NULL, inode,
+ offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err))) {
if (err)
ext3_error(sb, __func__,
"error %d reading directory"
@@ -2388,7 +2390,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
goto end_rename;
}
retval = -EIO;
- dir_bh = ext3_bread (handle, old_inode, 0, 0, &retval);
+ dir_bh = ext3_dir_bread(handle, old_inode, 0, 0, &retval);
if (!dir_bh)
goto end_rename;
if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
diff --git a/fs/ext3/namei.h b/fs/ext3/namei.h
index f2ce2b0065c9..46304d8c9f0a 100644
--- a/fs/ext3/namei.h
+++ b/fs/ext3/namei.h
@@ -6,3 +6,22 @@
*/
extern struct dentry *ext3_get_parent(struct dentry *child);
+
+static inline struct buffer_head *ext3_dir_bread(handle_t *handle,
+ struct inode *inode,
+ int block, int create,
+ int *err)
+{
+ struct buffer_head *bh;
+
+ bh = ext3_bread(handle, inode, block, create, err);
+
+ if (!bh && !(*err)) {
+ *err = -EIO;
+ ext3_error(inode->i_sb, __func__,
+ "Directory hole detected on inode %lu\n",
+ inode->i_ino);
+ return NULL;
+ }
+ return bh;
+}
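
With this wrapper, a hole in a directory (ext3_bread() returning NULL without setting an error) is promoted to an explicit -EIO plus an ext3_error() report, which is why every directory-data ext3_bread() call in namei.c above could be converted mechanically.
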
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 17ae5c83d234..5366393528df 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1001,7 +1001,7 @@ static int parse_options (char *options, struct super_block *sb,
uid = make_kuid(current_user_ns(), option);
if (!uid_valid(uid)) {
ext3_msg(sb, KERN_ERR, "Invalid uid value %d", option);
- return -1;
+ return 0;
}
sbi->s_resuid = uid;
@@ -1012,7 +1012,7 @@ static int parse_options (char *options, struct super_block *sb,
gid = make_kgid(current_user_ns(), option);
if (!gid_valid(gid)) {
ext3_msg(sb, KERN_ERR, "Invalid gid value %d", option);
- return -1;
+ return 0;
}
sbi->s_resgid = gid;
break;
@@ -2578,11 +2578,9 @@ out:
static int ext3_unfreeze(struct super_block *sb)
{
if (!(sb->s_flags & MS_RDONLY)) {
- lock_super(sb);
/* Reset the needs_recovery flag before the fs is unlocked. */
EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
- unlock_super(sb);
journal_unlock_updates(EXT3_SB(sb)->s_journal);
}
return 0;
@@ -2602,7 +2600,6 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
#endif
/* Store the original options */
- lock_super(sb);
old_sb_flags = sb->s_flags;
old_opts.s_mount_opt = sbi->s_mount_opt;
old_opts.s_resuid = sbi->s_resuid;
@@ -2708,8 +2705,6 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
old_opts.s_qf_names[i] != sbi->s_qf_names[i])
kfree(old_opts.s_qf_names[i]);
#endif
- unlock_super(sb);
-
if (enable_quota)
dquot_resume(sb, -1);
return 0;
@@ -2728,7 +2723,6 @@ restore_opts:
sbi->s_qf_names[i] = old_opts.s_qf_names[i];
}
#endif
- unlock_super(sb);
return err;
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 1b5089067d01..cf1821784a16 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -174,8 +174,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
ext4_free_inodes_set(sb, gdp, 0);
ext4_itable_unused_set(sb, gdp, 0);
memset(bh->b_data, 0xff, sb->s_blocksize);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
return;
}
memset(bh->b_data, 0, sb->s_blocksize);
@@ -212,8 +211,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
*/
ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
sb->s_blocksize * 8, bh->b_data);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
}
@@ -350,7 +348,7 @@ void ext4_validate_block_bitmap(struct super_block *sb,
return;
}
if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
- desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) {
+ desc, bh))) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
return;
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index 5c2d1813ebe9..3285aa5a706a 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -58,11 +58,12 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
__u32 hi;
__u32 provided, calculated;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
@@ -84,8 +85,9 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz)
+ struct buffer_head *bh)
{
+ int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
__u32 csum;
struct ext4_sb_info *sbi = EXT4_SB(sb);
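
Dropping the sz argument fixes the size, not just the signature: with bigalloc enabled the block bitmap covers clusters, so the checksum must span EXT4_CLUSTERS_PER_GROUP(sb) / 8 bytes, while the callers above were passing EXT4_BLOCKS_PER_GROUP(sb) / 8. Computing the size inside the helper removes the mismatch.
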
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c3411d4ce2da..3c20de1d59d0 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -186,7 +186,6 @@ struct mpage_da_data {
#define EXT4_IO_END_ERROR 0x0002
#define EXT4_IO_END_QUEUED 0x0004
#define EXT4_IO_END_DIRECT 0x0008
-#define EXT4_IO_END_IN_FSYNC 0x0010
struct ext4_io_page {
struct page *p_page;
@@ -912,9 +911,7 @@ struct ext4_inode_info {
struct list_head i_completed_io_list;
spinlock_t i_completed_io_lock;
atomic_t i_ioend_count; /* Number of outstanding io_end structs */
- /* current io_end structure for async DIO write*/
- ext4_io_end_t *cur_aio_dio;
- atomic_t i_aiodio_unwritten; /* Nr. of inflight conversions pending */
+ atomic_t i_unwritten; /* Nr. of inflight conversions pending */
spinlock_t i_block_reservation_lock;
@@ -1233,6 +1230,7 @@ struct ext4_sb_info {
spinlock_t s_md_lock;
unsigned short *s_mb_offsets;
unsigned int *s_mb_maxs;
+ unsigned int s_group_info_size;
/* tunables */
unsigned long s_stripe;
@@ -1243,6 +1241,7 @@ struct ext4_sb_info {
unsigned int s_mb_order2_reqs;
unsigned int s_mb_group_prealloc;
unsigned int s_max_writeback_mb_bump;
+ unsigned int s_max_dir_size_kb;
/* where last allocation was done - for stream allocation */
unsigned long s_mb_last_group;
unsigned long s_mb_last_start;
@@ -1270,8 +1269,12 @@ struct ext4_sb_info {
unsigned long s_sectors_written_start;
u64 s_kbytes_written;
+ /* the size of the zero-out chunk */
+ unsigned int s_extent_max_zeroout_kb;
+
unsigned int s_log_groups_per_flex;
struct flex_groups *s_flex_groups;
+ ext4_group_t s_flex_groups_allocated;
/* workqueue for dio unwritten */
struct workqueue_struct *dio_unwritten_wq;
@@ -1328,10 +1331,20 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode,
{
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
io_end->flag |= EXT4_IO_END_UNWRITTEN;
- atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+ atomic_inc(&EXT4_I(inode)->i_unwritten);
}
}
+static inline ext4_io_end_t *ext4_inode_aio(struct inode *inode)
+{
+ return inode->i_private;
+}
+
+static inline void ext4_inode_aio_set(struct inode *inode, ext4_io_end_t *io)
+{
+ inode->i_private = io;
+}
+
/*
* Inode dynamic state flags
*/
@@ -1345,6 +1358,8 @@ enum {
EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
EXT4_STATE_NEWENTRY, /* File just added to dir */
EXT4_STATE_DELALLOC_RESERVED, /* blks already reserved for delalloc */
+ EXT4_STATE_DIOREAD_LOCK, /* Disable support for dio read
+ nolocking */
};
#define EXT4_INODE_BIT_FNS(name, field, offset) \
@@ -1867,10 +1882,10 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
struct buffer_head *bh, int sz);
void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
+ struct buffer_head *bh);
int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
- struct buffer_head *bh, int sz);
+ struct buffer_head *bh);
/* balloc.c */
extern void ext4_validate_block_bitmap(struct super_block *sb,
@@ -1932,7 +1947,7 @@ extern void ext4_htree_free_dir_info(struct dir_private_info *p);
/* fsync.c */
extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
-extern int ext4_flush_completed_IO(struct inode *);
+extern int ext4_flush_unwritten_io(struct inode *);
/* hash.c */
extern int ext4fs_dirhash(const char *name, int len, struct
@@ -1966,6 +1981,8 @@ extern void ext4_exit_mballoc(void);
extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t block,
unsigned long count, int flags);
+extern int ext4_mb_alloc_groupinfo(struct super_block *sb,
+ ext4_group_t ngroups);
extern int ext4_mb_add_groupinfo(struct super_block *sb,
ext4_group_t i, struct ext4_group_desc *desc);
extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
@@ -2046,11 +2063,12 @@ extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
extern int ext4_calculate_overhead(struct super_block *sb);
extern int ext4_superblock_csum_verify(struct super_block *sb,
struct ext4_super_block *es);
-extern void ext4_superblock_csum_set(struct super_block *sb,
- struct ext4_super_block *es);
+extern void ext4_superblock_csum_set(struct super_block *sb);
extern void *ext4_kvmalloc(size_t size, gfp_t flags);
extern void *ext4_kvzalloc(size_t size, gfp_t flags);
extern void ext4_kvfree(void *ptr);
+extern int ext4_alloc_flex_bg_array(struct super_block *sb,
+ ext4_group_t ngroup);
extern __printf(4, 5)
void __ext4_error(struct super_block *, const char *, unsigned int,
const char *, ...);
@@ -2352,6 +2370,7 @@ extern const struct file_operations ext4_dir_operations;
extern const struct inode_operations ext4_file_inode_operations;
extern const struct file_operations ext4_file_operations;
extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
+extern void ext4_unwritten_wait(struct inode *inode);
/* namei.c */
extern const struct inode_operations ext4_dir_inode_operations;
@@ -2400,11 +2419,11 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
/* page-io.c */
extern int __init ext4_init_pageio(void);
+extern void ext4_add_complete_io(ext4_io_end_t *io_end);
extern void ext4_exit_pageio(void);
extern void ext4_ioend_wait(struct inode *);
extern void ext4_free_io_end(ext4_io_end_t *io);
extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
-extern int ext4_end_io_nolock(ext4_io_end_t *io);
extern void ext4_io_submit(struct ext4_io_submit *io);
extern int ext4_bio_write_page(struct ext4_io_submit *io,
struct page *page,
@@ -2452,6 +2471,21 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
}
+/*
+ * Disable DIO read nolock optimization, so new dioreaders will be forced
+ * to grab i_mutex
+ */
+static inline void ext4_inode_block_unlocked_dio(struct inode *inode)
+{
+ ext4_set_inode_state(inode, EXT4_STATE_DIOREAD_LOCK);
+ smp_mb();
+}
+static inline void ext4_inode_resume_unlocked_dio(struct inode *inode)
+{
+ smp_mb();
+ ext4_clear_inode_state(inode, EXT4_STATE_DIOREAD_LOCK);
+}
+
#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
/* For ioend & aio unwritten conversion wait queues */
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index bfa65b49d424..b4323ba846b5 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -143,17 +143,13 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
int err = 0;
+ ext4_superblock_csum_set(sb);
if (ext4_handle_valid(handle)) {
- ext4_superblock_csum_set(sb,
- (struct ext4_super_block *)bh->b_data);
err = jbd2_journal_dirty_metadata(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
- } else {
- ext4_superblock_csum_set(sb,
- (struct ext4_super_block *)bh->b_data);
+ } else
mark_buffer_dirty(bh);
- }
return err;
}
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index aabbb3f53683..7011ac967208 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -52,6 +52,9 @@
#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
+#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
+#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
+
static __le32 ext4_extent_block_csum(struct inode *inode,
struct ext4_extent_header *eh)
{
@@ -1177,7 +1180,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
- neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
+ le16_add_cpu(&neh->eh_depth, 1);
ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
@@ -1656,16 +1659,60 @@ static int ext4_ext_try_to_merge_right(struct inode *inode,
}
/*
+ * This function does a very simple check to see if we can collapse
+ * an extent tree with a single extent tree leaf block into the inode.
+ */
+static void ext4_ext_try_to_merge_up(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path)
+{
+ size_t s;
+ unsigned max_root = ext4_ext_space_root(inode, 0);
+ ext4_fsblk_t blk;
+
+ if ((path[0].p_depth != 1) ||
+ (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
+ (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
+ return;
+
+ /*
+ * We need to modify the block allocation bitmap and the block
+ * group descriptor to release the extent tree block. If we
+ * can't get the journal credits, give up.
+ */
+ if (ext4_journal_extend(handle, 2))
+ return;
+
+ /*
+ * Copy the extent data up to the inode
+ */
+ blk = ext4_idx_pblock(path[0].p_idx);
+ s = le16_to_cpu(path[1].p_hdr->eh_entries) *
+ sizeof(struct ext4_extent_idx);
+ s += sizeof(struct ext4_extent_header);
+
+ memcpy(path[0].p_hdr, path[1].p_hdr, s);
+ path[0].p_depth = 0;
+ path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
+ (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
+ path[0].p_hdr->eh_max = cpu_to_le16(max_root);
+
+ brelse(path[1].p_bh);
+ ext4_free_blocks(handle, inode, NULL, blk, 1,
+ EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
+}
+
+/*
* This function tries to merge the @ex extent to neighbours in the tree.
* return 1 if merge left else 0.
*/
-static int ext4_ext_try_to_merge(struct inode *inode,
+static void ext4_ext_try_to_merge(handle_t *handle,
+ struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex) {
struct ext4_extent_header *eh;
unsigned int depth;
int merge_done = 0;
- int ret = 0;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
@@ -1675,9 +1722,9 @@ static int ext4_ext_try_to_merge(struct inode *inode,
merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
if (!merge_done)
- ret = ext4_ext_try_to_merge_right(inode, path, ex);
+ (void) ext4_ext_try_to_merge_right(inode, path, ex);
- return ret;
+ ext4_ext_try_to_merge_up(handle, inode, path);
}
/*
@@ -1893,7 +1940,7 @@ has_space:
merge:
/* try to merge extents */
if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
- ext4_ext_try_to_merge(inode, path, nearex);
+ ext4_ext_try_to_merge(handle, inode, path, nearex);
/* time to correct all indexes above */
@@ -1901,7 +1948,7 @@ merge:
if (err)
goto cleanup;
- err = ext4_ext_dirty(handle, inode, path + depth);
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
cleanup:
if (npath) {
@@ -2092,13 +2139,10 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
}
/*
- * ext4_ext_check_cache()
+ * ext4_ext_in_cache()
* Checks to see if the given block is in the cache.
* If it is, the cached extent is stored in the given
- * cache extent pointer. If the cached extent is a hole,
- * this routine should be used instead of
- * ext4_ext_in_cache if the calling function needs to
- * know the size of the hole.
+ * cache extent pointer.
*
* @inode: The files inode
* @block: The block to look for in the cache
@@ -2107,8 +2151,10 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
*
* Return 0 if cache is invalid; 1 if the cache is valid
*/
-static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
- struct ext4_ext_cache *ex){
+static int
+ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
+ struct ext4_extent *ex)
+{
struct ext4_ext_cache *cex;
struct ext4_sb_info *sbi;
int ret = 0;
@@ -2125,7 +2171,9 @@ static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
goto errout;
if (in_range(block, cex->ec_block, cex->ec_len)) {
- memcpy(ex, cex, sizeof(struct ext4_ext_cache));
+ ex->ee_block = cpu_to_le32(cex->ec_block);
+ ext4_ext_store_pblock(ex, cex->ec_start);
+ ex->ee_len = cpu_to_le16(cex->ec_len);
ext_debug("%u cached by %u:%u:%llu\n",
block,
cex->ec_block, cex->ec_len, cex->ec_start);
@@ -2138,37 +2186,6 @@ errout:
}
/*
- * ext4_ext_in_cache()
- * Checks to see if the given block is in the cache.
- * If it is, the cached extent is stored in the given
- * extent pointer.
- *
- * @inode: The files inode
- * @block: The block to look for in the cache
- * @ex: Pointer where the cached extent will be stored
- * if it contains block
- *
- * Return 0 if cache is invalid; 1 if the cache is valid
- */
-static int
-ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
- struct ext4_extent *ex)
-{
- struct ext4_ext_cache cex;
- int ret = 0;
-
- if (ext4_ext_check_cache(inode, block, &cex)) {
- ex->ee_block = cpu_to_le32(cex.ec_block);
- ext4_ext_store_pblock(ex, cex.ec_start);
- ex->ee_len = cpu_to_le16(cex.ec_len);
- ret = 1;
- }
-
- return ret;
-}
-
-
-/*
* ext4_ext_rm_idx:
* removes index from the index block.
*/
@@ -2274,10 +2291,13 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned short ee_len = ext4_ext_get_actual_len(ex);
ext4_fsblk_t pblk;
- int flags = EXT4_FREE_BLOCKS_FORGET;
+ int flags = 0;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- flags |= EXT4_FREE_BLOCKS_METADATA;
+ flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
+ else if (ext4_should_journal_data(inode))
+ flags |= EXT4_FREE_BLOCKS_FORGET;
+
/*
* For bigalloc file systems, we never free a partial cluster
* at the beginning of the extent. Instead, we make a note
@@ -2572,7 +2592,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
struct ext4_ext_path *path = NULL;
ext4_fsblk_t partial_cluster = 0;
handle_t *handle;
- int i = 0, err;
+ int i = 0, err = 0;
ext_debug("truncate since %u to %u\n", start, end);
@@ -2604,12 +2624,16 @@ again:
return PTR_ERR(path);
}
depth = ext_depth(inode);
+ /* A missing leaf is only possible if the inode has no blocks at all */
ex = path[depth].p_ext;
if (!ex) {
- ext4_ext_drop_refs(path);
- kfree(path);
- path = NULL;
- goto cont;
+ if (depth) {
+ EXT4_ERROR_INODE(inode,
+ "path[%d].p_hdr == NULL",
+ depth);
+ err = -EIO;
+ }
+ goto out;
}
ee_block = le32_to_cpu(ex->ee_block);
@@ -2641,8 +2665,6 @@ again:
goto out;
}
}
-cont:
-
/*
* We start scanning from right side, freeing all the blocks
* after i_size and walking into the tree depth-wise.
@@ -2895,6 +2917,9 @@ static int ext4_split_extent_at(handle_t *handle,
unsigned int ee_len, depth;
int err = 0;
+ BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
+ (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
+
ext_debug("ext4_split_extents_at: inode %lu, logical"
"block %llu\n", inode->i_ino, (unsigned long long)split);
@@ -2924,9 +2949,9 @@ static int ext4_split_extent_at(handle_t *handle,
ext4_ext_mark_initialized(ex);
if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
- ext4_ext_try_to_merge(inode, path, ex);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
- err = ext4_ext_dirty(handle, inode, path + depth);
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
goto out;
}
@@ -2953,13 +2978,20 @@ static int ext4_split_extent_at(handle_t *handle,
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
- err = ext4_ext_zeroout(inode, &orig_ex);
+ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+ if (split_flag & EXT4_EXT_DATA_VALID1)
+ err = ext4_ext_zeroout(inode, ex2);
+ else
+ err = ext4_ext_zeroout(inode, ex);
+ } else
+ err = ext4_ext_zeroout(inode, &orig_ex);
+
if (err)
goto fix_extent_len;
/* update the extent length and mark as initialized */
ex->ee_len = cpu_to_le16(ee_len);
- ext4_ext_try_to_merge(inode, path, ex);
- err = ext4_ext_dirty(handle, inode, path + depth);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
goto out;
} else if (err)
goto fix_extent_len;
@@ -3006,12 +3038,13 @@ static int ext4_split_extent(handle_t *handle,
uninitialized = ext4_ext_is_uninitialized(ex);
if (map->m_lblk + map->m_len < ee_block + ee_len) {
- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
- EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
if (uninitialized)
split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
EXT4_EXT_MARK_UNINIT2;
+ if (split_flag & EXT4_EXT_DATA_VALID2)
+ split_flag1 |= EXT4_EXT_DATA_VALID1;
err = ext4_split_extent_at(handle, inode, path,
map->m_lblk + map->m_len, split_flag1, flags1);
if (err)
@@ -3024,8 +3057,8 @@ static int ext4_split_extent(handle_t *handle,
return PTR_ERR(path);
if (map->m_lblk >= ee_block) {
- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
- EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
+ EXT4_EXT_DATA_VALID2);
if (uninitialized)
split_flag1 |= EXT4_EXT_MARK_UNINIT1;
if (split_flag & EXT4_EXT_MARK_UNINIT2)
@@ -3041,7 +3074,6 @@ out:
return err ? err : map->m_len;
}
-#define EXT4_EXT_ZERO_LEN 7
/*
* This function is called by ext4_ext_map_blocks() if someone tries to write
* to an uninitialized extent. It may result in splitting the uninitialized
@@ -3067,13 +3099,14 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
struct ext4_map_blocks *map,
struct ext4_ext_path *path)
{
+ struct ext4_sb_info *sbi;
struct ext4_extent_header *eh;
struct ext4_map_blocks split_map;
struct ext4_extent zero_ex;
struct ext4_extent *ex;
ext4_lblk_t ee_block, eof_block;
unsigned int ee_len, depth;
- int allocated;
+ int allocated, max_zeroout = 0;
int err = 0;
int split_flag = 0;
@@ -3081,6 +3114,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)map->m_lblk, map->m_len);
+ sbi = EXT4_SB(inode->i_sb);
eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
@@ -3180,9 +3214,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
*/
split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
- /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
- if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
- (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+ if (EXT4_EXT_MAY_ZEROOUT & split_flag)
+ max_zeroout = sbi->s_extent_max_zeroout_kb >>
+ inode->i_sb->s_blocksize_bits;
+
+ /* If the extent is smaller than s_extent_max_zeroout_kb, zero it out directly */
+ if (max_zeroout && (ee_len <= max_zeroout)) {
err = ext4_ext_zeroout(inode, ex);
if (err)
goto out;
@@ -3191,8 +3228,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
if (err)
goto out;
ext4_ext_mark_initialized(ex);
- ext4_ext_try_to_merge(inode, path, ex);
- err = ext4_ext_dirty(handle, inode, path + depth);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
goto out;
}
@@ -3206,9 +3243,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
split_map.m_lblk = map->m_lblk;
split_map.m_len = map->m_len;
- if (allocated > map->m_len) {
- if (allocated <= EXT4_EXT_ZERO_LEN &&
- (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+ if (max_zeroout && (allocated > map->m_len)) {
+ if (allocated <= max_zeroout) {
/* case 3 */
zero_ex.ee_block =
cpu_to_le32(map->m_lblk);
@@ -3220,9 +3256,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
goto out;
split_map.m_lblk = map->m_lblk;
split_map.m_len = allocated;
- } else if ((map->m_lblk - ee_block + map->m_len <
- EXT4_EXT_ZERO_LEN) &&
- (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+ } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
/* case 2 */
if (map->m_lblk != ee_block) {
zero_ex.ee_block = ex->ee_block;
@@ -3242,7 +3276,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
}
allocated = ext4_split_extent(handle, inode, path,
- &split_map, split_flag, 0);
+ &split_map, split_flag, 0);
if (allocated < 0)
err = allocated;
@@ -3256,7 +3290,7 @@ out:
* to an uninitialized extent.
*
* Writing to an uninitialized extent may result in splitting the uninitialized
- * extent into multiple /initialized uninitialized extents (up to three)
+ * extent into multiple initialized/uninitialized extents (up to three)
* There are three possibilities:
* a> There is no split required: Entire extent should be uninitialized
* b> Splits in two extents: Write is happening at either end of the extent
@@ -3303,26 +3337,47 @@ static int ext4_split_unwritten_extents(handle_t *handle,
split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= EXT4_EXT_MARK_UNINIT2;
-
+ if (flags & EXT4_GET_BLOCKS_CONVERT)
+ split_flag |= EXT4_EXT_DATA_VALID2;
flags |= EXT4_GET_BLOCKS_PRE_IO;
return ext4_split_extent(handle, inode, path, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
- struct inode *inode,
- struct ext4_ext_path *path)
+ struct inode *inode,
+ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path)
{
struct ext4_extent *ex;
+ ext4_lblk_t ee_block;
+ unsigned int ee_len;
int depth;
int err = 0;
depth = ext_depth(inode);
ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
- (unsigned long long)le32_to_cpu(ex->ee_block),
- ext4_ext_get_actual_len(ex));
+ (unsigned long long)ee_block, ee_len);
+
+ /* If the extent is larger than requested, a split is required */
+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
+ err = ext4_split_unwritten_extents(handle, inode, map, path,
+ EXT4_GET_BLOCKS_CONVERT);
+ if (err < 0)
+ goto out;
+ ext4_ext_drop_refs(path);
+ path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+ }
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
@@ -3333,10 +3388,10 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
/* note: ext4_ext_correct_indexes() isn't needed here because
* borders are not changed
*/
- ext4_ext_try_to_merge(inode, path, ex);
+ ext4_ext_try_to_merge(handle, inode, path, ex);
/* Mark modified extent as dirty */
- err = ext4_ext_dirty(handle, inode, path + depth);
+ err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
ext4_ext_show_leaf(inode, path);
return err;
@@ -3600,7 +3655,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
{
int ret = 0;
int err = 0;
- ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
+ ext4_io_end_t *io = ext4_inode_aio(inode);
ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
"block %llu, max_blocks %u, flags %x, allocated %u\n",
@@ -3615,6 +3670,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
ret = ext4_split_unwritten_extents(handle, inode, map,
path, flags);
+ if (ret <= 0)
+ goto out;
/*
* Flag the inode(non aio case) or end_io struct (aio case)
* that this IO needs to conversion to written when IO is
@@ -3630,7 +3687,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
}
/* IO end_io complete, convert the filled extent to written */
if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
- ret = ext4_convert_unwritten_extents_endio(handle, inode,
+ ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
path);
if (ret >= 0) {
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -3858,8 +3915,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
- ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
+ ext4_io_end_t *io = ext4_inode_aio(inode);
ext4_lblk_t cluster_offset;
+ int set_unwritten = 0;
ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino);
@@ -4082,13 +4140,8 @@ got_allocated_blocks:
* For non asycn direct IO case, flag the inode state
* that we need to perform conversion when IO is done.
*/
- if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
- if (io)
- ext4_set_io_unwritten_flag(inode, io);
- else
- ext4_set_inode_state(inode,
- EXT4_STATE_DIO_UNWRITTEN);
- }
+ if ((flags & EXT4_GET_BLOCKS_PRE_IO))
+ set_unwritten = 1;
if (ext4_should_dioread_nolock(inode))
map->m_flags |= EXT4_MAP_UNINIT;
}
@@ -4100,6 +4153,15 @@ got_allocated_blocks:
if (!err)
err = ext4_ext_insert_extent(handle, inode, path,
&newex, flags);
+
+ if (!err && set_unwritten) {
+ if (io)
+ ext4_set_io_unwritten_flag(inode, io);
+ else
+ ext4_set_inode_state(inode,
+ EXT4_STATE_DIO_UNWRITTEN);
+ }
+
if (err && free_on_err) {
int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
@@ -4241,7 +4303,7 @@ void ext4_ext_truncate(struct inode *inode)
* finish any pending end_io work so we won't run the risk of
* converting any truncated blocks to initialized later
*/
- ext4_flush_completed_IO(inode);
+ ext4_flush_unwritten_io(inode);
/*
* probably first extent we're gonna free will be last in block
@@ -4401,6 +4463,9 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
*/
if (len <= EXT_UNINIT_MAX_LEN << blkbits)
flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
+
+ /* Prevent races with concurrent unwritten extent conversion */
+ ext4_flush_unwritten_io(inode);
retry:
while (ret >= 0 && ret < max_blocks) {
map.m_lblk = map.m_lblk + ret;
@@ -4769,9 +4834,32 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
loff_t first_page_offset, last_page_offset;
int credits, err = 0;
+ /*
+ * Write out all dirty pages to avoid race conditions,
+ * then release them.
+ */
+ if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ err = filemap_write_and_wait_range(mapping,
+ offset, offset + length - 1);
+
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&inode->i_mutex);
+ /* It's not possible to punch a hole in an append-only file */
+ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
+ err = -EPERM;
+ goto out_mutex;
+ }
+ if (IS_SWAPFILE(inode)) {
+ err = -ETXTBSY;
+ goto out_mutex;
+ }
+
/* No need to punch hole beyond i_size */
if (offset >= inode->i_size)
- return 0;
+ goto out_mutex;
/*
* If the hole extends beyond i_size, set the hole
@@ -4789,35 +4877,26 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
first_page_offset = first_page << PAGE_CACHE_SHIFT;
last_page_offset = last_page << PAGE_CACHE_SHIFT;
- /*
- * Write out all dirty pages to avoid race conditions
- * Then release them.
- */
- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- err = filemap_write_and_wait_range(mapping,
- offset, offset + length - 1);
-
- if (err)
- return err;
- }
-
/* Now release the pages */
if (last_page_offset > first_page_offset) {
truncate_pagecache_range(inode, first_page_offset,
last_page_offset - 1);
}
- /* finish any pending end_io work */
- ext4_flush_completed_IO(inode);
+ /* Wait for all existing dio workers; newcomers will block on i_mutex */
+ ext4_inode_block_unlocked_dio(inode);
+ err = ext4_flush_unwritten_io(inode);
+ if (err)
+ goto out_dio;
+ inode_dio_wait(inode);
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out_dio;
+ }
- err = ext4_orphan_add(handle, inode);
- if (err)
- goto out;
/*
* Now we need to zero out the non-page-aligned data in the
@@ -4903,10 +4982,13 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
up_write(&EXT4_I(inode)->i_data_sem);
out:
- ext4_orphan_del(handle, inode);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
+out_dio:
+ ext4_inode_resume_unlocked_dio(inode);
+out_mutex:
+ mutex_unlock(&inode->i_mutex);
return err;
}
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
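
The punch-hole rework above pulls the dirty-page flush ahead of i_mutex, adds the append-only/immutable/swapfile checks under the lock, and converts the error paths to one label per acquired resource (out, out_dio, out_mutex) so each exit releases exactly what was taken. A compilable sketch of that unwind shape, assuming a pthread mutex stands in for i_mutex; the error values and helper names are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int punch_hole_shape(int immutable, int journal_fails)
    {
        int err = 0;

        pthread_mutex_lock(&i_mutex);
        if (immutable) {
            err = -1;           /* -EPERM in the hunk above */
            goto out_mutex;     /* nothing else acquired yet */
        }
        /* ... block unlocked dio here ... */
        if (journal_fails) {
            err = -2;
            goto out_dio;       /* undo the dio block, then the mutex */
        }
        /* ... journalled truncation work ... */
    out_dio:
        /* ... resume unlocked dio ... */
    out_mutex:
        pthread_mutex_unlock(&i_mutex);
        return err;
    }

    int main(void)
    {
        printf("%d\n", punch_hole_shape(1, 0)); /* -1: bails early */
        printf("%d\n", punch_hole_shape(0, 1)); /* -2: unwinds both */
        printf("%d\n", punch_hole_shape(0, 0)); /*  0: full path */
        return 0;
    }
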
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 3b0e3bdaabfc..bf3966bccd34 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -55,11 +55,11 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
return 0;
}
-static void ext4_aiodio_wait(struct inode *inode)
+void ext4_unwritten_wait(struct inode *inode)
{
wait_queue_head_t *wq = ext4_ioend_wq(inode);
- wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
+ wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}
/*
@@ -116,7 +116,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
"performance will be poor.",
inode->i_ino, current->comm);
mutex_lock(ext4_aio_mutex(inode));
- ext4_aiodio_wait(inode);
+ ext4_unwritten_wait(inode);
}
BUG_ON(iocb->ki_pos != pos);
@@ -207,6 +207,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
static const struct vm_operations_struct ext4_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = ext4_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
@@ -217,7 +218,6 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &ext4_file_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
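
ext4_unwritten_wait() above sleeps on the inode's ioend wait queue until the renamed i_unwritten counter drains to zero. A hedged userspace analogue, assuming pthreads in place of the kernel wait queue; all names are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int unwritten = 1;             /* stand-in for i_unwritten */

    static void unwritten_wait(void)      /* cf. ext4_unwritten_wait() */
    {
        pthread_mutex_lock(&lock);
        while (unwritten != 0)            /* wait_event(*wq, cnt == 0) */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
    }

    static void *io_done(void *arg)
    {
        (void)arg;
        sleep(1);                         /* pretend the conversion ran */
        pthread_mutex_lock(&lock);
        if (--unwritten == 0)
            pthread_cond_broadcast(&cond);/* wake_up(wq) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, io_done, NULL);
        unwritten_wait();                 /* returns once counter hits 0 */
        pthread_join(t, NULL);
        puts("all unwritten work drained");
        return 0;
    }
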
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 2a1dcea4f12e..be1d89f385b4 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -34,87 +34,6 @@
#include <trace/events/ext4.h>
-static void dump_completed_IO(struct inode * inode)
-{
-#ifdef EXT4FS_DEBUG
- struct list_head *cur, *before, *after;
- ext4_io_end_t *io, *io0, *io1;
- unsigned long flags;
-
- if (list_empty(&EXT4_I(inode)->i_completed_io_list)){
- ext4_debug("inode %lu completed_io list is empty\n", inode->i_ino);
- return;
- }
-
- ext4_debug("Dump inode %lu completed_io list \n", inode->i_ino);
- spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
- list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list){
- cur = &io->list;
- before = cur->prev;
- io0 = container_of(before, ext4_io_end_t, list);
- after = cur->next;
- io1 = container_of(after, ext4_io_end_t, list);
-
- ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
- io, inode->i_ino, io0, io1);
- }
- spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
-#endif
-}
-
-/*
- * This function is called from ext4_sync_file().
- *
- * When IO is completed, the work to convert unwritten extents to
- * written is queued on workqueue but may not get immediately
- * scheduled. When fsync is called, we need to ensure the
- * conversion is complete before fsync returns.
- * The inode keeps track of a list of pending/completed IO that
- * might needs to do the conversion. This function walks through
- * the list and convert the related unwritten extents for completed IO
- * to written.
- * The function return the number of pending IOs on success.
- */
-int ext4_flush_completed_IO(struct inode *inode)
-{
- ext4_io_end_t *io;
- struct ext4_inode_info *ei = EXT4_I(inode);
- unsigned long flags;
- int ret = 0;
- int ret2 = 0;
-
- dump_completed_IO(inode);
- spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- while (!list_empty(&ei->i_completed_io_list)){
- io = list_entry(ei->i_completed_io_list.next,
- ext4_io_end_t, list);
- list_del_init(&io->list);
- io->flag |= EXT4_IO_END_IN_FSYNC;
- /*
- * Calling ext4_end_io_nolock() to convert completed
- * IO to written.
- *
- * When ext4_sync_file() is called, run_queue() may already
- * about to flush the work corresponding to this io structure.
- * It will be upset if it founds the io structure related
- * to the work-to-be schedule is freed.
- *
- * Thus we need to keep the io structure still valid here after
- * conversion finished. The io structure has a flag to
- * avoid double converting from both fsync and background work
- * queue work.
- */
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
- ret = ext4_end_io_nolock(io);
- if (ret < 0)
- ret2 = ret;
- spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- io->flag &= ~EXT4_IO_END_IN_FSYNC;
- }
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
- return (ret2 < 0) ? ret2 : 0;
-}
-
/*
* If we're not journaling and this is a just-created file, we have to
* sync our parent directory (if it was freshly created) since
@@ -203,7 +122,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct inode *inode = file->f_mapping->host;
struct ext4_inode_info *ei = EXT4_I(inode);
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
- int ret;
+ int ret, err;
tid_t commit_tid;
bool needs_barrier = false;
@@ -219,7 +138,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (inode->i_sb->s_flags & MS_RDONLY)
goto out;
- ret = ext4_flush_completed_IO(inode);
+ ret = ext4_flush_unwritten_io(inode);
if (ret < 0)
goto out;
@@ -255,8 +174,11 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
needs_barrier = true;
jbd2_log_start_commit(journal, commit_tid);
ret = jbd2_log_wait_commit(journal, commit_tid);
- if (needs_barrier)
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ if (needs_barrier) {
+ err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ if (!ret)
+ ret = err;
+ }
out:
mutex_unlock(&inode->i_mutex);
trace_ext4_sync_file_exit(inode, ret);
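
The sync-file hunk above stops discarding the return value of blkdev_issue_flush(): a failed barrier is now reported, but only when the journal commit itself succeeded, so the first error always wins. A tiny sketch of that pattern:

    #include <stdio.h>

    /* cf. the hunk: 'ret' from the commit wait, 'err' from the flush */
    static int first_error(int ret, int err)
    {
        if (!ret)   /* surface the flush error only if commit succeeded */
            ret = err;
        return ret;
    }

    int main(void)
    {
        printf("%d\n", first_error(0, -5));   /* -5: flush error surfaces */
        printf("%d\n", first_error(-30, -5)); /* -30: first error wins */
        return 0;
    }
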
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 26154b81b836..4facdd29a350 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -697,6 +697,15 @@ got_group:
if (!gdp)
goto fail;
+ /*
+ * Check free inodes count before loading bitmap.
+ */
+ if (ext4_free_inodes_count(sb, gdp) == 0) {
+ if (++group == ngroups)
+ group = 0;
+ continue;
+ }
+
brelse(inode_bitmap_bh);
inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
if (!inode_bitmap_bh)
@@ -753,9 +762,7 @@ got:
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
ext4_block_bitmap_csum_set(sb, group, gdp,
- block_bitmap_bh,
- EXT4_BLOCKS_PER_GROUP(sb) /
- 8);
+ block_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
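
The hunk above adds a cheap pre-check to the inode allocator: consult the group descriptor's free-inode count before paying for an inode-bitmap read, wrapping to the next group when it is zero. A userspace sketch of the scan, with a plain array standing in for the group descriptors:

    #include <stddef.h>
    #include <stdio.h>

    static int find_group(const unsigned int *free_inodes, size_t ngroups,
                          size_t start)
    {
        size_t group = start;
        for (size_t i = 0; i < ngroups; i++) {
            if (free_inodes[group] != 0)
                return (int)group;  /* only now pay for the bitmap read */
            if (++group == ngroups) /* wrap around, as the hunk does */
                group = 0;
        }
        return -1;                  /* every group is full */
    }

    int main(void)
    {
        unsigned int free_inodes[] = { 0, 0, 7, 3 };
        printf("%d\n", find_group(free_inodes, 4, 3)); /* -> 3 */
        printf("%d\n", find_group(free_inodes, 4, 0)); /* -> 2 */
        return 0;
    }
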
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 830e1b2bf145..792e388e7b44 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -807,16 +807,30 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
retry:
if (rw == READ && ext4_should_dioread_nolock(inode)) {
- if (unlikely(!list_empty(&ei->i_completed_io_list))) {
+ if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
mutex_lock(&inode->i_mutex);
- ext4_flush_completed_IO(inode);
+ ext4_flush_unwritten_io(inode);
mutex_unlock(&inode->i_mutex);
}
+ /*
+ * Nolock dioread optimization may be dynamically disabled
+ * via ext4_inode_block_unlocked_dio(). Check inode's state
+ * while holding an extra i_dio_count reference.
+ */
+ atomic_inc(&inode->i_dio_count);
+ smp_mb();
+ if (unlikely(ext4_test_inode_state(inode,
+ EXT4_STATE_DIOREAD_LOCK))) {
+ inode_dio_done(inode);
+ goto locked;
+ }
ret = __blockdev_direct_IO(rw, iocb, inode,
inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext4_get_block, NULL, NULL, 0);
+ inode_dio_done(inode);
} else {
+locked:
ret = blockdev_direct_IO(rw, iocb, inode, iov,
offset, nr_segs, ext4_get_block);
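
The hunk above pins i_dio_count, issues a full memory barrier, and only then tests EXT4_STATE_DIOREAD_LOCK, so a concurrent ext4_inode_block_unlocked_dio() either observes the reference or this path observes the flag and falls back to the locked variant. A C11-atomics sketch of the same handshake, with illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int  dio_count;       /* stand-in for i_dio_count */
    static atomic_bool dioread_locked;  /* stand-in for the state flag */

    static bool try_enter_nolock_dio(void)
    {
        atomic_fetch_add(&dio_count, 1);           /* atomic_inc() */
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */
        if (atomic_load(&dioread_locked)) {
            atomic_fetch_sub(&dio_count, 1);       /* inode_dio_done() */
            return false;  /* take the locked path instead */
        }
        return true;       /* safe: a disabler must now wait for our ref */
    }

    int main(void)
    {
        printf("%d\n", try_enter_nolock_dio());    /* 1: nolock path */
        atomic_fetch_sub(&dio_count, 1);           /* done with the ref */
        atomic_store(&dioread_locked, true);
        printf("%d\n", try_enter_nolock_dio());    /* 0: falls back */
        return 0;
    }
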
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c862ee5fe79d..b3c243b9afa5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -732,11 +732,13 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
err = ext4_map_blocks(handle, inode, &map,
create ? EXT4_GET_BLOCKS_CREATE : 0);
+ /* ensure we send some value back into *errp */
+ *errp = 0;
+
if (err < 0)
*errp = err;
if (err <= 0)
return NULL;
- *errp = 0;
bh = sb_getblk(inode->i_sb, map.m_pblk);
if (!bh) {
@@ -1954,9 +1956,6 @@ out:
return ret;
}
-static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
-static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
-
/*
* Note that we don't need to start a transaction unless we're journaling data
* because we should have holes filled from ext4_page_mkwrite(). We even don't
@@ -2463,6 +2462,16 @@ static int ext4_nonda_switch(struct super_block *sb)
free_blocks = EXT4_C2B(sbi,
percpu_counter_read_positive(&sbi->s_freeclusters_counter));
dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
+ /*
+ * Start pushing delalloc when 1/2 of free blocks are dirty.
+ */
+ if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
+ !writeback_in_progress(sb->s_bdi) &&
+ down_read_trylock(&sb->s_umount)) {
+ writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
+ up_read(&sb->s_umount);
+ }
+
if (2 * free_blocks < 3 * dirty_blocks ||
free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
/*
@@ -2471,13 +2480,6 @@ static int ext4_nonda_switch(struct super_block *sb)
*/
return 1;
}
- /*
- * Even if we don't switch but are nearing capacity,
- * start pushing delalloc when 1/2 of free blocks are dirty.
- */
- if (free_blocks < 2 * dirty_blocks)
- writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
-
return 0;
}
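
The ext4_nonda_switch() hunk above moves the writeback kick in front of the switch decision and guards it with down_read_trylock() on s_umount, so the caller can push delalloc itself instead of relying on an idle flusher. The thresholds are unchanged: start pushing once free < 2 * dirty, and fall back to non-delalloc once 2 * free < 3 * dirty, i.e. free < 1.5 * dirty. A sketch of the decision with the watermark as a parameter:

    #include <stdio.h>

    static int nonda_switch(unsigned long long free_blocks,
                            unsigned long long dirty_blocks,
                            unsigned long long watermark)
    {
        /* 2*free < 3*dirty  <=>  free < 1.5 * dirty */
        if (2 * free_blocks < 3 * dirty_blocks ||
            free_blocks < dirty_blocks + watermark)
            return 1;   /* switch this write to non-delalloc */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", nonda_switch(1000, 400, 100)); /* 0: plenty free */
        printf("%d\n", nonda_switch(500, 400, 100));  /* 1: free < 1.5*dirty */
        return 0;
    }
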
@@ -2879,9 +2881,6 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
{
struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
ext4_io_end_t *io_end = iocb->private;
- struct workqueue_struct *wq;
- unsigned long flags;
- struct ext4_inode_info *ei;
/* if not async direct IO or dio with 0 bytes write, just return */
if (!io_end || !size)
@@ -2910,24 +2909,14 @@ out:
io_end->iocb = iocb;
io_end->result = ret;
}
- wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
- /* Add the io_end to per-inode completed aio dio list*/
- ei = EXT4_I(io_end->inode);
- spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- list_add_tail(&io_end->list, &ei->i_completed_io_list);
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
-
- /* queue the work to convert unwritten extents to written */
- queue_work(wq, &io_end->work);
+ ext4_add_complete_io(io_end);
}
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
{
ext4_io_end_t *io_end = bh->b_private;
- struct workqueue_struct *wq;
struct inode *inode;
- unsigned long flags;
if (!test_clear_buffer_uninit(bh) || !io_end)
goto out;
@@ -2946,15 +2935,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
*/
inode = io_end->inode;
ext4_set_io_unwritten_flag(inode, io_end);
-
- /* Add the io_end to per-inode completed io list*/
- spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
- list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
- spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
-
- wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
- /* queue the work to convert unwritten extents to written */
- queue_work(wq, &io_end->work);
+ ext4_add_complete_io(io_end);
out:
bh->b_private = NULL;
bh->b_end_io = NULL;
@@ -3029,6 +3010,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
overwrite = *((int *)iocb->private);
if (overwrite) {
+ atomic_inc(&inode->i_dio_count);
down_read(&EXT4_I(inode)->i_data_sem);
mutex_unlock(&inode->i_mutex);
}
@@ -3054,7 +3036,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
* hook to the iocb.
*/
iocb->private = NULL;
- EXT4_I(inode)->cur_aio_dio = NULL;
+ ext4_inode_aio_set(inode, NULL);
if (!is_sync_kiocb(iocb)) {
ext4_io_end_t *io_end =
ext4_init_io_end(inode, GFP_NOFS);
@@ -3071,7 +3053,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
* is a unwritten extents needs to be converted
* when IO is completed.
*/
- EXT4_I(inode)->cur_aio_dio = iocb->private;
+ ext4_inode_aio_set(inode, io_end);
}
if (overwrite)
@@ -3091,7 +3073,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
NULL,
DIO_LOCKING);
if (iocb->private)
- EXT4_I(inode)->cur_aio_dio = NULL;
+ ext4_inode_aio_set(inode, NULL);
/*
* The io_end structure takes a reference to the inode,
* that structure needs to be destroyed and the
@@ -3126,6 +3108,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
retake_lock:
/* take i_mutex locking again if we did an overwrite dio */
if (overwrite) {
+ inode_dio_done(inode);
up_read(&EXT4_I(inode)->i_data_sem);
mutex_lock(&inode->i_mutex);
}
@@ -4052,6 +4035,7 @@ static int ext4_do_update_inode(handle_t *handle,
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
int err = 0, rc, block;
+ int need_datasync = 0;
uid_t i_uid;
gid_t i_gid;
@@ -4102,7 +4086,10 @@ static int ext4_do_update_inode(handle_t *handle,
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
- ext4_isize_set(raw_inode, ei->i_disksize);
+ if (ei->i_disksize != ext4_isize(raw_inode)) {
+ ext4_isize_set(raw_inode, ei->i_disksize);
+ need_datasync = 1;
+ }
if (ei->i_disksize > 0x7fffffffULL) {
struct super_block *sb = inode->i_sb;
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -4155,7 +4142,7 @@ static int ext4_do_update_inode(handle_t *handle,
err = rc;
ext4_clear_inode_state(inode, EXT4_STATE_NEW);
- ext4_update_inode_fsync_trans(handle, inode, 0);
+ ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
brelse(bh);
ext4_std_error(inode->i_sb, err);
@@ -4298,7 +4285,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- inode_dio_wait(inode);
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -4347,8 +4333,17 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size != i_size_read(inode))
+ if (attr->ia_size != i_size_read(inode)) {
truncate_setsize(inode, attr->ia_size);
+ /* Inode size will be reduced, wait for dio in flight.
+ * Temporarily disable dioread_nolock to prevent
+ * livelock. */
+ if (orphan) {
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+ ext4_inode_resume_unlocked_dio(inode);
+ }
+ }
ext4_truncate(inode);
}
@@ -4727,6 +4722,10 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
return err;
}
+ /* Wait for all existing dio workers */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+
jbd2_journal_lock_updates(journal);
/*
@@ -4746,6 +4745,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
+ ext4_inode_resume_unlocked_dio(inode);
/* Finally we can mark the inode as dirty. */
@@ -4780,6 +4780,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
int retries = 0;
sb_start_pagefault(inode->i_sb);
+ file_update_time(vma->vm_file);
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 5439d6a56e99..5747f52f7c72 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -366,26 +366,11 @@ group_add_out:
return -EOPNOTSUPP;
}
- if (EXT4_HAS_INCOMPAT_FEATURE(sb,
- EXT4_FEATURE_INCOMPAT_META_BG)) {
- ext4_msg(sb, KERN_ERR,
- "Online resizing not (yet) supported with meta_bg");
- return -EOPNOTSUPP;
- }
-
if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
sizeof(__u64))) {
return -EFAULT;
}
- if (n_blocks_count > MAX_32_NUM &&
- !EXT4_HAS_INCOMPAT_FEATURE(sb,
- EXT4_FEATURE_INCOMPAT_64BIT)) {
- ext4_msg(sb, KERN_ERR,
- "File system only supports 32-bit block numbers");
- return -EOPNOTSUPP;
- }
-
err = ext4_resize_begin(sb);
if (err)
return err;
@@ -420,13 +405,6 @@ resizefs_out:
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
- EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
- ext4_msg(sb, KERN_ERR,
- "FITRIM not supported with bigalloc");
- return -EOPNOTSUPP;
- }
-
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
sizeof(range)))
return -EFAULT;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 08778f6cdfe9..526e55358606 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -24,6 +24,7 @@
#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/debugfs.h>
+#include <linux/log2.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>
@@ -1338,17 +1339,17 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
mb_check_buddy(e4b);
}
-static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
+static int mb_find_extent(struct ext4_buddy *e4b, int block,
int needed, struct ext4_free_extent *ex)
{
int next = block;
- int max;
+ int max, order;
void *buddy;
assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
BUG_ON(ex == NULL);
- buddy = mb_find_buddy(e4b, order, &max);
+ buddy = mb_find_buddy(e4b, 0, &max);
BUG_ON(buddy == NULL);
BUG_ON(block >= max);
if (mb_test_bit(block, buddy)) {
@@ -1358,12 +1359,9 @@ static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
return 0;
}
- /* FIXME dorp order completely ? */
- if (likely(order == 0)) {
- /* find actual order */
- order = mb_find_order_for_block(e4b, block);
- block = block >> order;
- }
+ /* find actual order */
+ order = mb_find_order_for_block(e4b, block);
+ block = block >> order;
ex->fe_len = 1 << order;
ex->fe_start = block << order;
@@ -1549,7 +1547,7 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
/* recheck chunk's availability - we don't know
* when it was found (within this lock-unlock
* period or not) */
- max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
+ max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
if (max >= gex->fe_len) {
ext4_mb_use_best_found(ac, e4b);
return;
@@ -1641,7 +1639,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
return err;
ext4_lock_group(ac->ac_sb, group);
- max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
+ max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
if (max > 0) {
ac->ac_b_ex = ex;
@@ -1662,17 +1660,20 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
int max;
int err;
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+ struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
struct ext4_free_extent ex;
if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
return 0;
+ if (grp->bb_free == 0)
+ return 0;
err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
if (err)
return err;
ext4_lock_group(ac->ac_sb, group);
- max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
+ max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
ac->ac_g_ex.fe_len, &ex);
if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
@@ -1788,7 +1789,7 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
break;
}
- mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
+ mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
BUG_ON(ex.fe_len <= 0);
if (free < ex.fe_len) {
ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
@@ -1840,7 +1841,7 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
if (!mb_test_bit(i, bitmap)) {
- max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
+ max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
if (max >= sbi->s_stripe) {
ac->ac_found++;
ac->ac_b_ex = ex;
@@ -1862,6 +1863,12 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
BUG_ON(cr < 0 || cr >= 4);
+ free = grp->bb_free;
+ if (free == 0)
+ return 0;
+ if (cr <= 2 && free < ac->ac_g_ex.fe_len)
+ return 0;
+
/* We only do this if the grp has never been initialized */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
int ret = ext4_mb_init_group(ac->ac_sb, group);
@@ -1869,10 +1876,7 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
return 0;
}
- free = grp->bb_free;
fragments = grp->bb_fragments;
- if (free == 0)
- return 0;
if (fragments == 0)
return 0;
@@ -2163,6 +2167,39 @@ static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
return cachep;
}
+/*
+ * Allocate the top-level s_group_info array for the specified number
+ * of groups
+ */
+int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ unsigned size;
+ struct ext4_group_info ***new_groupinfo;
+
+ size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
+ EXT4_DESC_PER_BLOCK_BITS(sb);
+ if (size <= sbi->s_group_info_size)
+ return 0;
+
+ size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
+ new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
+ if (!new_groupinfo) {
+ ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
+ return -ENOMEM;
+ }
+ if (sbi->s_group_info) {
+ memcpy(new_groupinfo, sbi->s_group_info,
+ sbi->s_group_info_size * sizeof(*sbi->s_group_info));
+ ext4_kvfree(sbi->s_group_info);
+ }
+ sbi->s_group_info = new_groupinfo;
+ sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
+ ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
+ sbi->s_group_info_size);
+ return 0;
+}
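
ext4_mb_alloc_groupinfo() above grows the top-level s_group_info table by rounding the required byte size up to a power of two, copying the old slots, and swapping the pointer, which lets online resize extend the table without disturbing existing entries. A userspace sketch of the same grow-and-copy strategy, assuming calloc/free in place of ext4_kvzalloc/ext4_kvfree:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int grow_table(void ***table, size_t *nslots, size_t needed)
    {
        if (needed <= *nslots)
            return 0;                     /* already big enough */
        size_t bytes = sizeof(void *);
        while (bytes < needed * sizeof(void *))
            bytes <<= 1;                  /* roundup_pow_of_two() */
        void **bigger = calloc(1, bytes); /* ext4_kvzalloc() */
        if (!bigger)
            return -1;                    /* -ENOMEM */
        if (*table) {
            memcpy(bigger, *table, *nslots * sizeof(void *));
            free(*table);                 /* ext4_kvfree() */
        }
        *table = bigger;
        *nslots = bytes / sizeof(void *);
        return 0;
    }

    int main(void)
    {
        void **t = NULL;
        size_t n = 0;
        grow_table(&t, &n, 5);
        printf("%zu slots\n", n);  /* 8: rounded up to a power of two */
        free(t);
        return 0;
    }
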
+
/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *desc)
@@ -2195,12 +2232,11 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
- meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
+ meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL);
if (meta_group_info[i] == NULL) {
ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
goto exit_group_info;
}
- memset(meta_group_info[i], 0, kmem_cache_size(cachep));
set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
&(meta_group_info[i]->bb_state));
@@ -2252,49 +2288,14 @@ static int ext4_mb_init_backend(struct super_block *sb)
ext4_group_t ngroups = ext4_get_groups_count(sb);
ext4_group_t i;
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_super_block *es = sbi->s_es;
- int num_meta_group_infos;
- int num_meta_group_infos_max;
- int array_size;
+ int err;
struct ext4_group_desc *desc;
struct kmem_cache *cachep;
- /* This is the number of blocks used by GDT */
- num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
- 1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
-
- /*
- * This is the total number of blocks used by GDT including
- * the number of reserved blocks for GDT.
- * The s_group_info array is allocated with this value
- * to allow a clean online resize without a complex
- * manipulation of pointer.
- * The drawback is the unused memory when no resize
- * occurs but it's very low in terms of pages
- * (see comments below)
- * Need to handle this properly when META_BG resizing is allowed
- */
- num_meta_group_infos_max = num_meta_group_infos +
- le16_to_cpu(es->s_reserved_gdt_blocks);
+ err = ext4_mb_alloc_groupinfo(sb, ngroups);
+ if (err)
+ return err;
- /*
- * array_size is the size of s_group_info array. We round it
- * to the next power of two because this approximation is done
- * internally by kmalloc so we can have some more memory
- * for free here (e.g. may be used for META_BG resize).
- */
- array_size = 1;
- while (array_size < sizeof(*sbi->s_group_info) *
- num_meta_group_infos_max)
- array_size = array_size << 1;
- /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
- * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
- * So a two level scheme suffices for now. */
- sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
- if (sbi->s_group_info == NULL) {
- ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
- return -ENOMEM;
- }
sbi->s_buddy_cache = new_inode(sb);
if (sbi->s_buddy_cache == NULL) {
ext4_msg(sb, KERN_ERR, "can't get new inode");
@@ -2322,7 +2323,7 @@ err_freebuddy:
cachep = get_groupinfo_cache(sb->s_blocksize_bits);
while (i-- > 0)
kmem_cache_free(cachep, ext4_get_group_info(sb, i));
- i = num_meta_group_infos;
+ i = sbi->s_group_info_size;
while (i-- > 0)
kfree(sbi->s_group_info[i]);
iput(sbi->s_buddy_cache);
@@ -2804,8 +2805,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
}
len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
ext4_free_group_clusters_set(sb, gdp, len);
- ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
@@ -4008,7 +4008,6 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
ext4_get_group_no_and_offset(sb, goal, &group, &block);
/* set up allocation goals */
- memset(ac, 0, sizeof(struct ext4_allocation_context));
ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
ac->ac_status = AC_STATUS_CONTINUE;
ac->ac_sb = sb;
@@ -4291,7 +4290,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
}
}
- ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
+ ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
if (!ac) {
ar->len = 0;
*errp = -ENOMEM;
@@ -4657,6 +4656,8 @@ do_more:
* with group lock held. generate_buddy look at
* them with group lock_held
*/
+ if (test_opt(sb, DISCARD))
+ ext4_issue_discard(sb, block_group, bit, count);
ext4_lock_group(sb, block_group);
mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
mb_free_blocks(inode, &e4b, bit, count_clusters);
@@ -4664,8 +4665,7 @@ do_more:
ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
ext4_free_group_clusters_set(sb, gdp, ret);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
@@ -4809,8 +4809,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
mb_free_blocks(NULL, &e4b, bit, count);
blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
ext4_free_group_clusters_set(sb, desc, blk_free_count);
- ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, desc);
ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeclusters_counter,
@@ -4988,10 +4987,12 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
start = range->start >> sb->s_blocksize_bits;
end = start + (range->len >> sb->s_blocksize_bits) - 1;
- minlen = range->minlen >> sb->s_blocksize_bits;
+ minlen = EXT4_NUM_B2C(EXT4_SB(sb),
+ range->minlen >> sb->s_blocksize_bits);
- if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) ||
- unlikely(start >= max_blks))
+ if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
+ start >= max_blks ||
+ range->len < sb->s_blocksize)
return -EINVAL;
if (end >= max_blks)
end = max_blks - 1;
@@ -5048,6 +5049,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
out:
- range->len = trimmed * sb->s_blocksize;
+ range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
return ret;
}
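
The ext4_trim_fs() hunks above fix the FITRIM units for bigalloc filesystems: the caller's minlen arrives in blocks and must be rounded up to clusters, and the trimmed total, kept in clusters, must be converted back to bytes for the reply. A sketch of both conversions with illustrative helpers standing in for EXT4_NUM_B2C and EXT4_C2B:

    #include <stdio.h>

    /* bigalloc: ratio = blocks per cluster */
    static unsigned long blocks_to_clusters(unsigned long blocks,
                                            unsigned int ratio)
    {
        return (blocks + ratio - 1) / ratio;     /* cf. EXT4_NUM_B2C */
    }

    static unsigned long long clusters_to_bytes(unsigned long clusters,
                                                unsigned int ratio,
                                                unsigned int blocksize)
    {
        return (unsigned long long)clusters * ratio * blocksize;
    }

    int main(void)
    {
        /* minlen of 10 blocks, 4 blocks per cluster -> 3 clusters */
        printf("%lu\n", blocks_to_clusters(10, 4));
        /* 3 trimmed clusters of 4 KiB blocks -> 49152 bytes reported */
        printf("%llu\n", clusters_to_bytes(3, 4, 4096));
        return 0;
    }
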
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index c070618c21ce..3ccd889ba953 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -65,11 +65,6 @@ extern u8 mb_enable_debug;
#define MB_DEFAULT_MIN_TO_SCAN 10
/*
- * How many groups mballoc will scan looking for the best chunk
- */
-#define MB_DEFAULT_MAX_GROUPS_TO_SCAN 5
-
-/*
* with 'ext4_mb_stats' allocator will collect stats that will be
* shown at umount. The collecting costs though!
*/
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index c5826c623e7a..292daeeed455 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -141,55 +141,21 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
}
/**
- * mext_check_null_inode - NULL check for two inodes
- *
- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
- */
-static int
-mext_check_null_inode(struct inode *inode1, struct inode *inode2,
- const char *function, unsigned int line)
-{
- int ret = 0;
-
- if (inode1 == NULL) {
- __ext4_error(inode2->i_sb, function, line,
- "Both inodes should not be NULL: "
- "inode1 NULL inode2 %lu", inode2->i_ino);
- ret = -EIO;
- } else if (inode2 == NULL) {
- __ext4_error(inode1->i_sb, function, line,
- "Both inodes should not be NULL: "
- "inode1 %lu inode2 NULL", inode1->i_ino);
- ret = -EIO;
- }
- return ret;
-}
-
-/**
* double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
*
- * @orig_inode: original inode structure
- * @donor_inode: donor inode structure
- * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
- * i_ino order.
+ * Acquire write lock of i_data_sem of the two inodes
*/
static void
-double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
+double_down_write_data_sem(struct inode *first, struct inode *second)
{
- struct inode *first = orig_inode, *second = donor_inode;
+ if (first < second) {
+ down_write(&EXT4_I(first)->i_data_sem);
+ down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+ } else {
+ down_write(&EXT4_I(second)->i_data_sem);
+ down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
- /*
- * Use the inode number to provide the stable locking order instead
- * of its address, because the C language doesn't guarantee you can
- * compare pointers that don't come from the same array.
- */
- if (donor_inode->i_ino < orig_inode->i_ino) {
- first = donor_inode;
- second = orig_inode;
}
-
- down_write(&EXT4_I(first)->i_data_sem);
- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
}
/**
@@ -604,9 +570,8 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
diff = donor_off - le32_to_cpu(tmp_dext->ee_block);
ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff);
- tmp_dext->ee_block =
- cpu_to_le32(le32_to_cpu(tmp_dext->ee_block) + diff);
- tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_dext->ee_len) - diff);
+ le32_add_cpu(&tmp_dext->ee_block, diff);
+ le16_add_cpu(&tmp_dext->ee_len, -diff);
if (max_count < ext4_ext_get_actual_len(tmp_dext))
tmp_dext->ee_len = cpu_to_le16(max_count);
@@ -629,6 +594,43 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
}
/**
+ * mext_check_coverage - Check that all extents in range have the same type
+ *
+ * @inode: inode in question
+ * @from: block offset of inode
+ * @count: block count to be checked
+ * @uninit: extents expected to be uninitialized
+ * @err: pointer to save error value
+ *
+ * Return 1 if all extents in range have the expected type, and zero otherwise.
+ */
+static int
+mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
+ int uninit, int *err)
+{
+ struct ext4_ext_path *path = NULL;
+ struct ext4_extent *ext;
+ ext4_lblk_t last = from + count;
+ while (from < last) {
+ *err = get_ext_path(inode, from, &path);
+ if (*err)
+ return 0;
+ ext = path[ext_depth(inode)].p_ext;
+ if (!ext) {
+ ext4_ext_drop_refs(path);
+ return 0;
+ }
+ if (uninit != ext4_ext_is_uninitialized(ext)) {
+ ext4_ext_drop_refs(path);
+ return 0;
+ }
+ from += ext4_ext_get_actual_len(ext);
+ ext4_ext_drop_refs(path);
+ }
+ return 1;
+}
+
+/**
* mext_replace_branches - Replace original extents with new extents
*
* @handle: journal handle
@@ -663,9 +665,6 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
int replaced_count = 0;
int dext_alen;
- /* Protect extent trees against block allocations via delalloc */
- double_down_write_data_sem(orig_inode, donor_inode);
-
/* Get the original extent for the block "orig_off" */
*err = get_ext_path(orig_inode, orig_off, &orig_path);
if (*err)
@@ -764,12 +763,122 @@ out:
ext4_ext_invalidate_cache(orig_inode);
ext4_ext_invalidate_cache(donor_inode);
- double_up_write_data_sem(orig_inode, donor_inode);
-
return replaced_count;
}
/**
+ * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
+ *
+ * @inode1: the inode structure
+ * @inode2: the inode structure
+ * @index: page index
+ * @page: result page vector
+ *
+ * Grab two locked pages for the two inodes, in inode address order
+ */
+static int
+mext_page_double_lock(struct inode *inode1, struct inode *inode2,
+ pgoff_t index, struct page *page[2])
+{
+ struct address_space *mapping[2];
+ unsigned fl = AOP_FLAG_NOFS;
+
+ BUG_ON(!inode1 || !inode2);
+ if (inode1 < inode2) {
+ mapping[0] = inode1->i_mapping;
+ mapping[1] = inode2->i_mapping;
+ } else {
+ mapping[0] = inode2->i_mapping;
+ mapping[1] = inode1->i_mapping;
+ }
+
+ page[0] = grab_cache_page_write_begin(mapping[0], index, fl);
+ if (!page[0])
+ return -ENOMEM;
+
+ page[1] = grab_cache_page_write_begin(mapping[1], index, fl);
+ if (!page[1]) {
+ unlock_page(page[0]);
+ page_cache_release(page[0]);
+ return -ENOMEM;
+ }
+
+ if (inode1 > inode2) {
+ struct page *tmp;
+ tmp = page[0];
+ page[0] = page[1];
+ page[1] = tmp;
+ }
+ return 0;
+}
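
mext_page_double_lock() above, like the reworked double_down_write_data_sem(), orders the two acquisitions by comparing the object addresses rather than i_ino. Comparing unrelated pointers is formally unspecified in ISO C, which is the caveat the deleted comment raised, but the kernel relies on a flat address space here and the order is stable for the life of both inodes. A userspace sketch of the pattern:

    #include <pthread.h>
    #include <stdio.h>

    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if (a < b) {            /* lower address first, always */
            pthread_mutex_lock(a);
            pthread_mutex_lock(b);
        } else {
            pthread_mutex_lock(b);
            pthread_mutex_lock(a);
        }
    }

    int main(void)
    {
        pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;
        lock_pair(&m1, &m2);   /* same order as lock_pair(&m2, &m1) */
        pthread_mutex_unlock(&m1);
        pthread_mutex_unlock(&m2);
        puts("locked and unlocked in a stable order");
        return 0;
    }
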
+
+/* Force page buffers uptodate without dropping the page's lock */
+static int
+mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
+{
+ struct inode *inode = page->mapping->host;
+ sector_t block;
+ struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+ unsigned int blocksize, block_start, block_end;
+ int i, err, nr = 0, partial = 0;
+ BUG_ON(!PageLocked(page));
+ BUG_ON(PageWriteback(page));
+
+ if (PageUptodate(page))
+ return 0;
+
+ blocksize = 1 << inode->i_blkbits;
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize, 0);
+
+ head = page_buffers(page);
+ block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ for (bh = head, block_start = 0; bh != head || !block_start;
+ block++, block_start = block_end, bh = bh->b_this_page) {
+ block_end = block_start + blocksize;
+ if (block_end <= from || block_start >= to) {
+ if (!buffer_uptodate(bh))
+ partial = 1;
+ continue;
+ }
+ if (buffer_uptodate(bh))
+ continue;
+ if (!buffer_mapped(bh)) {
+ int err = 0;
+ err = ext4_get_block(inode, block, bh, 0);
+ if (err) {
+ SetPageError(page);
+ return err;
+ }
+ if (!buffer_mapped(bh)) {
+ zero_user(page, block_start, blocksize);
+ if (!err)
+ set_buffer_uptodate(bh);
+ continue;
+ }
+ }
+ BUG_ON(nr >= MAX_BUF_PER_PAGE);
+ arr[nr++] = bh;
+ }
+ /* No io required */
+ if (!nr)
+ goto out;
+
+ for (i = 0; i < nr; i++) {
+ bh = arr[i];
+ if (!bh_uptodate_or_lock(bh)) {
+ err = bh_submit_read(bh);
+ if (err)
+ return err;
+ }
+ }
+out:
+ if (!partial)
+ SetPageUptodate(page);
+ return 0;
+}
+
+/**
* move_extent_per_page - Move extent data per page
*
* @o_filp: file structure of original file
@@ -791,26 +900,24 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
int block_len_in_page, int uninit, int *err)
{
struct inode *orig_inode = o_filp->f_dentry->d_inode;
- struct address_space *mapping = orig_inode->i_mapping;
- struct buffer_head *bh;
- struct page *page = NULL;
- const struct address_space_operations *a_ops = mapping->a_ops;
+ struct page *pagep[2] = {NULL, NULL};
handle_t *handle;
ext4_lblk_t orig_blk_offset;
long long offs = orig_page_offset << PAGE_CACHE_SHIFT;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
unsigned int w_flags = 0;
unsigned int tmp_data_size, data_size, replaced_size;
- void *fsdata;
- int i, jblocks;
- int err2 = 0;
+ int err2, jblocks, retries = 0;
int replaced_count = 0;
+ int from = data_offset_in_page << orig_inode->i_blkbits;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
/*
* It needs twice the amount of ordinary journal buffers because
* inode and donor_inode may change each different metadata blocks.
*/
+again:
+ *err = 0;
jblocks = ext4_writepage_trans_blocks(orig_inode) * 2;
handle = ext4_journal_start(orig_inode, jblocks);
if (IS_ERR(handle)) {
@@ -824,19 +931,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
orig_blk_offset = orig_page_offset * blocks_per_page +
data_offset_in_page;
- /*
- * If orig extent is uninitialized one,
- * it's not necessary force the page into memory
- * and then force it to be written out again.
- * Just swap data blocks between orig and donor.
- */
- if (uninit) {
- replaced_count = mext_replace_branches(handle, orig_inode,
- donor_inode, orig_blk_offset,
- block_len_in_page, err);
- goto out2;
- }
-
offs = (long long)orig_blk_offset << orig_inode->i_blkbits;
/* Calculate data_size */
@@ -858,75 +952,120 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
replaced_size = data_size;
- *err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags,
- &page, &fsdata);
+ *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
+ pagep);
if (unlikely(*err < 0))
- goto out;
-
- if (!PageUptodate(page)) {
- mapping->a_ops->readpage(o_filp, page);
- lock_page(page);
- }
-
+ goto stop_journal;
/*
- * try_to_release_page() doesn't call releasepage in writeback mode.
- * We should care about the order of writing to the same file
- * by multiple move extent processes.
- * It needs to call wait_on_page_writeback() to wait for the
- * writeback of the page.
+ * If the orig extent was uninitialized, it can become initialized
+ * at any time after i_data_sem was dropped. In order to
+ * serialize with delalloc we recheck the extent while we
+ * hold the page's lock; if it is still uninitialized, a data
+ * copy is not necessary, just swap data blocks between orig and donor.
*/
- wait_on_page_writeback(page);
+ if (uninit) {
+ double_down_write_data_sem(orig_inode, donor_inode);
+ /* If any extent in the range became initialized we have to
+ * fall back to data copying */
+ uninit = mext_check_coverage(orig_inode, orig_blk_offset,
+ block_len_in_page, 1, err);
+ if (*err)
+ goto drop_data_sem;
- /* Release old bh and drop refs */
- try_to_release_page(page, 0);
+ uninit &= mext_check_coverage(donor_inode, orig_blk_offset,
+ block_len_in_page, 1, err);
+ if (*err)
+ goto drop_data_sem;
+
+ if (!uninit) {
+ double_up_write_data_sem(orig_inode, donor_inode);
+ goto data_copy;
+ }
+ if ((page_has_private(pagep[0]) &&
+ !try_to_release_page(pagep[0], 0)) ||
+ (page_has_private(pagep[1]) &&
+ !try_to_release_page(pagep[1], 0))) {
+ *err = -EBUSY;
+ goto drop_data_sem;
+ }
+ replaced_count = mext_replace_branches(handle, orig_inode,
+ donor_inode, orig_blk_offset,
+ block_len_in_page, err);
+ drop_data_sem:
+ double_up_write_data_sem(orig_inode, donor_inode);
+ goto unlock_pages;
+ }
+data_copy:
+ *err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
+ if (*err)
+ goto unlock_pages;
+
+ /* At this point all buffers in the range are uptodate and the old
+ * mapping layout is no longer required; try to drop it now. */
+ if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
+ (page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
+ *err = -EBUSY;
+ goto unlock_pages;
+ }
replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
- orig_blk_offset, block_len_in_page,
- &err2);
- if (err2) {
+ orig_blk_offset,
+ block_len_in_page, err);
+ if (*err) {
if (replaced_count) {
block_len_in_page = replaced_count;
replaced_size =
block_len_in_page << orig_inode->i_blkbits;
} else
- goto out;
+ goto unlock_pages;
}
+ /* Perform all necessary steps similar to write_begin()/write_end(),
+ * but keep in mind that i_size will not change */
+ *err = __block_write_begin(pagep[0], from, from + replaced_size,
+ ext4_get_block);
+ if (!*err)
+ *err = block_commit_write(pagep[0], from, from + replaced_size);
- if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0);
-
- bh = page_buffers(page);
- for (i = 0; i < data_offset_in_page; i++)
- bh = bh->b_this_page;
-
- for (i = 0; i < block_len_in_page; i++) {
- *err = ext4_get_block(orig_inode,
- (sector_t)(orig_blk_offset + i), bh, 0);
- if (*err < 0)
- goto out;
-
- if (bh->b_this_page != NULL)
- bh = bh->b_this_page;
- }
-
- *err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size,
- page, fsdata);
- page = NULL;
-
-out:
- if (unlikely(page)) {
- if (PageLocked(page))
- unlock_page(page);
- page_cache_release(page);
- ext4_journal_stop(handle);
- }
-out2:
+ if (unlikely(*err < 0))
+ goto repair_branches;
+
+ /* Even in the data=writeback case it is reasonable to pin the
+ * inode to the transaction, to prevent unexpected data loss */
+ *err = ext4_jbd2_file_inode(handle, orig_inode);
+
+unlock_pages:
+ unlock_page(pagep[0]);
+ page_cache_release(pagep[0]);
+ unlock_page(pagep[1]);
+ page_cache_release(pagep[1]);
+stop_journal:
ext4_journal_stop(handle);
-
- if (err2)
- *err = err2;
-
+ /* The buffer was busy, probably because it is pinned to a journal
+ * transaction; forcing a transaction commit may help to free it. */
+ if (*err == -EBUSY && ext4_should_retry_alloc(orig_inode->i_sb,
+ &retries))
+ goto again;
return replaced_count;
+
+repair_branches:
+ /*
+ * This should never ever happen!
+ * Extents are swapped already, but we are not able to copy data.
+ * Try to swap the extents back to their original places.
+ */
+ double_down_write_data_sem(orig_inode, donor_inode);
+ replaced_count = mext_replace_branches(handle, donor_inode, orig_inode,
+ orig_blk_offset,
+ block_len_in_page, &err2);
+ double_up_write_data_sem(orig_inode, donor_inode);
+ if (replaced_count != block_len_in_page) {
+ EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
+ "Unable to copy data block,"
+ " data will be lost.");
+ *err = -EIO;
+ }
+ replaced_count = 0;
+ goto unlock_pages;
}
/**
@@ -969,14 +1108,6 @@ mext_check_arguments(struct inode *orig_inode,
return -EINVAL;
}
- /* Files should be in the same ext4 FS */
- if (orig_inode->i_sb != donor_inode->i_sb) {
- ext4_debug("ext4 move extent: The argument files "
- "should be in same FS [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
/* Ext4 move extent supports only extent based file */
if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "
@@ -1002,7 +1133,6 @@ mext_check_arguments(struct inode *orig_inode,
}
if ((orig_start >= EXT_MAX_BLOCKS) ||
- (donor_start >= EXT_MAX_BLOCKS) ||
(*len > EXT_MAX_BLOCKS) ||
(orig_start + *len >= EXT_MAX_BLOCKS)) {
ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
@@ -1072,35 +1202,19 @@ mext_check_arguments(struct inode *orig_inode,
* @inode1: the inode structure
* @inode2: the inode structure
*
- * Lock two inodes' i_mutex by i_ino order.
- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
+ * Lock two inodes' i_mutex
*/
-static int
+static void
mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
{
- int ret = 0;
-
- BUG_ON(inode1 == NULL && inode2 == NULL);
-
- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
- if (ret < 0)
- goto out;
-
- if (inode1 == inode2) {
- mutex_lock(&inode1->i_mutex);
- goto out;
- }
-
- if (inode1->i_ino < inode2->i_ino) {
+ BUG_ON(inode1 == inode2);
+ if (inode1 < inode2) {
mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
} else {
mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
}
-
-out:
- return ret;
}
/**
@@ -1109,28 +1223,13 @@ out:
* @inode1: the inode that is released first
* @inode2: the inode that is released second
*
- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
*/
-static int
+static void
mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
- int ret = 0;
-
- BUG_ON(inode1 == NULL && inode2 == NULL);
-
- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
- if (ret < 0)
- goto out;
-
- if (inode1)
- mutex_unlock(&inode1->i_mutex);
-
- if (inode2 && inode2 != inode1)
- mutex_unlock(&inode2->i_mutex);
-
-out:
- return ret;
+ mutex_unlock(&inode1->i_mutex);
+ mutex_unlock(&inode2->i_mutex);
}
/**
@@ -1187,16 +1286,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
ext4_lblk_t rest_blocks;
pgoff_t orig_page_offset = 0, seq_end_page;
- int ret1, ret2, depth, last_extent = 0;
+ int ret, depth, last_extent = 0;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
int data_offset_in_page;
int block_len_in_page;
int uninit;
- /* orig and donor should be different file */
- if (orig_inode->i_ino == donor_inode->i_ino) {
+ if (orig_inode->i_sb != donor_inode->i_sb) {
+ ext4_debug("ext4 move extent: The argument files "
+ "should be in same FS [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+
+ /* orig and donor should be different inodes */
+ if (orig_inode == donor_inode) {
ext4_debug("ext4 move extent: The argument files should not "
- "be same file [ino:orig %lu, donor %lu]\n",
+ "be same inode [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
@@ -1208,18 +1314,27 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
-
+ /* TODO: it is not an obvious task to swap blocks for inodes with
+ full journaling enabled */
+ if (ext4_should_journal_data(orig_inode) ||
+ ext4_should_journal_data(donor_inode)) {
+ return -EINVAL;
+ }
/* Protect orig and donor inodes against a truncate */
- ret1 = mext_inode_double_lock(orig_inode, donor_inode);
- if (ret1 < 0)
- return ret1;
+ mext_inode_double_lock(orig_inode, donor_inode);
+
+ /* Wait for all existing dio workers */
+ ext4_inode_block_unlocked_dio(orig_inode);
+ ext4_inode_block_unlocked_dio(donor_inode);
+ inode_dio_wait(orig_inode);
+ inode_dio_wait(donor_inode);
/* Protect extent tree against block allocations via delalloc */
double_down_write_data_sem(orig_inode, donor_inode);
/* Check the filesystem environment whether move_extent can be done */
- ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
+ ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
donor_start, &len);
- if (ret1)
+ if (ret)
goto out;
file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
@@ -1227,13 +1342,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
if (file_end < block_end)
len -= block_end - file_end;
- ret1 = get_ext_path(orig_inode, block_start, &orig_path);
- if (ret1)
+ ret = get_ext_path(orig_inode, block_start, &orig_path);
+ if (ret)
goto out;
/* Get path structure to check the hole */
- ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
- if (ret1)
+ ret = get_ext_path(orig_inode, block_start, &holecheck_path);
+ if (ret)
goto out;
depth = ext_depth(orig_inode);
@@ -1252,13 +1367,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
last_extent = mext_next_extent(orig_inode,
holecheck_path, &ext_cur);
if (last_extent < 0) {
- ret1 = last_extent;
+ ret = last_extent;
goto out;
}
last_extent = mext_next_extent(orig_inode, orig_path,
&ext_dummy);
if (last_extent < 0) {
- ret1 = last_extent;
+ ret = last_extent;
goto out;
}
seq_start = le32_to_cpu(ext_cur->ee_block);
@@ -1272,7 +1387,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
if (le32_to_cpu(ext_cur->ee_block) > block_end) {
ext4_debug("ext4 move extent: The specified range of file "
"may be the hole\n");
- ret1 = -EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -1292,7 +1407,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
last_extent = mext_next_extent(orig_inode, holecheck_path,
&ext_cur);
if (last_extent < 0) {
- ret1 = last_extent;
+ ret = last_extent;
break;
}
add_blocks = ext4_ext_get_actual_len(ext_cur);
@@ -1349,18 +1464,18 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
orig_page_offset,
data_offset_in_page,
block_len_in_page, uninit,
- &ret1);
+ &ret);
/* Count how many blocks we have exchanged */
*moved_len += block_len_in_page;
- if (ret1 < 0)
+ if (ret < 0)
break;
if (*moved_len > len) {
EXT4_ERROR_INODE(orig_inode,
"We replaced blocks too much! "
"sum of replaced: %llu requested: %llu",
*moved_len, len);
- ret1 = -EIO;
+ ret = -EIO;
break;
}
@@ -1374,22 +1489,22 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
}
double_down_write_data_sem(orig_inode, donor_inode);
- if (ret1 < 0)
+ if (ret < 0)
break;
/* Decrease buffer counter */
if (holecheck_path)
ext4_ext_drop_refs(holecheck_path);
- ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
- if (ret1)
+ ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
+ if (ret)
break;
depth = holecheck_path->p_depth;
/* Decrease buffer counter */
if (orig_path)
ext4_ext_drop_refs(orig_path);
- ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
- if (ret1)
+ ret = get_ext_path(orig_inode, seq_start, &orig_path);
+ if (ret)
break;
ext_cur = holecheck_path[depth].p_ext;
@@ -1412,12 +1527,9 @@ out:
kfree(holecheck_path);
}
double_up_write_data_sem(orig_inode, donor_inode);
- ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
-
- if (ret1)
- return ret1;
- else if (ret2)
- return ret2;
+ ext4_inode_resume_unlocked_dio(orig_inode);
+ ext4_inode_resume_unlocked_dio(donor_inode);
+ mext_inode_double_unlock(orig_inode, donor_inode);
- return 0;
+ return ret;
}
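The move_extent.c hunks above collapse the old ret1/ret2 bookkeeping into a
single ret and serialize the extent swap against direct I/O before touching
the extent tree. A minimal sketch of the locking order, using only the
function names visible in the hunks (teardown runs in reverse at the end):

	mext_inode_double_lock(orig_inode, donor_inode);      /* both i_mutex */
	ext4_inode_block_unlocked_dio(orig_inode);            /* stop new unlocked DIO */
	ext4_inode_block_unlocked_dio(donor_inode);
	inode_dio_wait(orig_inode);                           /* drain in-flight DIO */
	inode_dio_wait(donor_inode);
	double_down_write_data_sem(orig_inode, donor_inode);  /* block delalloc allocation */

	/* ... exchange extents, accumulating a single error code in "ret" ... */

	double_up_write_data_sem(orig_inode, donor_inode);
	ext4_inode_resume_unlocked_dio(orig_inode);
	ext4_inode_resume_unlocked_dio(donor_inode);
	mext_inode_double_unlock(orig_inode, donor_inode);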
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2a42cc04466f..6d600a69fc9d 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -55,6 +55,13 @@ static struct buffer_head *ext4_append(handle_t *handle,
{
struct buffer_head *bh;
+ if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
+ ((inode->i_size >> 10) >=
+ EXT4_SB(inode->i_sb)->s_max_dir_size_kb))) {
+ *err = -ENOSPC;
+ return NULL;
+ }
+
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
bh = ext4_bread(handle, inode, *block, 1, err);
@@ -67,6 +74,12 @@ static struct buffer_head *ext4_append(handle_t *handle,
bh = NULL;
}
}
+ if (!bh && !(*err)) {
+ *err = -EIO;
+ ext4_error(inode->i_sb,
+ "Directory hole detected on inode %lu\n",
+ inode->i_ino);
+ }
return bh;
}
@@ -594,8 +607,11 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
u32 hash;
frame->bh = NULL;
- if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
+ if (!(bh = ext4_bread(NULL, dir, 0, 0, err))) {
+ if (*err == 0)
+ *err = ERR_BAD_DX_DIR;
goto fail;
+ }
root = (struct dx_root *) bh->b_data;
if (root->info.hash_version != DX_HASH_TEA &&
root->info.hash_version != DX_HASH_HALF_MD4 &&
@@ -696,8 +712,11 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
frame->entries = entries;
frame->at = at;
if (!indirect--) return frame;
- if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
+ if (!(bh = ext4_bread(NULL, dir, dx_get_block(at), 0, err))) {
+ if (!(*err))
+ *err = ERR_BAD_DX_DIR;
goto fail2;
+ }
at = entries = ((struct dx_node *) bh->b_data)->entries;
if (!buffer_verified(bh) &&
@@ -807,8 +826,15 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
*/
while (num_frames--) {
if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
- 0, &err)))
+ 0, &err))) {
+ if (!err) {
+ ext4_error(dir->i_sb,
+ "Directory hole detected on inode %lu\n",
+ dir->i_ino);
+ return -EIO;
+ }
return err; /* Failure */
+ }
if (!buffer_verified(bh) &&
!ext4_dx_csum_verify(dir,
@@ -839,12 +865,19 @@ static int htree_dirblock_to_tree(struct file *dir_file,
{
struct buffer_head *bh;
struct ext4_dir_entry_2 *de, *top;
- int err, count = 0;
+ int err = 0, count = 0;
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
- if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
+ if (!(bh = ext4_bread(NULL, dir, block, 0, &err))) {
+ if (!err) {
+ err = -EIO;
+ ext4_error(dir->i_sb,
+ "Directory hole detected on inode %lu\n",
+ dir->i_ino);
+ }
return err;
+ }
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
@@ -1267,8 +1300,15 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
return NULL;
do {
block = dx_get_block(frame->at);
- if (!(bh = ext4_bread(NULL, dir, block, 0, err)))
+ if (!(bh = ext4_bread(NULL, dir, block, 0, err))) {
+ if (!(*err)) {
+ *err = -EIO;
+ ext4_error(dir->i_sb,
+ "Directory hole detected on inode %lu\n",
+ dir->i_ino);
+ }
goto errout;
+ }
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir,
@@ -1801,9 +1841,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
}
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0; block < blocks; block++) {
- bh = ext4_bread(handle, dir, block, 0, &retval);
- if(!bh)
+ if (!(bh = ext4_bread(handle, dir, block, 0, &retval))) {
+ if (!retval) {
+ retval = -EIO;
+				ext4_error(dir->i_sb,
+					"Directory hole detected on inode %lu\n",
+					dir->i_ino);
+ }
return retval;
+ }
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir,
(struct ext4_dir_entry *)bh->b_data))
@@ -1860,8 +1906,15 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
entries = frame->entries;
at = frame->at;
- if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
+ if (!(bh = ext4_bread(handle, dir, dx_get_block(frame->at), 0, &err))) {
+ if (!err) {
+ err = -EIO;
+ ext4_error(dir->i_sb,
+ "Directory hole detected on inode %lu\n",
+ dir->i_ino);
+ }
goto cleanup;
+ }
if (!buffer_verified(bh) &&
!ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
@@ -2149,9 +2202,7 @@ retry:
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
-#ifdef CONFIG_EXT4_FS_XATTR
inode->i_op = &ext4_special_inode_operations;
-#endif
err = ext4_add_nondir(handle, dentry, inode);
}
ext4_journal_stop(handle);
@@ -2199,9 +2250,15 @@ retry:
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
- dir_block = ext4_bread(handle, inode, 0, 1, &err);
- if (!dir_block)
+ if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) {
+ if (!err) {
+ err = -EIO;
+ ext4_error(inode->i_sb,
+ "Directory hole detected on inode %lu\n",
+ inode->i_ino);
+ }
goto out_clear_inode;
+ }
BUFFER_TRACE(dir_block, "get_write_access");
err = ext4_journal_get_write_access(handle, dir_block);
if (err)
@@ -2318,6 +2375,11 @@ static int empty_dir(struct inode *inode)
EXT4_ERROR_INODE(inode,
"error %d reading directory "
"lblock %u", err, lblock);
+ else
+ ext4_warning(inode->i_sb,
+ "bad directory (dir #%lu) - no data block",
+ inode->i_ino);
+
offset += sb->s_blocksize;
continue;
}
@@ -2362,7 +2424,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
struct ext4_iloc iloc;
int err = 0, rc;
- if (!ext4_handle_valid(handle))
+ if (!EXT4_SB(sb)->s_journal)
return 0;
mutex_lock(&EXT4_SB(sb)->s_orphan_lock);
@@ -2436,8 +2498,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
struct ext4_iloc iloc;
int err = 0;
- /* ext4_handle_valid() assumes a valid handle_t pointer */
- if (handle && !ext4_handle_valid(handle))
+ if (!EXT4_SB(inode->i_sb)->s_journal)
return 0;
mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
@@ -2456,7 +2517,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
* transaction handle with which to update the orphan list on
* disk, but we still need to remove the inode from the linked
* list in memory. */
- if (sbi->s_journal && !handle)
+ if (!handle)
goto out;
err = ext4_reserve_inode_write(handle, inode, &iloc);
@@ -2826,9 +2887,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
goto end_rename;
}
retval = -EIO;
- dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval);
- if (!dir_bh)
+ if (!(dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval))) {
+ if (!retval) {
+ retval = -EIO;
+ ext4_error(old_inode->i_sb,
+ "Directory hole detected on inode %lu\n",
+ old_inode->i_ino);
+ }
goto end_rename;
+ }
if (!buffer_verified(dir_bh) &&
!ext4_dirent_csum_verify(old_inode,
(struct ext4_dir_entry *)dir_bh->b_data))
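Each namei.c hunk above applies the same idiom: when ext4_bread() returns
NULL but leaves the error code at zero, the directory block is simply
missing -- a hole -- and is now reported as -EIO. A hypothetical helper
(not part of this patch) capturing that repeated pattern:

	static struct buffer_head *dir_bread_or_eio(handle_t *handle,
						    struct inode *dir,
						    ext4_lblk_t block, int *err)
	{
		struct buffer_head *bh = ext4_bread(handle, dir, block, 0, err);

		/* NULL with *err == 0 means the block does not exist at all. */
		if (!bh && !*err) {
			*err = -EIO;
			ext4_error(dir->i_sb,
				   "Directory hole detected on inode %lu\n",
				   dir->i_ino);
		}
		return bh;
	}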
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index dcdeef169a69..68e896e12a67 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -71,6 +71,9 @@ void ext4_free_io_end(ext4_io_end_t *io)
int i;
BUG_ON(!io);
+ BUG_ON(!list_empty(&io->list));
+ BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
+
if (io->page)
put_page(io->page);
for (i = 0; i < io->num_io_pages; i++)
@@ -81,13 +84,8 @@ void ext4_free_io_end(ext4_io_end_t *io)
kmem_cache_free(io_end_cachep, io);
}
-/*
- * check a range of space and convert unwritten extents to written.
- *
- * Called with inode->i_mutex; we depend on this when we manipulate
- * io->flag, since we could otherwise race with ext4_flush_completed_IO()
- */
-int ext4_end_io_nolock(ext4_io_end_t *io)
+/* check a range of space and convert unwritten extents to written. */
+static int ext4_end_io(ext4_io_end_t *io)
{
struct inode *inode = io->inode;
loff_t offset = io->offset;
@@ -106,63 +104,136 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
"(inode %lu, offset %llu, size %zd, error %d)",
inode->i_ino, offset, size, ret);
}
-
if (io->iocb)
aio_complete(io->iocb, io->result, 0);
if (io->flag & EXT4_IO_END_DIRECT)
inode_dio_done(inode);
/* Wake up anyone waiting on unwritten extent conversion */
- if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
+ if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
wake_up_all(ext4_ioend_wq(io->inode));
return ret;
}
-/*
- * work on completed aio dio IO, to convert unwritten extents to extents
- */
-static void ext4_end_io_work(struct work_struct *work)
+static void dump_completed_IO(struct inode *inode)
+{
+#ifdef EXT4FS_DEBUG
+ struct list_head *cur, *before, *after;
+ ext4_io_end_t *io, *io0, *io1;
+
+ if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
+ ext4_debug("inode %lu completed_io list is empty\n",
+ inode->i_ino);
+ return;
+ }
+
+ ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
+ list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
+ cur = &io->list;
+ before = cur->prev;
+ io0 = container_of(before, ext4_io_end_t, list);
+ after = cur->next;
+ io1 = container_of(after, ext4_io_end_t, list);
+
+ ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
+ io, inode->i_ino, io0, io1);
+ }
+#endif
+}
+
+/* Add the io_end to per-inode completed end_io list. */
+void ext4_add_complete_io(ext4_io_end_t *io_end)
{
- ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
- struct inode *inode = io->inode;
- struct ext4_inode_info *ei = EXT4_I(inode);
- unsigned long flags;
+ struct ext4_inode_info *ei = EXT4_I(io_end->inode);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- if (io->flag & EXT4_IO_END_IN_FSYNC)
- goto requeue;
- if (list_empty(&io->list)) {
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
- goto free;
+ if (list_empty(&ei->i_completed_io_list)) {
+ io_end->flag |= EXT4_IO_END_QUEUED;
+ queue_work(wq, &io_end->work);
}
+ list_add_tail(&io_end->list, &ei->i_completed_io_list);
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+}
- if (!mutex_trylock(&inode->i_mutex)) {
- bool was_queued;
-requeue:
- was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
- io->flag |= EXT4_IO_END_QUEUED;
- spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
- /*
- * Requeue the work instead of waiting so that the work
- * items queued after this can be processed.
- */
- queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
- /*
- * To prevent the ext4-dio-unwritten thread from keeping
- * requeueing end_io requests and occupying cpu for too long,
- * yield the cpu if it sees an end_io request that has already
- * been requeued.
- */
- if (was_queued)
- yield();
- return;
+static int ext4_do_flush_completed_IO(struct inode *inode,
+ ext4_io_end_t *work_io)
+{
+ ext4_io_end_t *io;
+ struct list_head unwritten, complete, to_free;
+ unsigned long flags;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ int err, ret = 0;
+
+ INIT_LIST_HEAD(&complete);
+ INIT_LIST_HEAD(&to_free);
+
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ dump_completed_IO(inode);
+ list_replace_init(&ei->i_completed_io_list, &unwritten);
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+
+ while (!list_empty(&unwritten)) {
+ io = list_entry(unwritten.next, ext4_io_end_t, list);
+ BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
+ list_del_init(&io->list);
+
+ err = ext4_end_io(io);
+ if (unlikely(!ret && err))
+ ret = err;
+
+ list_add_tail(&io->list, &complete);
+ }
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
+ while (!list_empty(&complete)) {
+ io = list_entry(complete.next, ext4_io_end_t, list);
+ io->flag &= ~EXT4_IO_END_UNWRITTEN;
+		/* end_io context cannot be destroyed now because it is still
+		 * used by the queued worker; the worker thread will destroy it later */
+ if (io->flag & EXT4_IO_END_QUEUED)
+ list_del_init(&io->list);
+ else
+ list_move(&io->list, &to_free);
+ }
+	/* If we are called from worker context, it is time to clear the
+	 * queued flag, and destroy its end_io if it was converted already */
+ if (work_io) {
+ work_io->flag &= ~EXT4_IO_END_QUEUED;
+ if (!(work_io->flag & EXT4_IO_END_UNWRITTEN))
+ list_add_tail(&work_io->list, &to_free);
}
- list_del_init(&io->list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
- (void) ext4_end_io_nolock(io);
- mutex_unlock(&inode->i_mutex);
-free:
- ext4_free_io_end(io);
+
+ while (!list_empty(&to_free)) {
+ io = list_entry(to_free.next, ext4_io_end_t, list);
+ list_del_init(&io->list);
+ ext4_free_io_end(io);
+ }
+ return ret;
+}
+
+/*
+ * work on completed aio dio IO, to convert unwritten extents to extents
+ */
+static void ext4_end_io_work(struct work_struct *work)
+{
+ ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
+ ext4_do_flush_completed_IO(io->inode, io);
+}
+
+int ext4_flush_unwritten_io(struct inode *inode)
+{
+ int ret;
+ WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
+ !(inode->i_state & I_FREEING));
+ ret = ext4_do_flush_completed_IO(inode, NULL);
+ ext4_unwritten_wait(inode);
+ return ret;
}
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
@@ -195,9 +266,7 @@ static void buffer_io_error(struct buffer_head *bh)
static void ext4_end_bio(struct bio *bio, int error)
{
ext4_io_end_t *io_end = bio->bi_private;
- struct workqueue_struct *wq;
struct inode *inode;
- unsigned long flags;
int i;
sector_t bi_sector = bio->bi_sector;
@@ -255,14 +324,7 @@ static void ext4_end_bio(struct bio *bio, int error)
return;
}
- /* Add the io_end to per-inode completed io list*/
- spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
- list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
- spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
-
- wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
- /* queue the work to convert unwritten extents to written */
- queue_work(wq, &io_end->work);
+ ext4_add_complete_io(io_end);
}
void ext4_io_submit(struct ext4_io_submit *io)
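The page-io.c rework above removes the trylock/requeue/yield loop: bio
completion now just calls ext4_add_complete_io(), which queues the work only
while the per-inode list is non-empty, and ext4_do_flush_completed_IO()
steals the whole completed list under the spinlock before doing the slow
extent conversion. The core detach-then-process pattern, as a sketch using
the names from the hunks above:

	LIST_HEAD(unwritten);

	/* Hold the lock only long enough to steal the whole list ... */
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	list_replace_init(&ei->i_completed_io_list, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	/* ... then convert unwritten extents without holding it. */
	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		list_del_init(&io->list);
		err = ext4_end_io(io);
		if (unlikely(!ret && err))
			ret = err;
	}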
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 41f6ef68e2e1..47bf06a2765d 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -45,6 +45,28 @@ void ext4_resize_end(struct super_block *sb)
smp_mb__after_clear_bit();
}
+static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
+					     ext4_group_t group)
+{
+ return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
+ EXT4_DESC_PER_BLOCK_BITS(sb);
+}
+
+static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
+						ext4_group_t group)
+{
+ group = ext4_meta_bg_first_group(sb, group);
+ return ext4_group_first_block_no(sb, group);
+}
+
+static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
+						ext4_group_t group)
+{
+ ext4_grpblk_t overhead;
+ overhead = ext4_bg_num_gdb(sb, group);
+ if (ext4_bg_has_super(sb, group))
+ overhead += 1 +
+ le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
+ return overhead;
+}
+
#define outside(b, first, last) ((b) < (first) || (b) >= (last))
#define inside(b, first, last) ((b) >= (first) && (b) < (last))
@@ -57,9 +79,7 @@ static int verify_group_input(struct super_block *sb,
ext4_fsblk_t end = start + input->blocks_count;
ext4_group_t group = input->group;
ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
- unsigned overhead = ext4_bg_has_super(sb, group) ?
- (1 + ext4_bg_num_gdb(sb, group) +
- le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+ unsigned overhead = ext4_group_overhead_blocks(sb, group);
ext4_fsblk_t metaend = start + overhead;
struct buffer_head *bh = NULL;
ext4_grpblk_t free_blocks_count, offset;
@@ -200,13 +220,15 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
* be a partial of a flex group.
*
* @sb: super block of fs to which the groups belongs
+ *
+ * Returns 0 on a successful allocation of the metadata blocks in the
+ * block group.
*/
-static void ext4_alloc_group_tables(struct super_block *sb,
+static int ext4_alloc_group_tables(struct super_block *sb,
struct ext4_new_flex_group_data *flex_gd,
int flexbg_size)
{
struct ext4_new_group_data *group_data = flex_gd->groups;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
ext4_fsblk_t start_blk;
ext4_fsblk_t last_blk;
ext4_group_t src_group;
@@ -226,23 +248,24 @@ static void ext4_alloc_group_tables(struct super_block *sb,
(last_group & ~(flexbg_size - 1))));
next_group:
group = group_data[0].group;
+ if (src_group >= group_data[0].group + flex_gd->count)
+ return -ENOSPC;
start_blk = ext4_group_first_block_no(sb, src_group);
last_blk = start_blk + group_data[src_group - group].blocks_count;
- overhead = ext4_bg_has_super(sb, src_group) ?
- (1 + ext4_bg_num_gdb(sb, src_group) +
- le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+ overhead = ext4_group_overhead_blocks(sb, src_group);
start_blk += overhead;
- BUG_ON(src_group >= group_data[0].group + flex_gd->count);
/* We collect contiguous blocks as much as possible. */
src_group++;
- for (; src_group <= last_group; src_group++)
- if (!ext4_bg_has_super(sb, src_group))
+ for (; src_group <= last_group; src_group++) {
+ overhead = ext4_group_overhead_blocks(sb, src_group);
+ if (overhead != 0)
last_blk += group_data[src_group - group].blocks_count;
else
break;
+ }
/* Allocate block bitmaps */
for (; bb_index < flex_gd->count; bb_index++) {
@@ -300,6 +323,7 @@ next_group:
group_data[i].free_blocks_count);
}
}
+ return 0;
}
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
@@ -433,11 +457,13 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
ext4_group_t group, count;
struct buffer_head *bh = NULL;
int reserved_gdb, i, j, err = 0, err2;
+ int meta_bg;
BUG_ON(!flex_gd->count || !group_data ||
group_data[0].group != sbi->s_groups_count);
reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
+ meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
/* This transaction may be extended/restarted along the way */
handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
@@ -447,12 +473,25 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
group = group_data[0].group;
for (i = 0; i < flex_gd->count; i++, group++) {
unsigned long gdblocks;
+ ext4_grpblk_t overhead;
gdblocks = ext4_bg_num_gdb(sb, group);
start = ext4_group_first_block_no(sb, group);
+ if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
+ goto handle_itb;
+
+ if (meta_bg == 1) {
+ ext4_group_t first_group;
+ first_group = ext4_meta_bg_first_group(sb, group);
+ if (first_group != group + 1 &&
+ first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
+ goto handle_itb;
+ }
+
+ block = start + ext4_bg_has_super(sb, group);
/* Copy all of the GDT blocks into the backup in this group */
- for (j = 0, block = start + 1; j < gdblocks; j++, block++) {
+ for (j = 0; j < gdblocks; j++, block++) {
struct buffer_head *gdb;
ext4_debug("update backup group %#04llx\n", block);
@@ -493,6 +532,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
goto out;
}
+handle_itb:
		/* Initialize group tables of the group @group */
if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
goto handle_bb;
@@ -521,11 +561,11 @@ handle_bb:
err = PTR_ERR(bh);
goto out;
}
- if (ext4_bg_has_super(sb, group)) {
+ overhead = ext4_group_overhead_blocks(sb, group);
+ if (overhead != 0) {
ext4_debug("mark backup superblock %#04llx (+0)\n",
start);
- ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb +
- 1);
+ ext4_set_bits(bh->b_data, 0, overhead);
}
ext4_mark_bitmap_end(group_data[i].blocks_count,
sb->s_blocksize * 8, bh->b_data);
@@ -822,6 +862,45 @@ exit_bh:
}
/*
+ * add_new_gdb_meta_bg is the sister of add_new_gdb.
+ */
+static int add_new_gdb_meta_bg(struct super_block *sb,
+			       handle_t *handle, ext4_group_t group)
+{
+ ext4_fsblk_t gdblock;
+ struct buffer_head *gdb_bh;
+ struct buffer_head **o_group_desc, **n_group_desc;
+ unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+ int err;
+
+ gdblock = ext4_meta_bg_first_block_no(sb, group) +
+ ext4_bg_has_super(sb, group);
+ gdb_bh = sb_bread(sb, gdblock);
+ if (!gdb_bh)
+ return -EIO;
+ n_group_desc = ext4_kvmalloc((gdb_num + 1) *
+ sizeof(struct buffer_head *),
+ GFP_NOFS);
+ if (!n_group_desc) {
+ err = -ENOMEM;
+ ext4_warning(sb, "not enough memory for %lu groups",
+ gdb_num + 1);
+ return err;
+ }
+
+ o_group_desc = EXT4_SB(sb)->s_group_desc;
+ memcpy(n_group_desc, o_group_desc,
+ EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+ n_group_desc[gdb_num] = gdb_bh;
+ EXT4_SB(sb)->s_group_desc = n_group_desc;
+ EXT4_SB(sb)->s_gdb_count++;
+ ext4_kvfree(o_group_desc);
+ err = ext4_journal_get_write_access(handle, gdb_bh);
+ if (unlikely(err))
+ brelse(gdb_bh);
+ return err;
+}
+
+/*
* Called when we are adding a new group which has a backup copy of each of
* the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
* We need to add these reserved backup GDT blocks to the resize inode, so
@@ -949,16 +1028,16 @@ exit_free:
* do not copy the full number of backups at this time. The resize
* which changed s_groups_count will backup again.
*/
-static void update_backups(struct super_block *sb,
- int blk_off, char *data, int size)
+static void update_backups(struct super_block *sb, int blk_off, char *data,
+ int size, int meta_bg)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- const ext4_group_t last = sbi->s_groups_count;
+ ext4_group_t last;
const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
unsigned three = 1;
unsigned five = 5;
unsigned seven = 7;
- ext4_group_t group;
+ ext4_group_t group = 0;
int rest = sb->s_blocksize - size;
handle_t *handle;
int err = 0, err2;
@@ -970,10 +1049,17 @@ static void update_backups(struct super_block *sb,
goto exit_err;
}
- ext4_superblock_csum_set(sb, (struct ext4_super_block *)data);
+ if (meta_bg == 0) {
+ group = ext4_list_backups(sb, &three, &five, &seven);
+ last = sbi->s_groups_count;
+ } else {
+ group = ext4_meta_bg_first_group(sb, group) + 1;
+ last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
+ }
- while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
+ while (group < sbi->s_groups_count) {
struct buffer_head *bh;
+ ext4_fsblk_t backup_block;
/* Out of journal space, and can't get more - abort - so sad */
if (ext4_handle_valid(handle) &&
@@ -982,13 +1068,20 @@ static void update_backups(struct super_block *sb,
(err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
break;
- bh = sb_getblk(sb, group * bpg + blk_off);
+ if (meta_bg == 0)
+ backup_block = group * bpg + blk_off;
+ else
+ backup_block = (ext4_group_first_block_no(sb, group) +
+ ext4_bg_has_super(sb, group));
+
+ bh = sb_getblk(sb, backup_block);
if (!bh) {
err = -EIO;
break;
}
- ext4_debug("update metadata backup %#04lx\n",
- (unsigned long)bh->b_blocknr);
+ ext4_debug("update metadata backup %llu(+%llu)\n",
+ backup_block, backup_block -
+ ext4_group_first_block_no(sb, group));
if ((err = ext4_journal_get_write_access(handle, bh)))
break;
lock_buffer(bh);
@@ -1001,6 +1094,13 @@ static void update_backups(struct super_block *sb,
if (unlikely(err))
ext4_std_error(sb, err);
brelse(bh);
+
+ if (meta_bg == 0)
+ group = ext4_list_backups(sb, &three, &five, &seven);
+ else if (group == last)
+ break;
+ else
+ group = last;
}
if ((err2 = ext4_journal_stop(handle)) && !err)
err = err2;
@@ -1043,7 +1143,9 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
struct ext4_super_block *es = sbi->s_es;
struct buffer_head *gdb_bh;
int i, gdb_off, gdb_num, err = 0;
+ int meta_bg;
+ meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
for (i = 0; i < count; i++, group++) {
int reserved_gdb = ext4_bg_has_super(sb, group) ?
le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
@@ -1063,8 +1165,11 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
err = reserve_backup_gdb(handle, resize_inode, group);
- } else
+ } else if (meta_bg != 0) {
+ err = add_new_gdb_meta_bg(sb, handle, group);
+ } else {
err = add_new_gdb(handle, resize_inode, group);
+ }
if (err)
break;
}
@@ -1076,17 +1181,12 @@ static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
struct buffer_head *bh = sb_getblk(sb, block);
if (!bh)
return NULL;
-
- if (bitmap_uptodate(bh))
- return bh;
-
- lock_buffer(bh);
- if (bh_submit_read(bh) < 0) {
- unlock_buffer(bh);
- brelse(bh);
- return NULL;
+ if (!bh_uptodate_or_lock(bh)) {
+ if (bh_submit_read(bh) < 0) {
+ brelse(bh);
+ return NULL;
+ }
}
- unlock_buffer(bh);
return bh;
}
@@ -1112,8 +1212,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
bh = ext4_get_bitmap(sb, group_data->block_bitmap);
if (!bh)
return -EIO;
- ext4_block_bitmap_csum_set(sb, group, gdp, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ ext4_block_bitmap_csum_set(sb, group, gdp, bh);
brelse(bh);
return 0;
@@ -1161,6 +1260,9 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
ext4_free_group_clusters_set(sb, gdp,
EXT4_B2C(sbi, group_data->free_blocks_count));
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
+ if (ext4_has_group_desc_csum(sb))
+ ext4_itable_unused_set(sb, gdp,
+ EXT4_INODES_PER_GROUP(sb));
gdp->bg_flags = cpu_to_le16(*bg_flags);
ext4_group_desc_csum_set(sb, group, gdp);
@@ -1216,7 +1318,7 @@ static void ext4_update_super(struct super_block *sb,
}
reserved_blocks = ext4_r_blocks_count(es) * 100;
- do_div(reserved_blocks, ext4_blocks_count(es));
+ reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
reserved_blocks *= blocks_count;
do_div(reserved_blocks, 100);
@@ -1227,6 +1329,7 @@ static void ext4_update_super(struct super_block *sb,
le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
flex_gd->count);
+ ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
/*
* We need to protect s_groups_count against other CPUs seeing
* inconsistent state in the superblock.
@@ -1261,6 +1364,8 @@ static void ext4_update_super(struct super_block *sb,
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
+ ext4_debug("free blocks count %llu",
+ percpu_counter_read(&sbi->s_freeclusters_counter));
if (EXT4_HAS_INCOMPAT_FEATURE(sb,
EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
sbi->s_log_groups_per_flex) {
@@ -1349,16 +1454,24 @@ exit_journal:
err = err2;
if (!err) {
- int i;
+ int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+ int gdb_num_end = ((group + flex_gd->count - 1) /
+ EXT4_DESC_PER_BLOCK(sb));
+ int meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_META_BG);
+ sector_t old_gdb = 0;
+
update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
- sizeof(struct ext4_super_block));
- for (i = 0; i < flex_gd->count; i++, group++) {
+ sizeof(struct ext4_super_block), 0);
+ for (; gdb_num <= gdb_num_end; gdb_num++) {
struct buffer_head *gdb_bh;
- int gdb_num;
- gdb_num = group / EXT4_BLOCKS_PER_GROUP(sb);
+
gdb_bh = sbi->s_group_desc[gdb_num];
+ if (old_gdb == gdb_bh->b_blocknr)
+ continue;
update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
- gdb_bh->b_size);
+ gdb_bh->b_size, meta_bg);
+ old_gdb = gdb_bh->b_blocknr;
}
}
exit:
@@ -1402,9 +1515,7 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
group_data[i].group = group + i;
group_data[i].blocks_count = blocks_per_group;
- overhead = ext4_bg_has_super(sb, group + i) ?
- (1 + ext4_bg_num_gdb(sb, group + i) +
- le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+ overhead = ext4_group_overhead_blocks(sb, group + i);
group_data[i].free_blocks_count = blocks_per_group - overhead;
if (ext4_has_group_desc_csum(sb))
flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
@@ -1492,6 +1603,14 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
if (err)
goto out;
+ err = ext4_alloc_flex_bg_array(sb, input->group + 1);
+ if (err)
+ return err;
+
+ err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
+ if (err)
+ goto out;
+
flex_gd.count = 1;
flex_gd.groups = input;
flex_gd.bg_flags = &bg_flags;
@@ -1544,11 +1663,13 @@ errout:
err = err2;
if (!err) {
+ ext4_fsblk_t first_block;
+ first_block = ext4_group_first_block_no(sb, 0);
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
"blocks\n", ext4_blocks_count(es));
- update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
- sizeof(struct ext4_super_block));
+ update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr - first_block,
+ (char *)es, sizeof(struct ext4_super_block), 0);
}
return err;
}
@@ -1631,6 +1752,94 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
return err;
} /* ext4_group_extend */
+
+static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
+{
+ return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
+}
+
+/*
+ * Release the resize inode and drop the resize_inode feature if there
+ * are no more reserved gdt blocks, and then convert the file system
+ * to enable meta_bg
+ */
+static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
+{
+ handle_t *handle;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ ext4_fsblk_t nr;
+ int i, ret, err = 0;
+ int credits = 1;
+
+ ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
+ if (inode) {
+ if (es->s_reserved_gdt_blocks) {
+ ext4_error(sb, "Unexpected non-zero "
+ "s_reserved_gdt_blocks");
+ return -EPERM;
+ }
+
+ /* Do a quick sanity check of the resize inode */
+ if (inode->i_blocks != 1 << (inode->i_blkbits - 9))
+ goto invalid_resize_inode;
+ for (i = 0; i < EXT4_N_BLOCKS; i++) {
+ if (i == EXT4_DIND_BLOCK) {
+ if (ei->i_data[i])
+ continue;
+ else
+ goto invalid_resize_inode;
+ }
+ if (ei->i_data[i])
+ goto invalid_resize_inode;
+ }
+ credits += 3; /* block bitmap, bg descriptor, resize inode */
+ }
+
+ handle = ext4_journal_start_sb(sb, credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+ if (err)
+ goto errout;
+
+ EXT4_CLEAR_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE);
+ EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
+ sbi->s_es->s_first_meta_bg =
+ cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
+
+ err = ext4_handle_dirty_super(handle, sb);
+ if (err) {
+ ext4_std_error(sb, err);
+ goto errout;
+ }
+
+ if (inode) {
+ nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
+ ext4_free_blocks(handle, inode, NULL, nr, 1,
+ EXT4_FREE_BLOCKS_METADATA |
+ EXT4_FREE_BLOCKS_FORGET);
+ ei->i_data[EXT4_DIND_BLOCK] = 0;
+ inode->i_blocks = 0;
+
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (err)
+ ext4_std_error(sb, err);
+ }
+
+errout:
+ ret = ext4_journal_stop(handle);
+ if (!err)
+ err = ret;
+	return err;
+
+invalid_resize_inode:
+ ext4_error(sb, "corrupted/inconsistent resize inode");
+ return -EINVAL;
+}
+
/*
* ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
*
@@ -1643,21 +1852,31 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
struct buffer_head *bh;
- struct inode *resize_inode;
- ext4_fsblk_t o_blocks_count;
- ext4_group_t o_group;
- ext4_group_t n_group;
- ext4_grpblk_t offset, add;
+ struct inode *resize_inode = NULL;
+ ext4_grpblk_t add, offset;
unsigned long n_desc_blocks;
unsigned long o_desc_blocks;
- unsigned long desc_blocks;
- int err = 0, flexbg_size = 1;
+ ext4_group_t o_group;
+ ext4_group_t n_group;
+ ext4_fsblk_t o_blocks_count;
+ ext4_fsblk_t n_blocks_count_retry = 0;
+ unsigned long last_update_time = 0;
+ int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
+ int meta_bg;
+ /* See if the device is actually as big as what was requested */
+ bh = sb_bread(sb, n_blocks_count - 1);
+ if (!bh) {
+ ext4_warning(sb, "can't read last block, resize aborted");
+ return -ENOSPC;
+ }
+ brelse(bh);
+
+retry:
o_blocks_count = ext4_blocks_count(es);
- if (test_opt(sb, DEBUG))
- ext4_msg(sb, KERN_DEBUG, "resizing filesystem from %llu "
- "to %llu blocks", o_blocks_count, n_blocks_count);
+ ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
+ "to %llu blocks", o_blocks_count, n_blocks_count);
if (n_blocks_count < o_blocks_count) {
/* On-line shrinking not supported */
@@ -1672,32 +1891,49 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
- n_desc_blocks = (n_group + EXT4_DESC_PER_BLOCK(sb)) /
- EXT4_DESC_PER_BLOCK(sb);
- o_desc_blocks = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
- EXT4_DESC_PER_BLOCK(sb);
- desc_blocks = n_desc_blocks - o_desc_blocks;
+ n_desc_blocks = num_desc_blocks(sb, n_group + 1);
+ o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);
- if (desc_blocks &&
- (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE) ||
- le16_to_cpu(es->s_reserved_gdt_blocks) < desc_blocks)) {
- ext4_warning(sb, "No reserved GDT blocks, can't resize");
- return -EPERM;
- }
+ meta_bg = EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG);
- resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
- if (IS_ERR(resize_inode)) {
- ext4_warning(sb, "Error opening resize inode");
- return PTR_ERR(resize_inode);
+ if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE)) {
+ if (meta_bg) {
+ ext4_error(sb, "resize_inode and meta_bg enabled "
+ "simultaneously");
+ return -EINVAL;
+ }
+ if (n_desc_blocks > o_desc_blocks +
+ le16_to_cpu(es->s_reserved_gdt_blocks)) {
+ n_blocks_count_retry = n_blocks_count;
+ n_desc_blocks = o_desc_blocks +
+ le16_to_cpu(es->s_reserved_gdt_blocks);
+ n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
+ n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
+ n_group--; /* set to last group number */
+ }
+
+ if (!resize_inode)
+ resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
+ if (IS_ERR(resize_inode)) {
+ ext4_warning(sb, "Error opening resize inode");
+ return PTR_ERR(resize_inode);
+ }
}
- /* See if the device is actually as big as what was requested */
- bh = sb_bread(sb, n_blocks_count - 1);
- if (!bh) {
- ext4_warning(sb, "can't read last block, resize aborted");
- return -ENOSPC;
+ if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
+ err = ext4_convert_meta_bg(sb, resize_inode);
+ if (err)
+ goto out;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
+ if (n_blocks_count_retry) {
+ n_blocks_count = n_blocks_count_retry;
+ n_blocks_count_retry = 0;
+ goto retry;
+ }
}
- brelse(bh);
/* extend the last group */
if (n_group == o_group)
@@ -1710,12 +1946,15 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
goto out;
}
- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
- es->s_log_groups_per_flex)
- flexbg_size = 1 << es->s_log_groups_per_flex;
+ if (ext4_blocks_count(es) == n_blocks_count)
+ goto out;
- o_blocks_count = ext4_blocks_count(es);
- if (o_blocks_count == n_blocks_count)
+ err = ext4_alloc_flex_bg_array(sb, n_group + 1);
+ if (err)
+ return err;
+
+ err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
+ if (err)
goto out;
flex_gd = alloc_flex_gd(flexbg_size);
@@ -1729,19 +1968,33 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
*/
while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
flexbg_size)) {
- ext4_alloc_group_tables(sb, flex_gd, flexbg_size);
+ if (jiffies - last_update_time > HZ * 10) {
+ if (last_update_time)
+ ext4_msg(sb, KERN_INFO,
+ "resized to %llu blocks",
+ ext4_blocks_count(es));
+ last_update_time = jiffies;
+ }
+ if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
+ break;
err = ext4_flex_group_add(sb, resize_inode, flex_gd);
if (unlikely(err))
break;
}
+ if (!err && n_blocks_count_retry) {
+ n_blocks_count = n_blocks_count_retry;
+ n_blocks_count_retry = 0;
+ free_flex_gd(flex_gd);
+ flex_gd = NULL;
+ goto retry;
+ }
+
out:
if (flex_gd)
free_flex_gd(flex_gd);
-
- iput(resize_inode);
- if (test_opt(sb, DEBUG))
- ext4_msg(sb, KERN_DEBUG, "resized filesystem from %llu "
- "upto %llu blocks", o_blocks_count, n_blocks_count);
+ if (resize_inode != NULL)
+ iput(resize_inode);
+ ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
return err;
}
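On the meta_bg arithmetic introduced at the top of resize.c:
ext4_meta_bg_first_group() clears the low EXT4_DESC_PER_BLOCK_BITS bits,
mapping every group to the first group of its meta_bg; the descriptor block
for a meta_bg lives in that first group, with backup copies in the second
and last groups, which is what the setup_new_flex_group_blocks() hunk checks
for. A standalone illustration, assuming 4KiB blocks with 32-byte
descriptors (EXT4_DESC_PER_BLOCK == 128 is an assumption here, not a value
read from a live filesystem):

	#include <stdio.h>

	#define DESC_PER_BLOCK_BITS 7			/* assumed: 4096 / 32 */
	#define DESC_PER_BLOCK (1u << DESC_PER_BLOCK_BITS)

	static unsigned meta_bg_first_group(unsigned group)
	{
		/* same shift-down/shift-up as ext4_meta_bg_first_group() */
		return (group >> DESC_PER_BLOCK_BITS) << DESC_PER_BLOCK_BITS;
	}

	int main(void)
	{
		unsigned group = 300;	/* illustrative: inside meta_bg 2 (256..383) */
		unsigned first = meta_bg_first_group(group);

		printf("first group %u, descriptor copies in groups %u, %u, %u\n",
		       first, first, first + 1, first + DESC_PER_BLOCK - 1);
		return 0;
	}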
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 69c55d4e4626..80928f716850 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -143,9 +143,10 @@ int ext4_superblock_csum_verify(struct super_block *sb,
return es->s_checksum == ext4_superblock_csum(sb, es);
}
-void ext4_superblock_csum_set(struct super_block *sb,
- struct ext4_super_block *es)
+void ext4_superblock_csum_set(struct super_block *sb)
{
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
return;
@@ -420,7 +421,7 @@ static void __save_error_info(struct super_block *sb, const char *func,
*/
if (!es->s_error_count)
mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
- es->s_error_count = cpu_to_le32(le32_to_cpu(es->s_error_count) + 1);
+ le32_add_cpu(&es->s_error_count, 1);
}
static void save_error_info(struct super_block *sb, const char *func,
@@ -850,7 +851,6 @@ static void ext4_put_super(struct super_block *sb)
flush_workqueue(sbi->dio_unwritten_wq);
destroy_workqueue(sbi->dio_unwritten_wq);
- lock_super(sb);
if (sbi->s_journal) {
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
@@ -917,7 +917,6 @@ static void ext4_put_super(struct super_block *sb)
* Now that we are completely done shutting down the
* superblock, we need to actually destroy the kobject.
*/
- unlock_super(sb);
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
if (sbi->s_chksum_driver)
@@ -956,11 +955,10 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->jinode = NULL;
INIT_LIST_HEAD(&ei->i_completed_io_list);
spin_lock_init(&ei->i_completed_io_lock);
- ei->cur_aio_dio = NULL;
ei->i_sync_tid = 0;
ei->i_datasync_tid = 0;
atomic_set(&ei->i_ioend_count, 0);
- atomic_set(&ei->i_aiodio_unwritten, 0);
+ atomic_set(&ei->i_unwritten, 0);
return &ei->vfs_inode;
}
@@ -1224,6 +1222,7 @@ enum {
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
+ Opt_max_dir_size_kb,
};
static const match_table_t tokens = {
@@ -1297,6 +1296,7 @@ static const match_table_t tokens = {
{Opt_init_itable, "init_itable=%u"},
{Opt_init_itable, "init_itable"},
{Opt_noinit_itable, "noinit_itable"},
+ {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
{Opt_removed, "check=none"}, /* mount option from ext2/3 */
{Opt_removed, "nocheck"}, /* mount option from ext2/3 */
{Opt_removed, "reservation"}, /* mount option from ext2/3 */
@@ -1477,6 +1477,7 @@ static const struct mount_opts {
{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
+ {Opt_max_dir_size_kb, 0, MOPT_GTE0},
{Opt_err, 0, 0}
};
@@ -1592,6 +1593,8 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
if (!args->from)
arg = EXT4_DEF_LI_WAIT_MULT;
sbi->s_li_wait_mult = arg;
+ } else if (token == Opt_max_dir_size_kb) {
+ sbi->s_max_dir_size_kb = arg;
} else if (token == Opt_stripe) {
sbi->s_stripe = arg;
} else if (m->flags & MOPT_DATAJ) {
@@ -1664,7 +1667,7 @@ static int parse_options(char *options, struct super_block *sb,
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
- args[0].to = args[0].from = 0;
+ args[0].to = args[0].from = NULL;
token = match_token(p, tokens, args);
if (handle_mount_opt(sb, p, token, args, journal_devnum,
journal_ioprio, is_remount) < 0)
@@ -1740,7 +1743,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
static const char *token2str(int token)
{
- static const struct match_token *t;
+ const struct match_token *t;
for (t = tokens; t->token != Opt_err; t++)
if (t->token == token && !strchr(t->pattern, '='))
@@ -1823,6 +1826,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
(sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
+ if (nodefs || sbi->s_max_dir_size_kb)
+ SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
ext4_show_quota_options(seq, sb);
return 0;
@@ -1914,34 +1919,56 @@ done:
return res;
}
+int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct flex_groups *new_groups;
+ int size;
+
+ if (!sbi->s_log_groups_per_flex)
+ return 0;
+
+ size = ext4_flex_group(sbi, ngroup - 1) + 1;
+ if (size <= sbi->s_flex_groups_allocated)
+ return 0;
+
+ size = roundup_pow_of_two(size * sizeof(struct flex_groups));
+ new_groups = ext4_kvzalloc(size, GFP_KERNEL);
+ if (!new_groups) {
+ ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
+ size / (int) sizeof(struct flex_groups));
+ return -ENOMEM;
+ }
+
+ if (sbi->s_flex_groups) {
+ memcpy(new_groups, sbi->s_flex_groups,
+ (sbi->s_flex_groups_allocated *
+ sizeof(struct flex_groups)));
+ ext4_kvfree(sbi->s_flex_groups);
+ }
+ sbi->s_flex_groups = new_groups;
+ sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+ return 0;
+}
+
static int ext4_fill_flex_info(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_desc *gdp = NULL;
- ext4_group_t flex_group_count;
ext4_group_t flex_group;
unsigned int groups_per_flex = 0;
- size_t size;
- int i;
+ int i, err;
sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
sbi->s_log_groups_per_flex = 0;
return 1;
}
- groups_per_flex = 1 << sbi->s_log_groups_per_flex;
-
- /* We allocate both existing and potentially added groups */
- flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
- ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
- EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
- size = flex_group_count * sizeof(struct flex_groups);
- sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
- if (sbi->s_flex_groups == NULL) {
- ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
- flex_group_count);
+ groups_per_flex = 1U << sbi->s_log_groups_per_flex;
+
+ err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
+ if (err)
goto failed;
- }
for (i = 0; i < sbi->s_groups_count; i++) {
gdp = ext4_get_group_desc(sb, i, NULL);
@@ -2144,10 +2171,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
}
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
- if (es->s_last_orphan)
+ /* don't clear list on RO mount w/ errors */
+ if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
jbd_debug(1, "Errors on filesystem, "
"clearing orphan list.\n");
- es->s_last_orphan = 0;
+ es->s_last_orphan = 0;
+ }
jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
return;
}
@@ -2528,6 +2557,7 @@ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
static struct attribute *ext4_attrs[] = {
@@ -2543,6 +2573,7 @@ static struct attribute *ext4_attrs[] = {
ATTR_LIST(mb_stream_req),
ATTR_LIST(mb_group_prealloc),
ATTR_LIST(max_writeback_mb_bump),
+ ATTR_LIST(extent_max_zeroout_kb),
ATTR_LIST(trigger_fs_error),
NULL,
};
@@ -2550,10 +2581,12 @@ static struct attribute *ext4_attrs[] = {
/* Features this copy of ext4 supports */
EXT4_INFO_ATTR(lazy_itable_init);
EXT4_INFO_ATTR(batched_discard);
+EXT4_INFO_ATTR(meta_bg_resize);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
+ ATTR_LIST(meta_bg_resize),
NULL,
};
@@ -3374,7 +3407,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* enable delayed allocation by default
* Use -o nodelalloc to turn it off
*/
- if (!IS_EXT3_SB(sb) &&
+ if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) &&
((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
set_opt(sb, DELALLOC);
@@ -3743,6 +3776,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_stripe = ext4_get_stripe_size(sbi);
sbi->s_max_writeback_mb_bump = 128;
+ sbi->s_extent_max_zeroout_kb = 32;
/*
* set up enough so that it can read an inode
@@ -4348,7 +4382,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
BUFFER_TRACE(sbh, "marking dirty");
- ext4_superblock_csum_set(sb, es);
+ ext4_superblock_csum_set(sb);
mark_buffer_dirty(sbh);
if (sync) {
error = sync_dirty_buffer(sbh);
@@ -4519,11 +4553,9 @@ static int ext4_unfreeze(struct super_block *sb)
if (sb->s_flags & MS_RDONLY)
return 0;
- lock_super(sb);
/* Reset the needs_recovery flag before the fs is unlocked. */
EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
ext4_commit_super(sb, 1);
- unlock_super(sb);
return 0;
}
@@ -4559,7 +4591,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
char *orig_data = kstrdup(data, GFP_KERNEL);
/* Store the original options */
- lock_super(sb);
old_sb_flags = sb->s_flags;
old_opts.s_mount_opt = sbi->s_mount_opt;
old_opts.s_mount_opt2 = sbi->s_mount_opt2;
@@ -4701,7 +4732,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (sbi->s_journal == NULL)
ext4_commit_super(sb, 1);
- unlock_super(sb);
#ifdef CONFIG_QUOTA
/* Release old quota file names */
for (i = 0; i < MAXQUOTAS; i++)
@@ -4714,10 +4744,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
else if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_QUOTA)) {
err = ext4_enable_quotas(sb);
- if (err) {
- lock_super(sb);
+ if (err)
goto restore_opts;
- }
}
}
#endif
@@ -4744,7 +4772,6 @@ restore_opts:
sbi->s_qf_names[i] = old_opts.s_qf_names[i];
}
#endif
- unlock_super(sb);
kfree(orig_data);
return err;
}
@@ -5269,8 +5296,10 @@ static int __init ext4_init_fs(void)
if (err)
goto out6;
ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
- if (!ext4_kset)
+ if (!ext4_kset) {
+ err = -ENOMEM;
goto out5;
+ }
ext4_proc_root = proc_mkdir("fs/ext4", NULL);
err = ext4_init_feat_adverts();
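ext4_alloc_flex_bg_array() above grows the flex_groups array on demand,
rounding the byte size up to a power of two so a long series of small
resizes does not reallocate at every step. A userspace sketch of the growth
policy (the struct layout here is illustrative only, not ext4's):

	#include <stdio.h>
	#include <stddef.h>

	struct flex_groups { int free_clusters, free_inodes, used_dirs; };

	/* same rounding the kernel's roundup_pow_of_two() performs */
	static size_t roundup_pow_of_two(size_t n)
	{
		size_t p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		size_t need = 5 * sizeof(struct flex_groups);
		size_t alloc = roundup_pow_of_two(need);

		printf("need %zu bytes -> allocate %zu (%zu slots)\n",
		       need, alloc, alloc / sizeof(struct flex_groups));
		return 0;
	}

The same super.c hunks also wire up the max_dir_size_kb mount option that
the ext4_append() check in namei.c consumes: mounting with, for example,
-o max_dir_size_kb=512 makes directory growth fail with ENOSPC once a
directory reaches 512KiB.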
diff --git a/fs/fat/Makefile b/fs/fat/Makefile
index e06190322c1c..964b634f6667 100644
--- a/fs/fat/Makefile
+++ b/fs/fat/Makefile
@@ -6,6 +6,6 @@ obj-$(CONFIG_FAT_FS) += fat.o
obj-$(CONFIG_VFAT_FS) += vfat.o
obj-$(CONFIG_MSDOS_FS) += msdos.o
-fat-y := cache.o dir.o fatent.o file.o inode.o misc.o
+fat-y := cache.o dir.o fatent.o file.o inode.o misc.o nfs.o
vfat-y := namei_vfat.o
msdos-y := namei_msdos.o
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 1cc7038e273d..91ad9e1c9441 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -190,7 +190,8 @@ static void __fat_cache_inval_inode(struct inode *inode)
struct fat_cache *cache;
while (!list_empty(&i->cache_lru)) {
- cache = list_entry(i->cache_lru.next, struct fat_cache, cache_list);
+ cache = list_entry(i->cache_lru.next,
+ struct fat_cache, cache_list);
list_del_init(&cache->cache_list);
i->nr_caches--;
fat_cache_free(cache);
@@ -261,9 +262,10 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
if (nr < 0)
goto out;
else if (nr == FAT_ENT_FREE) {
- fat_fs_error_ratelimit(sb, "%s: invalid cluster chain"
- " (i_pos %lld)", __func__,
- MSDOS_I(inode)->i_pos);
+ fat_fs_error_ratelimit(sb,
+ "%s: invalid cluster chain (i_pos %lld)",
+ __func__,
+ MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
} else if (nr == FAT_ENT_EOF) {
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index dc49ed2cbffa..2a182342442e 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -18,7 +18,7 @@
#include <linux/time.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/kernel.h>
#include "fat.h"
@@ -123,7 +123,8 @@ static inline int fat_get_entry(struct inode *dir, loff_t *pos,
{
/* Fast stuff first */
if (*bh && *de &&
- (*de - (struct msdos_dir_entry *)(*bh)->b_data) < MSDOS_SB(dir->i_sb)->dir_per_block - 1) {
+ (*de - (struct msdos_dir_entry *)(*bh)->b_data) <
+ MSDOS_SB(dir->i_sb)->dir_per_block - 1) {
*pos += sizeof(struct msdos_dir_entry);
(*de)++;
return 0;
@@ -155,7 +156,8 @@ static int uni16_to_x8(struct super_block *sb, unsigned char *ascii,
while (*ip && ((len - NLS_MAX_CHARSET_SIZE) > 0)) {
ec = *ip++;
- if ((charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE)) > 0) {
+ charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE);
+ if (charlen > 0) {
op += charlen;
len -= charlen;
} else {
@@ -172,12 +174,12 @@ static int uni16_to_x8(struct super_block *sb, unsigned char *ascii,
}
if (unlikely(*ip)) {
- fat_msg(sb, KERN_WARNING, "filename was truncated while "
- "converting.");
+ fat_msg(sb, KERN_WARNING,
+ "filename was truncated while converting.");
}
*op = 0;
- return (op - ascii);
+ return op - ascii;
}
static inline int fat_uni_to_x8(struct super_block *sb, const wchar_t *uni,
@@ -205,7 +207,8 @@ fat_short2uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni)
}
static inline int
-fat_short2lower_uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni)
+fat_short2lower_uni(struct nls_table *t, unsigned char *c,
+ int clen, wchar_t *uni)
{
int charlen;
wchar_t wc;
@@ -220,7 +223,8 @@ fat_short2lower_uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *un
if (!nc)
nc = *c;
- if ( (charlen = t->char2uni(&nc, 1, uni)) < 0) {
+ charlen = t->char2uni(&nc, 1, uni);
+ if (charlen < 0) {
*uni = 0x003f; /* a question mark */
charlen = 1;
}
@@ -537,7 +541,6 @@ end_of_dir:
return err;
}
-
EXPORT_SYMBOL_GPL(fat_search_long);
struct fat_ioctl_filldir_callback {
@@ -568,13 +571,14 @@ static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent,
int short_len = 0, fill_len = 0;
int ret = 0;
- lock_super(sb);
+ mutex_lock(&sbi->s_lock);
cpos = filp->f_pos;
/* Fake . and .. for the root directory. */
if (inode->i_ino == MSDOS_ROOT_INO) {
while (cpos < 2) {
- if (filldir(dirent, "..", cpos+1, cpos, MSDOS_ROOT_INO, DT_DIR) < 0)
+ if (filldir(dirent, "..", cpos+1, cpos,
+ MSDOS_ROOT_INO, DT_DIR) < 0)
goto out;
cpos++;
filp->f_pos++;
@@ -689,7 +693,7 @@ fill_failed:
if (unicode)
__putname(unicode);
out:
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return ret;
}
@@ -872,25 +876,26 @@ static int fat_get_short_entry(struct inode *dir, loff_t *pos,
}
/*
- * The ".." entry can not provide the "struct fat_slot_info" informations
- * for inode. So, this function provide the some informations only.
+ * The ".." entry can not provide the "struct fat_slot_info" information
+ * for inode, nor a usable i_pos. So, this function provides some information
+ * only.
+ *
+ * Since this function walks through the on-disk inodes within a directory,
+ * callers are responsible for taking any locks necessary to prevent the
+ * directory from changing.
*/
int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
- struct msdos_dir_entry **de, loff_t *i_pos)
+ struct msdos_dir_entry **de)
{
- loff_t offset;
+ loff_t offset = 0;
- offset = 0;
- *bh = NULL;
+ *de = NULL;
while (fat_get_short_entry(dir, &offset, bh, de) >= 0) {
- if (!strncmp((*de)->name, MSDOS_DOTDOT, MSDOS_NAME)) {
- *i_pos = fat_make_i_pos(dir->i_sb, *bh, *de);
+ if (!strncmp((*de)->name, MSDOS_DOTDOT, MSDOS_NAME))
return 0;
- }
}
return -ENOENT;
}
-
EXPORT_SYMBOL_GPL(fat_get_dotdot_entry);
/* See if directory is empty */
@@ -913,7 +918,6 @@ int fat_dir_empty(struct inode *dir)
brelse(bh);
return result;
}
-
EXPORT_SYMBOL_GPL(fat_dir_empty);
/*
@@ -959,7 +963,6 @@ int fat_scan(struct inode *dir, const unsigned char *name,
}
return -ENOENT;
}
-
EXPORT_SYMBOL_GPL(fat_scan);
static int __fat_remove_entries(struct inode *dir, loff_t pos, int nr_slots)
@@ -1047,7 +1050,6 @@ int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
return 0;
}
-
EXPORT_SYMBOL_GPL(fat_remove_entries);
static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
@@ -1141,10 +1143,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
de[0].ctime_cs = de[1].ctime_cs = 0;
de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = 0;
}
- de[0].start = cpu_to_le16(cluster);
- de[0].starthi = cpu_to_le16(cluster >> 16);
- de[1].start = cpu_to_le16(MSDOS_I(dir)->i_logstart);
- de[1].starthi = cpu_to_le16(MSDOS_I(dir)->i_logstart >> 16);
+ fat_set_start(&de[0], cluster);
+ fat_set_start(&de[1], MSDOS_I(dir)->i_logstart);
de[0].size = de[1].size = 0;
memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
set_buffer_uptodate(bhs[0]);
@@ -1161,7 +1161,6 @@ error_free:
error:
return err;
}
-
EXPORT_SYMBOL_GPL(fat_alloc_new_dir);
static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
@@ -1377,5 +1376,4 @@ error_remove:
__fat_remove_entries(dir, pos, free_slots);
return err;
}
-
EXPORT_SYMBOL_GPL(fat_add_entries);
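The fat/dir.c hunks above replace lock_super()/unlock_super() with the
per-filesystem s_lock mutex declared in the fat.h diff below; the resulting
pattern in the readdir path is simply:

	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_lock(&sbi->s_lock);
	/* ... walk or modify directory entries ... */
	mutex_unlock(&sbi->s_lock);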
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 7d8e0dcac5d5..623f36f0423b 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -5,6 +5,7 @@
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/fs.h>
+#include <linux/hash.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/msdos_fs.h>
@@ -27,26 +28,27 @@ struct fat_mount_options {
kgid_t fs_gid;
unsigned short fs_fmask;
unsigned short fs_dmask;
- unsigned short codepage; /* Codepage for shortname conversions */
- char *iocharset; /* Charset used for filename input/display */
- unsigned short shortname; /* flags for shortname display/create rule */
- unsigned char name_check; /* r = relaxed, n = normal, s = strict */
- unsigned char errors; /* On error: continue, panic, remount-ro */
+ unsigned short codepage; /* Codepage for shortname conversions */
+ char *iocharset; /* Charset used for filename input/display */
+ unsigned short shortname; /* flags for shortname display/create rule */
+ unsigned char name_check; /* r = relaxed, n = normal, s = strict */
+ unsigned char errors; /* On error: continue, panic, remount-ro */
unsigned short allow_utime;/* permission for setting the [am]time */
- unsigned quiet:1, /* set = fake successful chmods and chowns */
- showexec:1, /* set = only set x bit for com/exe/bat */
- sys_immutable:1, /* set = system files are immutable */
- dotsOK:1, /* set = hidden and system files are named '.filename' */
- isvfat:1, /* 0=no vfat long filename support, 1=vfat support */
- utf8:1, /* Use of UTF-8 character set (Default) */
- unicode_xlate:1, /* create escape sequences for unhandled Unicode */
- numtail:1, /* Does first alias have a numeric '~1' type tail? */
- flush:1, /* write things quickly */
- nocase:1, /* Does this need case conversion? 0=need case conversion*/
- usefree:1, /* Use free_clusters for FAT32 */
- tz_utc:1, /* Filesystem timestamps are in UTC */
- rodir:1, /* allow ATTR_RO for directory */
- discard:1; /* Issue discard requests on deletions */
+ unsigned quiet:1, /* set = fake successful chmods and chowns */
+ showexec:1, /* set = only set x bit for com/exe/bat */
+ sys_immutable:1, /* set = system files are immutable */
+ dotsOK:1, /* set = hidden and system files are named '.filename' */
+ isvfat:1, /* 0=no vfat long filename support, 1=vfat support */
+ utf8:1, /* Use of UTF-8 character set (Default) */
+ unicode_xlate:1, /* create escape sequences for unhandled Unicode */
+ numtail:1, /* Does first alias have a numeric '~1' type tail? */
+ flush:1, /* write things quickly */
+ nocase:1, /* Does this need case conversion? 0=need case conversion*/
+ usefree:1, /* Use free_clusters for FAT32 */
+ tz_utc:1, /* Filesystem timestamps are in UTC */
+ rodir:1, /* allow ATTR_RO for directory */
+ discard:1, /* Issue discard requests on deletions */
+ nfs:1; /* Do extra work needed for NFS export */
};
#define FAT_HASH_BITS 8
@@ -56,28 +58,29 @@ struct fat_mount_options {
* MS-DOS file system in-core superblock data
*/
struct msdos_sb_info {
- unsigned short sec_per_clus; /* sectors/cluster */
- unsigned short cluster_bits; /* log2(cluster_size) */
- unsigned int cluster_size; /* cluster size */
- unsigned char fats,fat_bits; /* number of FATs, FAT bits (12 or 16) */
+ unsigned short sec_per_clus; /* sectors/cluster */
+ unsigned short cluster_bits; /* log2(cluster_size) */
+ unsigned int cluster_size; /* cluster size */
+ unsigned char fats, fat_bits; /* number of FATs, FAT bits (12 or 16) */
unsigned short fat_start;
- unsigned long fat_length; /* FAT start & length (sec.) */
+ unsigned long fat_length; /* FAT start & length (sec.) */
unsigned long dir_start;
- unsigned short dir_entries; /* root dir start & entries */
- unsigned long data_start; /* first data sector */
- unsigned long max_cluster; /* maximum cluster number */
- unsigned long root_cluster; /* first cluster of the root directory */
- unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
+ unsigned short dir_entries; /* root dir start & entries */
+ unsigned long data_start; /* first data sector */
+ unsigned long max_cluster; /* maximum cluster number */
+ unsigned long root_cluster; /* first cluster of the root directory */
+ unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
struct mutex fat_lock;
+ struct mutex s_lock;
unsigned int prev_free; /* previously allocated cluster number */
unsigned int free_clusters; /* -1 if undefined */
unsigned int free_clus_valid; /* is free_clusters valid? */
struct fat_mount_options options;
- struct nls_table *nls_disk; /* Codepage used on disk */
- struct nls_table *nls_io; /* Charset used for input and display */
- const void *dir_ops; /* Opaque; default directory operations */
- int dir_per_block; /* dir entries per block */
- int dir_per_block_bits; /* log2(dir_per_block) */
+ struct nls_table *nls_disk; /* Codepage used on disk */
+ struct nls_table *nls_io; /* Charset used for input and display */
+ const void *dir_ops; /* Opaque; default directory operations */
+ int dir_per_block; /* dir entries per block */
+ int dir_per_block_bits; /* log2(dir_per_block) */
int fatent_shift;
struct fatent_operations *fatent_ops;
@@ -88,6 +91,9 @@ struct msdos_sb_info {
spinlock_t inode_hash_lock;
struct hlist_head inode_hashtable[FAT_HASH_SIZE];
+
+ spinlock_t dir_hash_lock;
+ struct hlist_head dir_hashtable[FAT_HASH_SIZE];
};
#define FAT_CACHE_VALID 0 /* special case for valid cache */
@@ -110,6 +116,7 @@ struct msdos_inode_info {
int i_attrs; /* unused attribute bits */
loff_t i_pos; /* on-disk position of directory entry or 0 */
struct hlist_node i_fat_hash; /* hash by i_location */
+ struct hlist_node i_dir_hash; /* hash by i_logstart */
struct rw_semaphore truncate_lock; /* protect bmap against truncate */
struct inode vfs_inode;
};
@@ -262,7 +269,7 @@ extern int fat_subdirs(struct inode *dir);
extern int fat_scan(struct inode *dir, const unsigned char *name,
struct fat_slot_info *sinfo);
extern int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
- struct msdos_dir_entry **de, loff_t *i_pos);
+ struct msdos_dir_entry **de);
extern int fat_alloc_new_dir(struct inode *dir, struct timespec *ts);
extern int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
struct fat_slot_info *sinfo);
@@ -322,7 +329,7 @@ extern long fat_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern const struct file_operations fat_file_operations;
extern const struct inode_operations fat_file_inode_operations;
-extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
+extern int fat_setattr(struct dentry *dentry, struct iattr *attr);
extern void fat_truncate_blocks(struct inode *inode, loff_t offset);
extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
@@ -340,7 +347,12 @@ extern int fat_fill_super(struct super_block *sb, void *data, int silent,
int isvfat, void (*setup)(struct super_block *));
extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2);
+ struct inode *i2);
+static inline unsigned long fat_dir_hash(int logstart)
+{
+ return hash_32(logstart, FAT_HASH_BITS);
+}
+
/* fat/misc.c */
extern __printf(3, 4) __cold
void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...);
@@ -366,6 +378,14 @@ extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
int fat_cache_init(void);
void fat_cache_destroy(void);
+/* fat/nfs.c */
+struct fid;
+extern struct dentry *fat_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type);
+extern struct dentry *fat_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type);
+extern struct dentry *fat_get_parent(struct dentry *child_dir);
+
/* helper for printk */
typedef unsigned long long llu;
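
The fat_dir_hash() helper added above feeds the new dir_hashtable in struct msdos_sb_info. A minimal sketch of the intended bucket lookup, assuming only the fat.h declarations from this hunk (dir_bucket() is a hypothetical name, not part of the patch):

/* hash_32() folds the directory's start cluster into FAT_HASH_BITS
 * bits, selecting one of the FAT_HASH_SIZE dir_hashtable chains. */
static struct hlist_head *dir_bucket(struct msdos_sb_info *sbi, int logstart)
{
	return sbi->dir_hashtable + fat_dir_hash(logstart);
}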
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 31f08ab62c56..260705c58062 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -186,9 +186,6 @@ static void fat16_ent_put(struct fat_entry *fatent, int new)
static void fat32_ent_put(struct fat_entry *fatent, int new)
{
- if (new == FAT_ENT_EOF)
- new = EOF_FAT32;
-
WARN_ON(new & 0xf0000000);
new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
*fatent->u.ent32_p = cpu_to_le32(new);
@@ -203,15 +200,18 @@ static int fat12_ent_next(struct fat_entry *fatent)
fatent->entry++;
if (fatent->nr_bhs == 1) {
- WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 2)));
- WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
+ WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
+ (bhs[0]->b_size - 2)));
+ WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
+ (bhs[0]->b_size - 1)));
if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
ent12_p[0] = nextp - 1;
ent12_p[1] = nextp;
return 1;
}
} else {
- WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
+ WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
+ (bhs[0]->b_size - 1)));
WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
ent12_p[0] = nextp - 1;
ent12_p[1] = nextp;
@@ -631,7 +631,6 @@ error:
return err;
}
-
EXPORT_SYMBOL_GPL(fat_free_clusters);
/* 128kb is the whole sectors for FAT12 and FAT16 */
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 4e5a6ac54ebd..5bafaad00530 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -281,15 +281,42 @@ static inline unsigned long fat_hash(loff_t i_pos)
return hash_32(i_pos, FAT_HASH_BITS);
}
+static void dir_hash_init(struct super_block *sb)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ int i;
+
+ spin_lock_init(&sbi->dir_hash_lock);
+ for (i = 0; i < FAT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&sbi->dir_hashtable[i]);
+}
+
void fat_attach(struct inode *inode, loff_t i_pos)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
- struct hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos);
- spin_lock(&sbi->inode_hash_lock);
- MSDOS_I(inode)->i_pos = i_pos;
- hlist_add_head(&MSDOS_I(inode)->i_fat_hash, head);
- spin_unlock(&sbi->inode_hash_lock);
+ if (inode->i_ino != MSDOS_ROOT_INO) {
+ struct hlist_head *head = sbi->inode_hashtable
+ + fat_hash(i_pos);
+
+ spin_lock(&sbi->inode_hash_lock);
+ MSDOS_I(inode)->i_pos = i_pos;
+ hlist_add_head(&MSDOS_I(inode)->i_fat_hash, head);
+ spin_unlock(&sbi->inode_hash_lock);
+ }
+
+ /* If NFS support is enabled, cache the mapping of start cluster
+ * to directory inode. This is used during reconnection of
+ * dentries to the filesystem root.
+ */
+ if (S_ISDIR(inode->i_mode) && sbi->options.nfs) {
+ struct hlist_head *d_head = sbi->dir_hashtable;
+ d_head += fat_dir_hash(MSDOS_I(inode)->i_logstart);
+
+ spin_lock(&sbi->dir_hash_lock);
+ hlist_add_head(&MSDOS_I(inode)->i_dir_hash, d_head);
+ spin_unlock(&sbi->dir_hash_lock);
+ }
}
EXPORT_SYMBOL_GPL(fat_attach);
@@ -300,6 +327,12 @@ void fat_detach(struct inode *inode)
MSDOS_I(inode)->i_pos = 0;
hlist_del_init(&MSDOS_I(inode)->i_fat_hash);
spin_unlock(&sbi->inode_hash_lock);
+
+ if (S_ISDIR(inode->i_mode) && sbi->options.nfs) {
+ spin_lock(&sbi->dir_hash_lock);
+ hlist_del_init(&MSDOS_I(inode)->i_dir_hash);
+ spin_unlock(&sbi->dir_hash_lock);
+ }
}
EXPORT_SYMBOL_GPL(fat_detach);
@@ -504,6 +537,7 @@ static void init_once(void *foo)
ei->cache_valid_id = FAT_CACHE_VALID + 1;
INIT_LIST_HEAD(&ei->cache_lru);
INIT_HLIST_NODE(&ei->i_fat_hash);
+ INIT_HLIST_NODE(&ei->i_dir_hash);
inode_init_once(&ei->vfs_inode);
}
@@ -639,9 +673,9 @@ static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
if (inode->i_ino == MSDOS_FSINFO_INO) {
struct super_block *sb = inode->i_sb;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = fat_clusters_flush(sb);
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
} else
err = __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
@@ -668,125 +702,9 @@ static const struct super_operations fat_sops = {
.show_options = fat_show_options,
};
-/*
- * a FAT file handle with fhtype 3 is
- * 0/ i_ino - for fast, reliable lookup if still in the cache
- * 1/ i_generation - to see if i_ino is still valid
- * bit 0 == 0 iff directory
- * 2/ i_pos(8-39) - if ino has changed, but still in cache
- * 3/ i_pos(4-7)|i_logstart - to semi-verify inode found at i_pos
- * 4/ i_pos(0-3)|parent->i_logstart - maybe used to hunt for the file on disc
- *
- * Hack for NFSv2: Maximum FAT entry number is 28bits and maximum
- * i_pos is 40bits (blocknr(32) + dir offset(8)), so two 4bits
- * of i_logstart is used to store the directory entry offset.
- */
-
-static struct dentry *fat_fh_to_dentry(struct super_block *sb,
- struct fid *fid, int fh_len, int fh_type)
-{
- struct inode *inode = NULL;
- u32 *fh = fid->raw;
-
- if (fh_len < 5 || fh_type != 3)
- return NULL;
-
- inode = ilookup(sb, fh[0]);
- if (!inode || inode->i_generation != fh[1]) {
- if (inode)
- iput(inode);
- inode = NULL;
- }
- if (!inode) {
- loff_t i_pos;
- int i_logstart = fh[3] & 0x0fffffff;
-
- i_pos = (loff_t)fh[2] << 8;
- i_pos |= ((fh[3] >> 24) & 0xf0) | (fh[4] >> 28);
-
- /* try 2 - see if i_pos is in F-d-c
- * require i_logstart to be the same
- * Will fail if you truncate and then re-write
- */
-
- inode = fat_iget(sb, i_pos);
- if (inode && MSDOS_I(inode)->i_logstart != i_logstart) {
- iput(inode);
- inode = NULL;
- }
- }
-
- /*
- * For now, do nothing if the inode is not found.
- *
- * What we could do is:
- *
- * - follow the file starting at fh[4], and record the ".." entry,
- * and the name of the fh[2] entry.
- * - then follow the ".." file finding the next step up.
- *
- * This way we build a path to the root of the tree. If this works, we
- * lookup the path and so get this inode into the cache. Finally try
- * the fat_iget lookup again. If that fails, then we are totally out
- * of luck. But all that is for another day
- */
- return d_obtain_alias(inode);
-}
-
-static int
-fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
-{
- int len = *lenp;
- struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
- loff_t i_pos;
-
- if (len < 5) {
- *lenp = 5;
- return 255; /* no room */
- }
-
- i_pos = fat_i_pos_read(sbi, inode);
- *lenp = 5;
- fh[0] = inode->i_ino;
- fh[1] = inode->i_generation;
- fh[2] = i_pos >> 8;
- fh[3] = ((i_pos & 0xf0) << 24) | MSDOS_I(inode)->i_logstart;
- fh[4] = (i_pos & 0x0f) << 28;
- if (parent)
- fh[4] |= MSDOS_I(parent)->i_logstart;
- return 3;
-}
-
-static struct dentry *fat_get_parent(struct dentry *child)
-{
- struct super_block *sb = child->d_sb;
- struct buffer_head *bh;
- struct msdos_dir_entry *de;
- loff_t i_pos;
- struct dentry *parent;
- struct inode *inode;
- int err;
-
- lock_super(sb);
-
- err = fat_get_dotdot_entry(child->d_inode, &bh, &de, &i_pos);
- if (err) {
- parent = ERR_PTR(err);
- goto out;
- }
- inode = fat_build_inode(sb, de, i_pos);
- brelse(bh);
-
- parent = d_obtain_alias(inode);
-out:
- unlock_super(sb);
-
- return parent;
-}
-
static const struct export_operations fat_export_ops = {
- .encode_fh = fat_encode_fh,
.fh_to_dentry = fat_fh_to_dentry,
+ .fh_to_parent = fat_fh_to_parent,
.get_parent = fat_get_parent,
};
@@ -836,6 +754,8 @@ static int fat_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",usefree");
if (opts->quiet)
seq_puts(m, ",quiet");
+ if (opts->nfs)
+ seq_puts(m, ",nfs");
if (opts->showexec)
seq_puts(m, ",showexec");
if (opts->sys_immutable)
@@ -880,7 +800,7 @@ enum {
Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
Opt_obsolete, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
- Opt_err_panic, Opt_err_ro, Opt_discard, Opt_err,
+ Opt_err_panic, Opt_err_ro, Opt_discard, Opt_nfs, Opt_err,
};
static const match_table_t fat_tokens = {
@@ -909,6 +829,7 @@ static const match_table_t fat_tokens = {
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_discard, "discard"},
+ {Opt_nfs, "nfs"},
{Opt_obsolete, "conv=binary"},
{Opt_obsolete, "conv=text"},
{Opt_obsolete, "conv=auto"},
@@ -989,6 +910,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
opts->numtail = 1;
opts->usefree = opts->nocase = 0;
opts->tz_utc = 0;
+ opts->nfs = 0;
opts->errors = FAT_ERRORS_RO;
*debug = 0;
@@ -1153,6 +1075,9 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
case Opt_discard:
opts->discard = 1;
break;
+ case Opt_nfs:
+ opts->nfs = 1;
+ break;
/* obsolete mount options */
case Opt_obsolete:
@@ -1343,6 +1268,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
b = (struct fat_boot_sector *) bh->b_data;
}
+ mutex_init(&sbi->s_lock);
sbi->cluster_size = sb->s_blocksize * sbi->sec_per_clus;
sbi->cluster_bits = ffs(sbi->cluster_size) - 1;
sbi->fats = b->fats;
@@ -1443,6 +1369,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
/* set up enough so that it can read an inode */
fat_hash_init(sb);
+ dir_hash_init(sb);
fat_ent_access_init(sb);
/*
@@ -1497,6 +1424,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
}
error = -ENOMEM;
insert_inode_hash(root_inode);
+ fat_attach(root_inode, 0);
sb->s_root = d_make_root(root_inode);
if (!sb->s_root) {
fat_msg(sb, KERN_ERR, "get root inode failed");
@@ -1536,18 +1464,14 @@ static int writeback_inode(struct inode *inode)
{
int ret;
- struct address_space *mapping = inode->i_mapping;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = 0,
- };
- /* if we used WB_SYNC_ALL, sync_inode waits for the io for the
- * inode to finish. So WB_SYNC_NONE is sent down to sync_inode
+
+ /* if we used wait=1, sync_inode_metadata waits for the io for the
+ * inode to finish. So wait=0 is sent down to sync_inode_metadata
* and filemap_fdatawrite is used for the data blocks
*/
- ret = sync_inode(inode, &wbc);
+ ret = sync_inode_metadata(inode, 0);
if (!ret)
- ret = filemap_fdatawrite(mapping);
+ ret = filemap_fdatawrite(inode->i_mapping);
return ret;
}
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index b0e12bf9f4a1..e2cfda94a28d 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -208,7 +208,7 @@ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
switch (err) {
case -ENOENT:
@@ -221,7 +221,7 @@ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
default:
inode = ERR_PTR(err);
}
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return d_splice_alias(inode, dentry);
}
@@ -273,7 +273,7 @@ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
unsigned char msdos_name[MSDOS_NAME];
int err, is_hid;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_format_name(dentry->d_name.name, dentry->d_name.len,
msdos_name, &MSDOS_SB(sb)->options);
@@ -302,7 +302,7 @@ static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
d_instantiate(dentry, inode);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!err)
err = fat_flush_inodes(sb, dir, inode);
return err;
@@ -316,7 +316,7 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
struct fat_slot_info sinfo;
int err;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
/*
* Check whether the directory is not in use, then check
* whether it is empty.
@@ -337,7 +337,7 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_ctime = CURRENT_TIME_SEC;
fat_detach(inode);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!err)
err = fat_flush_inodes(sb, dir, inode);
@@ -354,7 +354,7 @@ static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct timespec ts;
int err, is_hid, cluster;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_format_name(dentry->d_name.name, dentry->d_name.len,
msdos_name, &MSDOS_SB(sb)->options);
@@ -392,14 +392,14 @@ static int msdos_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
d_instantiate(dentry, inode);
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
fat_flush_inodes(sb, dir, inode);
return 0;
out_free:
fat_free_clusters(dir, cluster);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
@@ -407,11 +407,11 @@ out:
static int msdos_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- struct super_block *sb= inode->i_sb;
+ struct super_block *sb = inode->i_sb;
struct fat_slot_info sinfo;
int err;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
if (err)
goto out;
@@ -423,7 +423,7 @@ static int msdos_unlink(struct inode *dir, struct dentry *dentry)
inode->i_ctime = CURRENT_TIME_SEC;
fat_detach(inode);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!err)
err = fat_flush_inodes(sb, dir, inode);
@@ -440,7 +440,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec ts;
- loff_t dotdot_i_pos, new_i_pos;
+ loff_t new_i_pos;
int err, old_attrs, is_dir, update_dotdot, corrupt = 0;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
@@ -456,8 +456,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
is_dir = S_ISDIR(old_inode->i_mode);
update_dotdot = (is_dir && old_dir != new_dir);
if (update_dotdot) {
- if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de,
- &dotdot_i_pos) < 0) {
+ if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de)) {
err = -EIO;
goto out;
}
@@ -607,7 +606,7 @@ static int msdos_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned char old_msdos_name[MSDOS_NAME], new_msdos_name[MSDOS_NAME];
int err, is_hid;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = msdos_format_name(old_dentry->d_name.name,
old_dentry->d_name.len, old_msdos_name,
@@ -626,7 +625,7 @@ static int msdos_rename(struct inode *old_dir, struct dentry *old_dentry,
err = do_msdos_rename(old_dir, old_msdos_name, old_dentry,
new_dir, new_msdos_name, new_dentry, is_hid);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!err)
err = fat_flush_inodes(sb, old_dir, new_dir);
return err;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 6a6d8c0715a1..ac959d655e7d 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -721,7 +721,7 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *alias;
int err;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err) {
@@ -752,13 +752,13 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
if (!S_ISDIR(inode->i_mode))
d_move(alias, dentry);
iput(inode);
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return alias;
} else
dput(alias);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
dentry->d_time = dentry->d_parent->d_inode->i_version;
dentry = d_splice_alias(inode, dentry);
if (dentry)
@@ -766,7 +766,7 @@ out:
return dentry;
error:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return ERR_PTR(err);
}
@@ -779,7 +779,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
struct timespec ts;
int err;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
ts = CURRENT_TIME_SEC;
err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
@@ -800,7 +800,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
@@ -811,7 +811,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
struct fat_slot_info sinfo;
int err;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = fat_dir_empty(inode);
if (err)
@@ -829,7 +829,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
@@ -841,7 +841,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
struct fat_slot_info sinfo;
int err;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err)
@@ -854,7 +854,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
@@ -867,7 +867,7 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct timespec ts;
int err, cluster;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
ts = CURRENT_TIME_SEC;
cluster = fat_alloc_new_dir(dir, &ts);
@@ -896,13 +896,13 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return 0;
out_free:
fat_free_clusters(dir, cluster);
out:
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
}
@@ -914,14 +914,14 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec ts;
- loff_t dotdot_i_pos, new_i_pos;
+ loff_t new_i_pos;
int err, is_dir, update_dotdot, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
- lock_super(sb);
+ mutex_lock(&MSDOS_SB(sb)->s_lock);
err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo);
if (err)
goto out;
@@ -929,8 +929,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
is_dir = S_ISDIR(old_inode->i_mode);
update_dotdot = (is_dir && old_dir != new_dir);
if (update_dotdot) {
- if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de,
- &dotdot_i_pos) < 0) {
+ if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de)) {
err = -EIO;
goto out;
}
@@ -997,7 +996,7 @@ out:
brelse(sinfo.bh);
brelse(dotdot_bh);
brelse(old_sinfo.bh);
- unlock_super(sb);
+ mutex_unlock(&MSDOS_SB(sb)->s_lock);
return err;
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
new file mode 100644
index 000000000000..ef4b5faba87b
--- /dev/null
+++ b/fs/fat/nfs.c
@@ -0,0 +1,101 @@
+/* fs/fat/nfs.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/exportfs.h>
+#include "fat.h"
+
+/**
+ * Look up a directory inode given its starting cluster.
+ */
+static struct inode *fat_dget(struct super_block *sb, int i_logstart)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ struct hlist_head *head;
+ struct hlist_node *_p;
+ struct msdos_inode_info *i;
+ struct inode *inode = NULL;
+
+ head = sbi->dir_hashtable + fat_dir_hash(i_logstart);
+ spin_lock(&sbi->dir_hash_lock);
+ hlist_for_each_entry(i, _p, head, i_dir_hash) {
+ BUG_ON(i->vfs_inode.i_sb != sb);
+ if (i->i_logstart != i_logstart)
+ continue;
+ inode = igrab(&i->vfs_inode);
+ if (inode)
+ break;
+ }
+ spin_unlock(&sbi->dir_hash_lock);
+ return inode;
+}
+
+static struct inode *fat_nfs_get_inode(struct super_block *sb,
+ u64 ino, u32 generation)
+{
+ struct inode *inode;
+
+ if ((ino < MSDOS_ROOT_INO) || (ino == MSDOS_FSINFO_INO))
+ return NULL;
+
+ inode = ilookup(sb, ino);
+ if (inode && generation && (inode->i_generation != generation)) {
+ iput(inode);
+ inode = NULL;
+ }
+
+ return inode;
+}
+
+/**
+ * Map an NFS file handle to a corresponding dentry.
+ * The dentry may or may not be connected to the filesystem root.
+ */
+struct dentry *fat_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
+ fat_nfs_get_inode);
+}
+
+/*
+ * Find the parent for a file specified by an NFS handle.
+ * This requires that the handle contain the i_ino of the parent.
+ */
+struct dentry *fat_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+ fat_nfs_get_inode);
+}
+
+/*
+ * Find the parent for a directory that is not currently connected to
+ * the filesystem root.
+ *
+ * On entry, the caller holds child_dir->d_inode->i_mutex.
+ */
+struct dentry *fat_get_parent(struct dentry *child_dir)
+{
+ struct super_block *sb = child_dir->d_sb;
+ struct buffer_head *bh = NULL;
+ struct msdos_dir_entry *de;
+ struct inode *parent_inode = NULL;
+
+ if (!fat_get_dotdot_entry(child_dir->d_inode, &bh, &de)) {
+ int parent_logstart = fat_get_start(MSDOS_SB(sb), de);
+ parent_inode = fat_dget(sb, parent_logstart);
+ }
+ brelse(bh);
+
+ return d_obtain_alias(parent_inode);
+}
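
fat_fh_to_dentry() and fat_fh_to_parent() above delegate handle decoding to exportfs. A simplified sketch of what generic_fh_to_dentry() does with the fat_nfs_get_inode() callback, assuming the handle uses one of the two generic ino32/gen layouts (hedged approximation of fs/exportfs behaviour, error handling trimmed):

struct dentry *sketch_fh_to_dentry(struct super_block *sb, struct fid *fid,
				   int fh_len, int fh_type)
{
	struct inode *inode = NULL;

	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		inode = fat_nfs_get_inode(sb, fid->i32.ino, fid->i32.gen);
		break;
	}
	return d_obtain_alias(inode);	/* maps NULL to ERR_PTR(-ESTALE) */
}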
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 8f704291d4ed..71a600a19f06 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -258,7 +258,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
err = f_dupfd(arg, filp, 0);
break;
case F_DUPFD_CLOEXEC:
- err = f_dupfd(arg, filp, FD_CLOEXEC);
+ err = f_dupfd(arg, filp, O_CLOEXEC);
break;
case F_GETFD:
err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
diff --git a/fs/file.c b/fs/file.c
index 0f1bda4bebfa..d3b5fa80b71b 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -922,6 +922,9 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
if ((flags & ~O_CLOEXEC) != 0)
return -EINVAL;
+ if (unlikely(oldfd == newfd))
+ return -EINVAL;
+
if (newfd >= rlimit(RLIMIT_NOFILE))
return -EMFILE;
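
The new oldfd == newfd check gives dup3() semantics distinct from dup2(). A small userspace illustration of the difference, assuming glibc with dup3() support:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = dup(1);
	int r2 = dup2(fd, fd);		/* no-op: returns fd */
	int r3 = dup3(fd, fd, 0);	/* now fails: returns -1 */

	printf("dup2=%d dup3=%d errno=%d (EINVAL=%d)\n", r2, r3, errno, EINVAL);
	return 0;
}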
diff --git a/fs/file_table.c b/fs/file_table.c
index dac67923330f..a72bf9ddd0d2 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -36,7 +36,7 @@ struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
-DEFINE_LGLOCK(files_lglock);
+DEFINE_STATIC_LGLOCK(files_lglock);
/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 96f24286667a..da165f6adcbf 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(unregister_filesystem);
static int fs_index(const char __user * __name)
{
struct file_system_type * tmp;
- char * name;
+ struct filename *name;
int err, index;
name = getname(__name);
@@ -135,7 +135,7 @@ static int fs_index(const char __user * __name)
err = -EINVAL;
read_lock(&file_systems_lock);
for (tmp=file_systems, index=0 ; tmp ; tmp=tmp->next, index++) {
- if (strcmp(tmp->name,name) == 0) {
+ if (strcmp(tmp->name, name->name) == 0) {
err = index;
break;
}
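
This hunk follows the tree-wide getname() conversion: the copied-in string now lives behind struct filename. A minimal sketch of the resulting call pattern, assuming this era's getname()/putname() pairing (lookup_index() is a hypothetical consumer):

static int fs_index_sketch(const char __user *uname)
{
	struct filename *name = getname(uname);
	int err;

	if (IS_ERR(name))
		return PTR_ERR(name);

	err = lookup_index(name->name);	/* kernel copy of the string */
	putname(name);
	return err;
}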
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6d46c0d78338..51ea267d444c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -63,6 +63,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
{
return test_bit(BDI_writeback_running, &bdi->state);
}
+EXPORT_SYMBOL(writeback_in_progress);
static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
@@ -248,7 +249,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
}
/*
- * Move expired (dirtied after work->older_than_this) dirty inodes from
+ * Move expired (dirtied before work->older_than_this) dirty inodes from
* @delaying_queue to @dispatch_queue.
*/
static int move_expired_inodes(struct list_head *delaying_queue,
@@ -438,8 +439,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* setting I_SYNC flag and calling inode_sync_complete() to clear it.
*/
static int
-__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
- struct writeback_control *wbc)
+__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
struct address_space *mapping = inode->i_mapping;
long nr_to_write = wbc->nr_to_write;
@@ -526,7 +526,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
inode->i_state |= I_SYNC;
spin_unlock(&inode->i_lock);
- ret = __writeback_single_inode(inode, wb, wbc);
+ ret = __writeback_single_inode(inode, wbc);
spin_lock(&wb->list_lock);
spin_lock(&inode->i_lock);
@@ -669,7 +669,7 @@ static long writeback_sb_inodes(struct super_block *sb,
* We use I_SYNC to pin the inode in memory. While it is set
* evict_inode() will wait so the inode cannot be freed.
*/
- __writeback_single_inode(inode, wb, &wbc);
+ __writeback_single_inode(inode, &wbc);
work->nr_pages -= write_chunk - wbc.nr_to_write;
wrote += write_chunk - wbc.nr_to_write;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index aba15f1b7ad2..78d2837bc940 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1379,6 +1379,7 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
.close = fuse_vma_close,
.fault = filemap_fault,
.page_mkwrite = fuse_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index e8ed6d4a6181..4767774a5f3e 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -161,6 +161,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
case GFS2_SMALL_FH_SIZE:
case GFS2_LARGE_FH_SIZE:
case GFS2_OLD_FH_SIZE:
+ if (fh_len < GFS2_SMALL_FH_SIZE)
+ return NULL;
this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
this.no_formal_ino |= be32_to_cpu(fh[1]);
this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
@@ -180,6 +182,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
switch (fh_type) {
case GFS2_LARGE_FH_SIZE:
case GFS2_OLD_FH_SIZE:
+ if (fh_len < GFS2_LARGE_FH_SIZE)
+ return NULL;
parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
parent.no_formal_ino |= be32_to_cpu(fh[5]);
parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 30e21997a1a1..0def0504afc1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -492,6 +492,7 @@ out:
static const struct vm_operations_struct gfs2_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = gfs2_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
/**
@@ -526,7 +527,6 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
return error;
}
vma->vm_ops = &gfs2_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
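
Both the fuse and gfs2 hunks apply the same conversion: nonlinear-remap capability moves from the VM_CAN_NONLINEAR vma flag to a ->remap_pages method. A minimal sketch of the pattern for a pagecache-backed file (the example_* names are hypothetical):

static const struct vm_operations_struct example_vm_ops = {
	.fault		= filemap_fault,
	.remap_pages	= generic_file_remap_pages,
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &example_vm_ops;	/* no VM_CAN_NONLINEAR flag needed */
	return 0;
}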
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 1fe731337f07..9c88da0e855a 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -1,7 +1,7 @@
#ifndef __UM_FS_HOSTFS
#define __UM_FS_HOSTFS
-#include "os.h"
+#include <os.h>
/*
* These are exactly the same definitions as in fs.h, but the names are
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 6c9f3a9d5e21..457addc5c91f 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -16,8 +16,8 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include "hostfs.h"
-#include "init.h"
-#include "kern.h"
+#include <init.h>
+#include <kern.h>
struct hostfs_inode_info {
int fd;
@@ -848,9 +848,11 @@ int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_size != i_size_read(inode)) {
int error;
- error = vmtruncate(inode, attr->ia_size);
- if (err)
- return err;
+ error = inode_newsize_ok(inode, attr->ia_size);
+ if (error)
+ return error;
+
+ truncate_setsize(inode, attr->ia_size);
}
setattr_copy(inode, attr);
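
The hostfs hunk also shows the standard vmtruncate() replacement: validate the new size first, then update i_size and drop pagecache past it. A minimal sketch of the idiom, assuming only core VFS helpers:

static int resize_inode(struct inode *inode, loff_t newsize)
{
	int error = inode_newsize_ok(inode, newsize);	/* size/limit checks */

	if (error)
		return error;

	truncate_setsize(inode, newsize);	/* set i_size, truncate pagecache */
	return 0;
}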
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index a74ad0d371c2..67838f3aa20a 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -15,7 +15,6 @@
#include <sys/types.h>
#include <sys/vfs.h>
#include "hostfs.h"
-#include "os.h"
#include <utime.h>
static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index 4bae4a4a60b1..2d5b254ad9e2 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -102,7 +102,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
return -1;
}
if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
- btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1);
+ le32_add_cpu(&btree->u.external[n].length, 1);
mark_buffer_dirty(bh);
brelse(bh);
return se;
@@ -153,7 +153,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
btree = &anode->btree;
}
btree->n_free_nodes--; n = btree->n_used_nodes++;
- btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12);
+ le16_add_cpu(&btree->first_free, 12);
btree->u.external[n].disk_secno = cpu_to_le32(se);
btree->u.external[n].file_secno = cpu_to_le32(fs);
btree->u.external[n].length = cpu_to_le32(1);
@@ -174,7 +174,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
}
if (btree->n_free_nodes) {
btree->n_free_nodes--; n = btree->n_used_nodes++;
- btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8);
+ le16_add_cpu(&btree->first_free, 8);
btree->u.internal[n].file_secno = cpu_to_le32(-1);
btree->u.internal[n].down = cpu_to_le32(na);
btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 3228c524ebe5..4364b2a02c5d 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -145,10 +145,10 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
}
}
if (ptr) {
- d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + 4);
+ le32_add_cpu(&d->first_free, 4);
if (le32_to_cpu(d->first_free) > 2048) {
hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self));
- d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - 4);
+ le32_add_cpu(&d->first_free, -4);
return;
}
de->length = cpu_to_le16(36);
@@ -184,7 +184,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
de->not_8x3 = hpfs_is_name_long(name, namelen);
de->namelen = namelen;
memcpy(de->name, name, namelen);
- d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + d_size);
+ le32_add_cpu(&d->first_free, d_size);
return de;
}
@@ -314,7 +314,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0);
de = de_next_de(de);
memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de);
- nd->first_free = cpu_to_le32(le32_to_cpu(nd->first_free) - ((char *)de - (char *)nd - 20));
+ le32_add_cpu(&nd->first_free, -((char *)de - (char *)nd - 20));
memcpy(d, nd, le32_to_cpu(nd->first_free));
for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos);
fix_up_ptrs(i->i_sb, ad);
@@ -474,8 +474,8 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
hpfs_brelse4(&qbh);
return 0;
}
- dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4);
- de->length = cpu_to_le16(le16_to_cpu(de->length) - 4);
+ le32_add_cpu(&dnode->first_free, -4);
+ le16_add_cpu(&de->length, -4);
de->down = 0;
hpfs_mark_4buffers_dirty(&qbh);
dno = up;
@@ -570,8 +570,8 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p);
if (!down) {
de->down = 0;
- de->length = cpu_to_le16(le16_to_cpu(de->length) - 4);
- dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4);
+ le16_add_cpu(&de->length, -4);
+ le32_add_cpu(&dnode->first_free, -4);
memmove(de_next_de(de), (char *)de_next_de(de) + 4,
(char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de));
} else {
@@ -647,14 +647,14 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n");
printk("HPFS: warning: goin'on\n");
}
- del->length = cpu_to_le16(le16_to_cpu(del->length) + 4);
+ le16_add_cpu(&del->length, 4);
del->down = 1;
- d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) + 4);
+ le32_add_cpu(&d1->first_free, 4);
}
if (dlp && !down) {
- del->length = cpu_to_le16(le16_to_cpu(del->length) - 4);
+ le16_add_cpu(&del->length, -4);
del->down = 0;
- d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4);
+ le32_add_cpu(&d1->first_free, -4);
} else if (down)
*(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
} else goto endm;
@@ -668,9 +668,9 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length));
hpfs_delete_de(i->i_sb, dnode, de_prev);
if (!de_prev->down) {
- de_prev->length = cpu_to_le16(le16_to_cpu(de_prev->length) + 4);
+ le16_add_cpu(&de_prev->length, 4);
de_prev->down = 1;
- dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4);
+ le32_add_cpu(&dnode->first_free, 4);
}
*(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
hpfs_mark_4buffers_dirty(&qbh);
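
All of the hpfs hunks above are one mechanical conversion. A minimal before/after sketch, assuming struct dnode's __le32 first_free field (bump_first_free() is a hypothetical wrapper):

static inline void bump_first_free(struct dnode *d, int delta)
{
	/* before: d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + delta); */
	le32_add_cpu(&d->first_free, delta);	/* same on-disk store, one call */
}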
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index bc28bf077a6a..a3076228523d 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -398,7 +398,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
*flags |= MS_NOATIME;
hpfs_lock(s);
- lock_super(s);
uid = sbi->sb_uid; gid = sbi->sb_gid;
umask = 0777 & ~sbi->sb_mode;
lowercase = sbi->sb_lowercase;
@@ -431,12 +430,10 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
replace_mount_options(s, new_opts);
- unlock_super(s);
hpfs_unlock(s);
return 0;
out_err:
- unlock_super(s);
hpfs_unlock(s);
kfree(new_opts);
return -EINVAL;
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index c1dffe47fde2..78f21f8dc2ec 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -18,7 +18,7 @@
#include <linux/pid_namespace.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
-#include "os.h"
+#include <os.h>
static struct inode *get_inode(struct super_block *, struct dentry *);
@@ -674,7 +674,7 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
if (!inode) {
dput(dentry);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
if (S_ISDIR(dentry->d_inode->i_mode)) {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9460120a5170..c5bc355d8243 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
* way when do_mmap_pgoff unwinds (may be important on powerpc
* and ia64).
*/
- vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+ vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &hugetlb_vm_ops;
if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
@@ -397,17 +397,16 @@ static void hugetlbfs_evict_inode(struct inode *inode)
}
static inline void
-hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
+hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
struct vm_area_struct *vma;
- struct prio_tree_iter iter;
- vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
+ vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
unsigned long v_offset;
/*
* Can the expression below overflow on 32-bit arches?
- * No, because the prio_tree returns us only those vmas
+ * No, because the interval tree returns us only those vmas
* which overlap the truncated area starting at pgoff,
* and no vma on a 32-bit arch can span beyond the 4GB.
*/
@@ -432,7 +431,7 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
i_size_write(inode, offset);
mutex_lock(&mapping->i_mmap_mutex);
- if (!prio_tree_empty(&mapping->i_mmap))
+ if (!RB_EMPTY_ROOT(&mapping->i_mmap))
hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
mutex_unlock(&mapping->i_mmap_mutex);
truncate_hugepages(inode, offset);
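
The hugetlbfs and fs/inode.c hunks track the mm-wide switch from prio trees to rbtree-based interval trees for i_mmap. A minimal sketch of the new iteration pattern (unmap_one() is a hypothetical callback):

static void truncate_mappings(struct rb_root *root, pgoff_t first)
{
	struct vm_area_struct *vma;

	if (RB_EMPTY_ROOT(root))	/* replaces prio_tree_empty() */
		return;

	/* no iterator object is needed any more; each vma returned
	 * overlaps [first, ULONG_MAX] in the mapping */
	vma_interval_tree_foreach(vma, root, first, ULONG_MAX)
		unmap_one(vma, first);
}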
diff --git a/fs/inode.c b/fs/inode.c
index ac8d904b3f16..b03c71957246 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -348,7 +348,7 @@ void address_space_init_once(struct address_space *mapping)
mutex_init(&mapping->i_mmap_mutex);
INIT_LIST_HEAD(&mapping->private_list);
spin_lock_init(&mapping->private_lock);
- INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+ mapping->i_mmap = RB_ROOT;
INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);
diff --git a/fs/internal.h b/fs/internal.h
index 371bcc4b1697..916b7cbf3e3e 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -97,8 +97,8 @@ struct open_flags {
int acc_mode;
int intent;
};
-extern struct file *do_filp_open(int dfd, const char *pathname,
- const struct open_flags *op, int lookup_flags);
+extern struct file *do_filp_open(int dfd, struct filename *pathname,
+ const struct open_flags *op, int flags);
extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
const char *, const struct open_flags *, int lookup_flags);
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 1d3804492aa7..2b4f2358eadb 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -175,7 +175,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
{
struct isofs_fid *ifid = (struct isofs_fid *)fid;
- if (fh_type != 2)
+ if (fh_len < 2 || fh_type != 2)
return NULL;
return isofs_export_iget(sb,
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index af5280fb579b..3091d42992f0 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -1014,17 +1014,35 @@ restart_loop:
* there's no point in keeping a checkpoint record for
* it. */
- /* A buffer which has been freed while still being
- * journaled by a previous transaction may end up still
- * being dirty here, but we want to avoid writing back
- * that buffer in the future after the "add to orphan"
- * operation been committed, That's not only a performance
- * gain, it also stops aliasing problems if the buffer is
- * left behind for writeback and gets reallocated for another
- * use in a different page. */
- if (buffer_freed(bh) && !jh->b_next_transaction) {
- clear_buffer_freed(bh);
- clear_buffer_jbddirty(bh);
+ /*
+ * A buffer which has been freed while still being journaled by
+ * a previous transaction.
+ */
+ if (buffer_freed(bh)) {
+ /*
+ * If the running transaction is the one containing
+ * "add to orphan" operation (b_next_transaction !=
+ * NULL), we have to wait for that transaction to
+ * commit before we can really get rid of the buffer.
+ * So just clear b_modified to not confuse transaction
+ * credit accounting and refile the buffer to
+ * BJ_Forget of the running transaction. If the just
+ * committed transaction contains "add to orphan"
+ * operation, we can completely invalidate the buffer
+ * now. We are rather thorough in that, since the
+ * buffer may still be accessible when blocksize <
+ * pagesize and it is attached to the last partial
+ * page.
+ */
+ jh->b_modified = 0;
+ if (!jh->b_next_transaction) {
+ clear_buffer_freed(bh);
+ clear_buffer_jbddirty(bh);
+ clear_buffer_mapped(bh);
+ clear_buffer_new(bh);
+ clear_buffer_req(bh);
+ bh->b_bdev = NULL;
+ }
}
if (buffer_jbddirty(bh)) {
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e149b99a7ffb..484b8d1c6cb6 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1354,6 +1354,11 @@ static void jbd2_mark_journal_empty(journal_t *journal)
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
read_lock(&journal->j_state_lock);
+ /* Is it already empty? */
+ if (sb->s_start == 0) {
+ read_unlock(&journal->j_state_lock);
+ return;
+ }
jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
journal->j_tail_sequence);
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 0131e4362534..626846bac32f 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -289,8 +289,11 @@ int jbd2_journal_recover(journal_t *journal)
if (!err)
err = err2;
/* Make sure all replayed data is on permanent storage */
- if (journal->j_flags & JBD2_BARRIER)
- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ if (journal->j_flags & JBD2_BARRIER) {
+ err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ if (!err)
+ err = err2;
+ }
return err;
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index fb1ab9533b67..a74ba4659549 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1841,15 +1841,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
* We're outside-transaction here. Either or both of j_running_transaction
* and j_committing_transaction may be NULL.
*/
-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
+ int partial_page)
{
transaction_t *transaction;
struct journal_head *jh;
int may_free = 1;
- int ret;
BUFFER_TRACE(bh, "entry");
+retry:
/*
* It is safe to proceed here without the j_list_lock because the
* buffers cannot be stolen by try_to_free_buffers as long as we are
@@ -1878,10 +1879,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
* clear the buffer dirty bit at latest at the moment when the
* transaction marking the buffer as freed in the filesystem
* structures is committed because from that moment on the
- * buffer can be reallocated and used by a different page.
+ * block can be reallocated and used by a different page.
* Since the block hasn't been freed yet but the inode has
* already been added to orphan list, it is safe for us to add
* the buffer to BJ_Forget list of the newest transaction.
+ *
+ * Also we have to clear buffer_mapped flag of a truncated buffer
+ * because the buffer_head may be attached to the page straddling
+ * i_size (can happen only when blocksize < pagesize) and thus the
+ * buffer_head can be reused when the file is extended again. So we end
+ * up keeping around invalidated buffers attached to transactions'
+ * BJ_Forget list just to stop checkpointing code from cleaning up
+ * the transaction this buffer was modified in.
*/
transaction = jh->b_transaction;
if (transaction == NULL) {
@@ -1908,13 +1917,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
* committed, the buffer won't be needed any
* longer. */
JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
- ret = __dispose_buffer(jh,
+ may_free = __dispose_buffer(jh,
journal->j_running_transaction);
- jbd2_journal_put_journal_head(jh);
- spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
- write_unlock(&journal->j_state_lock);
- return ret;
+ goto zap_buffer;
} else {
/* There is no currently-running transaction. So the
* orphan record which we wrote for this file must have
@@ -1922,13 +1927,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
* the committing transaction, if it exists. */
if (journal->j_committing_transaction) {
JBUFFER_TRACE(jh, "give to committing trans");
- ret = __dispose_buffer(jh,
+ may_free = __dispose_buffer(jh,
journal->j_committing_transaction);
- jbd2_journal_put_journal_head(jh);
- spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
- write_unlock(&journal->j_state_lock);
- return ret;
+ goto zap_buffer;
} else {
/* The orphan record's transaction has
* committed. We can cleanse this buffer */
@@ -1940,10 +1941,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
JBUFFER_TRACE(jh, "on committing transaction");
/*
* The buffer is committing, we simply cannot touch
- * it. So we just set j_next_transaction to the
- * running transaction (if there is one) and mark
- * buffer as freed so that commit code knows it should
- * clear dirty bits when it is done with the buffer.
+ * it. If the page is straddling i_size we have to wait
+ * for commit and try again.
+ */
+ if (partial_page) {
+ tid_t tid = journal->j_committing_transaction->t_tid;
+
+ jbd2_journal_put_journal_head(jh);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ write_unlock(&journal->j_state_lock);
+ jbd2_log_wait_commit(journal, tid);
+ goto retry;
+ }
+ /*
+ * OK, buffer won't be reachable after truncate. We just set
+ * j_next_transaction to the running transaction (if there is
+ * one) and mark buffer as freed so that commit code knows it
+ * should clear dirty bits when it is done with the buffer.
*/
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
@@ -1966,6 +1981,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
}
zap_buffer:
+ /*
+ * This is tricky. Although the buffer is truncated, it may be reused
+ * if blocksize < pagesize and it is attached to the page straddling
+ * EOF. Since the buffer might have been added to BJ_Forget list of the
+ * running transaction, journal_get_write_access() won't clear
+ * b_modified and credit accounting gets confused. So clear b_modified
+ * here.
+ */
+ jh->b_modified = 0;
jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
@@ -2017,7 +2041,8 @@ void jbd2_journal_invalidatepage(journal_t *journal,
if (offset <= curr_off) {
/* This block is wholly outside the truncation point */
lock_buffer(bh);
- may_free &= journal_unmap_buffer(journal, bh);
+ may_free &= journal_unmap_buffer(journal, bh,
+ offset > 0);
unlock_buffer(bh);
}
curr_off = next_off;
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 1ea349fff68b..ae81b01e6fd7 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -394,8 +394,11 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
}
/* Trivial function to remove the last node in the tree. Which by definition
- has no right-hand -- so can be removed just by making its only child (if
- any) take its place under its parent. */
+ has no right-hand child -- so can be removed just by making its left-hand
+ child (if any) take its place under its parent. Since this is only done
+ when we're consuming the whole tree, there's no need to use rb_erase()
+ and let it worry about adjusting colours and balancing the tree. That
+ would just be a waste of time. */
static void eat_last(struct rb_root *root, struct rb_node *node)
{
struct rb_node *parent = rb_parent(node);
@@ -412,12 +415,12 @@ static void eat_last(struct rb_root *root, struct rb_node *node)
link = &parent->rb_right;
*link = node->rb_left;
- /* Colour doesn't matter now. Only the parent pointer. */
if (node->rb_left)
- node->rb_left->rb_parent_color = node->rb_parent_color;
+ node->rb_left->__rb_parent_color = node->__rb_parent_color;
}
-/* We put this in reverse order, so we can just use eat_last */
+/* We put the version tree in reverse order, so we can use the same eat_last()
+ function that we use to consume the tmpnode tree (tn_root). */
static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
{
struct rb_node **link = &ver_root->rb_node;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index ff487954cd96..d3d8799e2187 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -100,6 +100,10 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ cancel_delayed_work_sync(&c->wbuf_dwork);
+#endif
+
mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
mutex_unlock(&c->alloc_sem);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 6f4529d3697f..a6597d60d76d 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1044,10 +1044,10 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
ops.datbuf = NULL;
ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
- if (ret || ops.oobretlen != ops.ooblen) {
+ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
jeb->offset, ops.ooblen, ops.oobretlen, ret);
- if (!ret)
+ if (!ret || mtd_is_bitflip(ret))
ret = -EIO;
return ret;
}
@@ -1086,10 +1086,10 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
ops.datbuf = NULL;
ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
- if (ret || ops.oobretlen != ops.ooblen) {
+ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
jeb->offset, ops.ooblen, ops.oobretlen, ret);
- if (!ret)
+ if (!ret || mtd_is_bitflip(ret))
ret = -EIO;
return ret;
}
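
Both jffs2 hunks adopt the same tolerance for ECC-corrected reads. A minimal sketch of the idiom, assuming the mtd API used above (read_oob_tolerant() is a hypothetical wrapper):

static int read_oob_tolerant(struct mtd_info *mtd, loff_t ofs,
			     struct mtd_oob_ops *ops)
{
	int ret = mtd_read_oob(mtd, ofs, ops);

	if (mtd_is_bitflip(ret))	/* ret == -EUCLEAN: data was corrected */
		ret = 0;
	if (ret || ops->oobretlen != ops->ooblen)
		return ret ? ret : -EIO;	/* short read without an errno */
	return 0;
}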
diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c
index 9947563e4175..dfcd50304559 100644
--- a/fs/jfs/jfs_discard.c
+++ b/fs/jfs/jfs_discard.c
@@ -83,7 +83,7 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
struct super_block *sb = ipbmap->i_sb;
int agno, agno_end;
- s64 start, end, minlen;
+ u64 start, end, minlen;
u64 trimmed = 0;
/**
@@ -93,15 +93,19 @@ int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)
* minlen: minimum extent length in Bytes
*/
start = range->start >> sb->s_blocksize_bits;
- if (start < 0)
- start = 0;
end = start + (range->len >> sb->s_blocksize_bits) - 1;
- if (end >= bmp->db_mapsize)
- end = bmp->db_mapsize - 1;
minlen = range->minlen >> sb->s_blocksize_bits;
- if (minlen <= 0)
+ if (minlen == 0)
minlen = 1;
+ if (minlen > bmp->db_agsize ||
+ start >= bmp->db_mapsize ||
+ range->len < sb->s_blocksize)
+ return -EINVAL;
+
+ if (end >= bmp->db_mapsize)
+ end = bmp->db_mapsize - 1;
+
/**
* we trim all ag's within the range
*/
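
For reference, the interface being validated here is driven from userspace roughly as follows; a minimal sketch assuming an FITRIM-capable filesystem mounted at /mnt (all fields are in bytes, and len is updated to the number of bytes trimmed):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	/* trim everything trimmable, with no minimum extent length */
	struct fstrim_range range = { .start = 0, .len = ~0ULL, .minlen = 0 };
	int fd = open("/mnt", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}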
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
index d269ada7670e..982d2676e1f8 100644
--- a/fs/lockd/clntxdr.c
+++ b/fs/lockd/clntxdr.c
@@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr,
{
__be32 *p;
- BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+ WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
p = xdr_reserve_space(xdr, 4);
*p = stat;
}
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 7ef14b3c5bee..3d7e09bcc0e9 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -7,7 +7,6 @@
*/
#include <linux/types.h>
-#include <linux/utsname.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/slab.h>
@@ -19,6 +18,8 @@
#include <asm/unaligned.h>
+#include "netns.h"
+
#define NLMDBG_FACILITY NLMDBG_MONITOR
#define NSM_PROGRAM 100024
#define NSM_VERSION 1
@@ -40,6 +41,7 @@ struct nsm_args {
u32 proc;
char *mon_name;
+ char *nodename;
};
struct nsm_res {
@@ -70,7 +72,7 @@ static struct rpc_clnt *nsm_create(struct net *net)
};
struct rpc_create_args args = {
.net = net,
- .protocol = XPRT_TRANSPORT_UDP,
+ .protocol = XPRT_TRANSPORT_TCP,
.address = (struct sockaddr *)&sin,
.addrsize = sizeof(sin),
.servername = "rpc.statd",
@@ -83,10 +85,61 @@ static struct rpc_clnt *nsm_create(struct net *net)
return rpc_create(&args);
}
+static struct rpc_clnt *nsm_client_set(struct lockd_net *ln,
+ struct rpc_clnt *clnt)
+{
+ spin_lock(&ln->nsm_clnt_lock);
+ if (ln->nsm_users == 0) {
+ if (clnt == NULL)
+ goto out;
+ ln->nsm_clnt = clnt;
+ }
+ clnt = ln->nsm_clnt;
+ ln->nsm_users++;
+out:
+ spin_unlock(&ln->nsm_clnt_lock);
+ return clnt;
+}
+
+static struct rpc_clnt *nsm_client_get(struct net *net)
+{
+ struct rpc_clnt *clnt, *new;
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ clnt = nsm_client_set(ln, NULL);
+ if (clnt != NULL)
+ goto out;
+
+ clnt = new = nsm_create(net);
+ if (IS_ERR(clnt))
+ goto out;
+
+ clnt = nsm_client_set(ln, new);
+ if (clnt != new)
+ rpc_shutdown_client(new);
+out:
+ return clnt;
+}
+
+static void nsm_client_put(struct net *net)
+{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+ struct rpc_clnt *clnt = NULL;
+
+ spin_lock(&ln->nsm_clnt_lock);
+ ln->nsm_users--;
+ if (ln->nsm_users == 0) {
+ clnt = ln->nsm_clnt;
+ ln->nsm_clnt = NULL;
+ }
+ spin_unlock(&ln->nsm_clnt_lock);
+ if (clnt != NULL)
+ rpc_shutdown_client(clnt);
+}
+
static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
- struct net *net)
+ struct rpc_clnt *clnt)
{
- struct rpc_clnt *clnt;
int status;
struct nsm_args args = {
.priv = &nsm->sm_priv,
@@ -94,31 +147,24 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
.vers = 3,
.proc = NLMPROC_NSM_NOTIFY,
.mon_name = nsm->sm_mon_name,
+ .nodename = clnt->cl_nodename,
};
struct rpc_message msg = {
.rpc_argp = &args,
.rpc_resp = res,
};
- clnt = nsm_create(net);
- if (IS_ERR(clnt)) {
- status = PTR_ERR(clnt);
- dprintk("lockd: failed to create NSM upcall transport, "
- "status=%d\n", status);
- goto out;
- }
+ BUG_ON(clnt == NULL);
memset(res, 0, sizeof(*res));
msg.rpc_proc = &clnt->cl_procinfo[proc];
- status = rpc_call_sync(clnt, &msg, 0);
+ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
if (status < 0)
dprintk("lockd: NSM upcall RPC failed, status=%d\n",
status);
else
status = 0;
- rpc_shutdown_client(clnt);
- out:
return status;
}
@@ -138,6 +184,7 @@ int nsm_monitor(const struct nlm_host *host)
struct nsm_handle *nsm = host->h_nsmhandle;
struct nsm_res res;
int status;
+ struct rpc_clnt *clnt;
dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
@@ -150,7 +197,15 @@ int nsm_monitor(const struct nlm_host *host)
*/
nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
- status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host->net);
+ clnt = nsm_client_get(host->net);
+ if (IS_ERR(clnt)) {
+ status = PTR_ERR(clnt);
+ dprintk("lockd: failed to create NSM upcall transport, "
+ "status=%d, net=%p\n", status, host->net);
+ return status;
+ }
+
+ status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, clnt);
if (unlikely(res.status != 0))
status = -EIO;
if (unlikely(status < 0)) {
@@ -182,9 +237,11 @@ void nsm_unmonitor(const struct nlm_host *host)
if (atomic_read(&nsm->sm_count) == 1
&& nsm->sm_monitored && !nsm->sm_sticky) {
+ struct lockd_net *ln = net_generic(host->net, lockd_net_id);
+
dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
- status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host->net);
+ status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, ln->nsm_clnt);
if (res.status != 0)
status = -EIO;
if (status < 0)
@@ -192,6 +249,8 @@ void nsm_unmonitor(const struct nlm_host *host)
nsm->sm_name);
else
nsm->sm_monitored = 0;
+
+ nsm_client_put(host->net);
}
}
@@ -430,7 +489,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
{
__be32 *p;
- encode_nsm_string(xdr, utsname()->nodename);
+ encode_nsm_string(xdr, argp->nodename);
p = xdr_reserve_space(xdr, 4 + 4 + 4);
*p++ = cpu_to_be32(argp->prog);
*p++ = cpu_to_be32(argp->vers);
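Taken together, nsm_client_set()/nsm_client_get()/nsm_client_put() above implement a per-namespace refcounted singleton for the rpc.statd client: the first user installs the client, later users share it, and the last put shuts it down. Note that nsm_client_get() deliberately creates a candidate client outside the lock and discards it if another thread installed one first. A minimal userspace model of that race-tolerant pattern (a pthread mutex stands in for nsm_clnt_lock; all names are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct client { int unused; };          /* stands in for struct rpc_clnt */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct client *singleton;        /* ln->nsm_clnt  */
static unsigned int users;              /* ln->nsm_users */

/* Mirror of nsm_client_set(): install @c if we are the first user,
 * otherwise hand back whatever is already installed. */
static struct client *client_set(struct client *c)
{
        pthread_mutex_lock(&lock);
        if (users == 0) {
                if (c == NULL)
                        goto out;
                singleton = c;
        }
        c = singleton;
        users++;
out:
        pthread_mutex_unlock(&lock);
        return c;
}

static struct client *client_get(void)
{
        struct client *c = client_set(NULL);
        struct client *fresh;

        if (c != NULL)
                return c;               /* fast path: share the existing one */

        fresh = malloc(sizeof(*fresh)); /* nsm_create(), outside the lock */
        if (fresh == NULL)
                return NULL;
        c = client_set(fresh);
        if (c != fresh)
                free(fresh);            /* lost the race: drop our copy */
        return c;
}

static void client_put(void)
{
        struct client *dead = NULL;

        pthread_mutex_lock(&lock);
        if (--users == 0) {
                dead = singleton;
                singleton = NULL;
        }
        pthread_mutex_unlock(&lock);
        free(dead);                     /* rpc_shutdown_client() */
}

int main(void)
{
        struct client *c = client_get();
        (void)c;
        client_put();
        return 0;
}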
diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
index 4eee248ba96e..5010b55628b4 100644
--- a/fs/lockd/netns.h
+++ b/fs/lockd/netns.h
@@ -12,6 +12,10 @@ struct lockd_net {
struct delayed_work grace_period_end;
struct lock_manager lockd_manager;
struct list_head grace_list;
+
+ spinlock_t nsm_clnt_lock;
+ unsigned int nsm_users;
+ struct rpc_clnt *nsm_clnt;
};
extern int lockd_net_id;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 31a63f87b806..a2aa97d45670 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -126,7 +126,7 @@ static void restart_grace(void)
static int
lockd(void *vrqstp)
{
- int err = 0, preverr = 0;
+ int err = 0;
struct svc_rqst *rqstp = vrqstp;
/* try_to_freeze() is called from svc_recv() */
@@ -165,21 +165,8 @@ lockd(void *vrqstp)
* recvfrom routine.
*/
err = svc_recv(rqstp, timeout);
- if (err == -EAGAIN || err == -EINTR) {
- preverr = err;
+ if (err == -EAGAIN || err == -EINTR)
continue;
- }
- if (err < 0) {
- if (err != preverr) {
- printk(KERN_WARNING "%s: unexpected error "
- "from svc_recv (%d)\n", __func__, err);
- preverr = err;
- }
- schedule_timeout_interruptible(HZ);
- continue;
- }
- preverr = err;
-
dprintk("lockd: request from %s\n",
svc_print_addr(rqstp, buf, sizeof(buf)));
@@ -596,6 +583,7 @@ static int lockd_init_net(struct net *net)
INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
INIT_LIST_HEAD(&ln->grace_list);
+ spin_lock_init(&ln->nsm_clnt_lock);
return 0;
}
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 3009a365e082..21171f0c6477 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -68,7 +68,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
/* Obtain file pointer. Not used by FREE_ALL call. */
if (filp != NULL) {
- if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
+ error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
+ if (error != 0)
goto no_locks;
*filp = file;
diff --git a/fs/locks.c b/fs/locks.c
index abc7dc6c490b..a94e331a52a2 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1289,7 +1289,7 @@ EXPORT_SYMBOL(__break_lease);
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
struct file_lock *flock = inode->i_flock;
- if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
+ if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
*time = current_fs_time(inode->i_sb);
else
*time = inode->i_mtime;
@@ -2185,8 +2185,8 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
} else {
seq_printf(f, "%s ",
(lease_breaking(fl))
- ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
- : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
+ ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
+ : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
}
if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
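The locks.c hunks replace bitwise tests on fl_type with equality tests. fl_type holds a single enumerated lock type, not a set of flag bits, so masking with F_WRLCK or F_UNLCK asks the wrong question; in particular F_RDLCK is conventionally 0, which no bitwise test can ever match. A small demonstration, assuming the usual Linux values:

#include <assert.h>
#include <fcntl.h>      /* F_RDLCK, F_WRLCK, F_UNLCK */
#include <stdio.h>

int main(void)
{
        /* On Linux these are small enumerated values, not flag bits: */
        printf("F_RDLCK=%d F_WRLCK=%d F_UNLCK=%d\n",
               F_RDLCK, F_WRLCK, F_UNLCK);      /* typically 0, 1, 2 */

        int fl_type = F_RDLCK;

        /* A bitwise test can never match F_RDLCK, because its value is 0: */
        assert((fl_type & F_RDLCK) == 0);

        /* An equality test expresses the real intent: */
        assert(fl_type == F_RDLCK);
        assert(fl_type != F_WRLCK);
        return 0;
}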
diff --git a/fs/namei.c b/fs/namei.c
index aa30d19e9edd..937f9d50c84b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -117,18 +117,70 @@
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
* PATH_MAX includes the nul terminator --RR.
*/
-static char *getname_flags(const char __user *filename, int flags, int *empty)
+void final_putname(struct filename *name)
{
- char *result = __getname(), *err;
+ if (name->separate) {
+ __putname(name->name);
+ kfree(name);
+ } else {
+ __putname(name);
+ }
+}
+
+#define EMBEDDED_NAME_MAX (PATH_MAX - sizeof(struct filename))
+
+static struct filename *
+getname_flags(const char __user *filename, int flags, int *empty)
+{
+ struct filename *result, *err;
int len;
+ long max;
+ char *kname;
+ result = audit_reusename(filename);
+ if (result)
+ return result;
+
+ result = __getname();
if (unlikely(!result))
return ERR_PTR(-ENOMEM);
- len = strncpy_from_user(result, filename, PATH_MAX);
- err = ERR_PTR(len);
- if (unlikely(len < 0))
+ /*
+ * First, try to embed the struct filename inside the names_cache
+ * allocation
+ */
+ kname = (char *)result + sizeof(*result);
+ result->name = kname;
+ result->separate = false;
+ max = EMBEDDED_NAME_MAX;
+
+recopy:
+ len = strncpy_from_user(kname, filename, max);
+ if (unlikely(len < 0)) {
+ err = ERR_PTR(len);
goto error;
+ }
+
+ /*
+ * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
+ * separate struct filename so we can dedicate the entire
+ * names_cache allocation for the pathname, and re-do the copy from
+ * userland.
+ */
+ if (len == EMBEDDED_NAME_MAX && max == EMBEDDED_NAME_MAX) {
+ kname = (char *)result;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result) {
+ err = ERR_PTR(-ENOMEM);
+ result = (struct filename *)kname;
+ goto error;
+ }
+ result->name = kname;
+ result->separate = true;
+ max = PATH_MAX;
+ goto recopy;
+ }
/* The empty path is special. */
if (unlikely(!len)) {
@@ -140,30 +192,32 @@ static char *getname_flags(const char __user *filename, int flags, int *empty)
}
err = ERR_PTR(-ENAMETOOLONG);
- if (likely(len < PATH_MAX)) {
- audit_getname(result);
- return result;
- }
+ if (unlikely(len >= PATH_MAX))
+ goto error;
+
+ result->uptr = filename;
+ audit_getname(result);
+ return result;
error:
- __putname(result);
+ final_putname(result);
return err;
}
-char *getname(const char __user * filename)
+struct filename *
+getname(const char __user * filename)
{
return getname_flags(filename, 0, NULL);
}
+EXPORT_SYMBOL(getname);
#ifdef CONFIG_AUDITSYSCALL
-void putname(const char *name)
+void putname(struct filename *name)
{
if (unlikely(!audit_dummy_context()))
- audit_putname(name);
- else
- __putname(name);
+ return audit_putname(name);
+ final_putname(name);
}
-EXPORT_SYMBOL(putname);
#endif
static int check_acl(struct inode *inode, int mask)
@@ -651,8 +705,8 @@ static inline void put_link(struct nameidata *nd, struct path *link, void *cooki
path_put(link);
}
-int sysctl_protected_symlinks __read_mostly = 1;
-int sysctl_protected_hardlinks __read_mostly = 1;
+int sysctl_protected_symlinks __read_mostly = 0;
+int sysctl_protected_hardlinks __read_mostly = 0;
/**
* may_follow_link - Check symlink following for unsafe situations
@@ -692,9 +746,9 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
if (uid_eq(parent->i_uid, inode->i_uid))
return 0;
+ audit_log_link_denied("follow_link", link);
path_put_conditional(link, nd);
path_put(&nd->path);
- audit_log_link_denied("follow_link", link);
return -EACCES;
}
@@ -810,6 +864,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
return error;
out_put_nd_path:
+ *p = NULL;
path_put(&nd->path);
path_put(link);
return error;
@@ -1962,24 +2017,29 @@ static int path_lookupat(int dfd, const char *name,
return err;
}
-static int do_path_lookup(int dfd, const char *name,
+static int filename_lookup(int dfd, struct filename *name,
unsigned int flags, struct nameidata *nd)
{
- int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
+ int retval = path_lookupat(dfd, name->name, flags | LOOKUP_RCU, nd);
if (unlikely(retval == -ECHILD))
- retval = path_lookupat(dfd, name, flags, nd);
+ retval = path_lookupat(dfd, name->name, flags, nd);
if (unlikely(retval == -ESTALE))
- retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
+ retval = path_lookupat(dfd, name->name,
+ flags | LOOKUP_REVAL, nd);
- if (likely(!retval)) {
- if (unlikely(!audit_dummy_context())) {
- if (nd->path.dentry && nd->inode)
- audit_inode(name, nd->path.dentry);
- }
- }
+ if (likely(!retval))
+ audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
return retval;
}
+static int do_path_lookup(int dfd, const char *name,
+ unsigned int flags, struct nameidata *nd)
+{
+ struct filename filename = { .name = name };
+
+ return filename_lookup(dfd, &filename, flags, nd);
+}
+
/* does lookup, returns the object with parent locked */
struct dentry *kern_path_locked(const char *name, struct path *path)
{
@@ -2097,13 +2157,13 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
struct path *path, int *empty)
{
struct nameidata nd;
- char *tmp = getname_flags(name, flags, empty);
+ struct filename *tmp = getname_flags(name, flags, empty);
int err = PTR_ERR(tmp);
if (!IS_ERR(tmp)) {
BUG_ON(flags & LOOKUP_PARENT);
- err = do_path_lookup(dfd, tmp, flags, &nd);
+ err = filename_lookup(dfd, tmp, flags, &nd);
putname(tmp);
if (!err)
*path = nd.path;
@@ -2117,22 +2177,28 @@ int user_path_at(int dfd, const char __user *name, unsigned flags,
return user_path_at_empty(dfd, name, flags, path, NULL);
}
-static int user_path_parent(int dfd, const char __user *path,
- struct nameidata *nd, char **name)
+/*
+ * NB: most callers don't do anything directly with the reference to the
+ * struct filename, but the nd->last pointer points into the name string
+ * allocated by getname. So we must hold the reference to it until all
+ * path-walking is complete.
+ */
+static struct filename *
+user_path_parent(int dfd, const char __user *path, struct nameidata *nd)
{
- char *s = getname(path);
+ struct filename *s = getname(path);
int error;
if (IS_ERR(s))
- return PTR_ERR(s);
+ return s;
- error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd);
- if (error)
+ error = filename_lookup(dfd, s, LOOKUP_PARENT, nd);
+ if (error) {
putname(s);
- else
- *name = s;
+ return ERR_PTR(error);
+ }
- return error;
+ return s;
}
/*
@@ -2179,7 +2245,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
return -ENOENT;
BUG_ON(victim->d_parent->d_inode != dir);
- audit_inode_child(victim, dir);
+ audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);
error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
if (error)
@@ -2624,7 +2690,7 @@ out_dput:
*/
static int do_last(struct nameidata *nd, struct path *path,
struct file *file, const struct open_flags *op,
- int *opened, const char *pathname)
+ int *opened, struct filename *name)
{
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
@@ -2651,7 +2717,7 @@ static int do_last(struct nameidata *nd, struct path *path,
error = complete_walk(nd);
if (error)
return error;
- audit_inode(pathname, nd->path.dentry);
+ audit_inode(name, nd->path.dentry, 0);
if (open_flag & O_CREAT) {
error = -EISDIR;
goto out;
@@ -2661,7 +2727,7 @@ static int do_last(struct nameidata *nd, struct path *path,
error = complete_walk(nd);
if (error)
return error;
- audit_inode(pathname, dir);
+ audit_inode(name, dir, 0);
goto finish_open;
}
@@ -2690,7 +2756,7 @@ static int do_last(struct nameidata *nd, struct path *path,
if (error)
return error;
- audit_inode(pathname, dir);
+ audit_inode(name, dir, 0);
error = -EISDIR;
/* trailing slashes? */
if (nd->last.name[nd->last.len])
@@ -2720,7 +2786,7 @@ retry_lookup:
!S_ISREG(file->f_path.dentry->d_inode->i_mode))
will_truncate = false;
- audit_inode(pathname, file->f_path.dentry);
+ audit_inode(name, file->f_path.dentry, 0);
goto opened;
}
@@ -2737,7 +2803,7 @@ retry_lookup:
* create/update audit record if it already exists.
*/
if (path->dentry->d_inode)
- audit_inode(pathname, path->dentry);
+ audit_inode(name, path->dentry, 0);
/*
* If atomic_open() acquired write access it is dropped now due to
@@ -2802,7 +2868,7 @@ finish_lookup:
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
goto out;
- audit_inode(pathname, nd->path.dentry);
+ audit_inode(name, nd->path.dentry, 0);
finish_open:
if (!S_ISREG(nd->inode->i_mode))
will_truncate = false;
@@ -2870,7 +2936,7 @@ stale_open:
goto retry_lookup;
}
-static struct file *path_openat(int dfd, const char *pathname,
+static struct file *path_openat(int dfd, struct filename *pathname,
struct nameidata *nd, const struct open_flags *op, int flags)
{
struct file *base = NULL;
@@ -2885,12 +2951,12 @@ static struct file *path_openat(int dfd, const char *pathname,
file->f_flags = op->open_flag;
- error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
+ error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
if (unlikely(error))
goto out;
current->total_link_count = 0;
- error = link_path_walk(pathname, nd);
+ error = link_path_walk(pathname->name, nd);
if (unlikely(error))
goto out;
@@ -2936,7 +3002,7 @@ out:
return file;
}
-struct file *do_filp_open(int dfd, const char *pathname,
+struct file *do_filp_open(int dfd, struct filename *pathname,
const struct open_flags *op, int flags)
{
struct nameidata nd;
@@ -2955,6 +3021,7 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
{
struct nameidata nd;
struct file *file;
+ struct filename filename = { .name = name };
nd.root.mnt = mnt;
nd.root.dentry = dentry;
@@ -2964,11 +3031,11 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
return ERR_PTR(-ELOOP);
- file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
+ file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_RCU);
if (unlikely(file == ERR_PTR(-ECHILD)))
- file = path_openat(-1, name, &nd, op, flags);
+ file = path_openat(-1, &filename, &nd, op, flags);
if (unlikely(file == ERR_PTR(-ESTALE)))
- file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
+ file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_REVAL);
return file;
}
@@ -3043,11 +3110,11 @@ EXPORT_SYMBOL(done_path_create);
struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir)
{
- char *tmp = getname(pathname);
+ struct filename *tmp = getname(pathname);
struct dentry *res;
if (IS_ERR(tmp))
return ERR_CAST(tmp);
- res = kern_path_create(dfd, tmp, path, is_dir);
+ res = kern_path_create(dfd, tmp->name, path, is_dir);
putname(tmp);
return res;
}
@@ -3252,13 +3319,13 @@ out:
static long do_rmdir(int dfd, const char __user *pathname)
{
int error = 0;
- char * name;
+ struct filename *name;
struct dentry *dentry;
struct nameidata nd;
- error = user_path_parent(dfd, pathname, &nd, &name);
- if (error)
- return error;
+ name = user_path_parent(dfd, pathname, &nd);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
switch(nd.last_type) {
case LAST_DOTDOT:
@@ -3347,14 +3414,14 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
static long do_unlinkat(int dfd, const char __user *pathname)
{
int error;
- char *name;
+ struct filename *name;
struct dentry *dentry;
struct nameidata nd;
struct inode *inode = NULL;
- error = user_path_parent(dfd, pathname, &nd, &name);
- if (error)
- return error;
+ name = user_path_parent(dfd, pathname, &nd);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
error = -EISDIR;
if (nd.last_type != LAST_NORM)
@@ -3438,7 +3505,7 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
int, newdfd, const char __user *, newname)
{
int error;
- char *from;
+ struct filename *from;
struct dentry *dentry;
struct path path;
@@ -3451,9 +3518,9 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
if (IS_ERR(dentry))
goto out_putname;
- error = security_path_symlink(&path, dentry, from);
+ error = security_path_symlink(&path, dentry, from->name);
if (!error)
- error = vfs_symlink(path.dentry->d_inode, dentry, from);
+ error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
done_path_create(&path, dentry);
out_putname:
putname(from);
@@ -3733,17 +3800,21 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
struct dentry *old_dentry, *new_dentry;
struct dentry *trap;
struct nameidata oldnd, newnd;
- char *from;
- char *to;
+ struct filename *from;
+ struct filename *to;
int error;
- error = user_path_parent(olddfd, oldname, &oldnd, &from);
- if (error)
+ from = user_path_parent(olddfd, oldname, &oldnd);
+ if (IS_ERR(from)) {
+ error = PTR_ERR(from);
goto exit;
+ }
- error = user_path_parent(newdfd, newname, &newnd, &to);
- if (error)
+ to = user_path_parent(newdfd, newname, &newnd);
+ if (IS_ERR(to)) {
+ error = PTR_ERR(to);
goto exit1;
+ }
error = -EXDEV;
if (oldnd.path.mnt != newnd.path.mnt)
@@ -3967,7 +4038,6 @@ EXPORT_SYMBOL(follow_down_one);
EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* nfsd */
-EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(lock_rename);
EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link_light);
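The getname_flags() rewrite above packs struct filename and the pathname into a single names_cache allocation: the string is embedded right after the header, and only names of EMBEDDED_NAME_MAX bytes or more fall back to a separately allocated header, with the whole buffer dedicated to the string. A userspace sketch of the same two-layout scheme (buffer size and function names here are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define NAMEBUF_SZ 4096         /* stand-in for the names_cache object */

struct filename {
        const char *name;       /* pointer to the actual pathname */
        bool separate;          /* header allocated apart from the string? */
};

#define EMBEDDED_NAME_MAX (NAMEBUF_SZ - sizeof(struct filename))

/* Model of getname(): short names share one allocation with the header;
 * long names get the whole buffer and the header moves elsewhere. */
static struct filename *getname_model(const char *src)
{
        size_t len = strlen(src);
        struct filename *f;
        char *buf;

        if (len >= NAMEBUF_SZ)
                return NULL;                    /* -ENAMETOOLONG */

        buf = malloc(NAMEBUF_SZ);               /* __getname() */
        if (!buf)
                return NULL;

        if (len < EMBEDDED_NAME_MAX) {          /* common case: embed */
                f = (struct filename *)buf;
                f->name = buf + sizeof(*f);
                f->separate = false;
                memcpy(buf + sizeof(*f), src, len + 1);
                return f;
        }

        f = calloc(1, sizeof(*f));              /* rare case: separate header */
        if (!f) {
                free(buf);
                return NULL;
        }
        memcpy(buf, src, len + 1);
        f->name = buf;
        f->separate = true;
        return f;
}

static void putname_model(struct filename *f)   /* final_putname() */
{
        if (f->separate)
                free((void *)f->name);
        free(f);
}

int main(void)
{
        struct filename *f = getname_model("/etc/hosts");
        if (f)
                putname_model(f);
        return 0;
}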
diff --git a/fs/namespace.c b/fs/namespace.c
index 7bdf7907413f..24960626bb6b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1640,7 +1640,7 @@ static int do_change_type(struct path *path, int flag)
/*
* do loopback mount.
*/
-static int do_loopback(struct path *path, char *old_name,
+static int do_loopback(struct path *path, const char *old_name,
int recurse)
{
LIST_HEAD(umount_list);
@@ -1764,7 +1764,7 @@ static inline int tree_contains_unbindable(struct mount *mnt)
return 0;
}
-static int do_move_mount(struct path *path, char *old_name)
+static int do_move_mount(struct path *path, const char *old_name)
{
struct path old_path, parent_path;
struct mount *p;
@@ -1917,8 +1917,8 @@ unlock:
* create a new mount for userspace and request it to be added into the
* namespace's tree
*/
-static int do_new_mount(struct path *path, char *type, int flags,
- int mnt_flags, char *name, void *data)
+static int do_new_mount(struct path *path, const char *type, int flags,
+ int mnt_flags, const char *name, void *data)
{
struct vfsmount *mnt;
int err;
@@ -2191,8 +2191,8 @@ int copy_mount_string(const void __user *data, char **where)
* Therefore, if this magic number is present, it carries no information
* and must be discarded.
*/
-long do_mount(char *dev_name, char *dir_name, char *type_page,
- unsigned long flags, void *data_page)
+long do_mount(const char *dev_name, const char *dir_name,
+ const char *type_page, unsigned long flags, void *data_page)
{
struct path path;
int retval = 0;
@@ -2408,7 +2408,7 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
{
int ret;
char *kernel_type;
- char *kernel_dir;
+ struct filename *kernel_dir;
char *kernel_dev;
unsigned long data_page;
@@ -2430,7 +2430,7 @@ SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
if (ret < 0)
goto out_data;
- ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
+ ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
(void *) data_page);
free_page(data_page);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index db7ad719628a..13ca196385f5 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -95,8 +95,8 @@ config NFS_SWAP
This option enables swapon to work on files located on NFS mounts.
config NFS_V4_1
- bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
- depends on NFS_V4 && EXPERIMENTAL
+ bool "NFS client support for NFSv4.1"
+ depends on NFS_V4
select SUNRPC_BACKCHANNEL
help
This option enables support for minor version 1 of the NFSv4 protocol
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index dd392ed5f2e2..f1027b06a1a9 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -37,6 +37,7 @@
#include <linux/bio.h> /* struct bio */
#include <linux/buffer_head.h> /* various write calls */
#include <linux/prefetch.h>
+#include <linux/pagevec.h>
#include "../pnfs.h"
#include "../internal.h"
@@ -162,25 +163,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
return bio;
}
-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
sector_t isect, struct page *page,
struct pnfs_block_extent *be,
void (*end_io)(struct bio *, int err),
- struct parallel_io *par)
+ struct parallel_io *par,
+ unsigned int offset, int len)
{
+ isect = isect + (offset >> SECTOR_SHIFT);
+ dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
+ npg, rw, (unsigned long long)isect, offset, len);
retry:
if (!bio) {
bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
if (!bio)
return ERR_PTR(-ENOMEM);
}
- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ if (bio_add_page(bio, page, len, offset) < len) {
bio = bl_submit_bio(rw, bio);
goto retry;
}
return bio;
}
+static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+ struct parallel_io *par)
+{
+ return do_add_page_to_bio(bio, npg, rw, isect, page, be,
+ end_io, par, 0, PAGE_CACHE_SIZE);
+}
+
/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
@@ -228,14 +243,6 @@ bl_end_par_io_read(void *data, int unused)
schedule_work(&rdata->task.u.tk_work);
}
-static bool
-bl_check_alignment(u64 offset, u32 len, unsigned long blkmask)
-{
- if ((offset & blkmask) || (len & blkmask))
- return false;
- return true;
-}
-
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
@@ -246,15 +253,15 @@ bl_read_pagelist(struct nfs_read_data *rdata)
sector_t isect, extent_length = 0;
struct parallel_io *par;
loff_t f_offset = rdata->args.offset;
+ size_t bytes_left = rdata->args.count;
+ unsigned int pg_offset, pg_len;
struct page **pages = rdata->args.pages;
int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
+ const bool is_dio = (header->dreq != NULL);
dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
- if (!bl_check_alignment(f_offset, rdata->args.count, PAGE_CACHE_MASK))
- goto use_mds;
-
par = alloc_parallel(rdata);
if (!par)
goto use_mds;
@@ -284,36 +291,53 @@ bl_read_pagelist(struct nfs_read_data *rdata)
extent_length = min(extent_length, cow_length);
}
}
+
+ if (is_dio) {
+ pg_offset = f_offset & ~PAGE_CACHE_MASK;
+ if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
+ pg_len = PAGE_CACHE_SIZE - pg_offset;
+ else
+ pg_len = bytes_left;
+
+ f_offset += pg_len;
+ bytes_left -= pg_len;
+ isect += (pg_offset >> SECTOR_SHIFT);
+ } else {
+ pg_offset = 0;
+ pg_len = PAGE_CACHE_SIZE;
+ }
+
hole = is_hole(be, isect);
if (hole && !cow_read) {
bio = bl_submit_bio(READ, bio);
/* Fill hole w/ zeroes w/o accessing device */
dprintk("%s Zeroing page for hole\n", __func__);
- zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
+ zero_user_segment(pages[i], pg_offset, pg_len);
print_page(pages[i]);
SetPageUptodate(pages[i]);
} else {
struct pnfs_block_extent *be_read;
be_read = (hole && cow_read) ? cow_read : be;
- bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
+ bio = do_add_page_to_bio(bio, rdata->pages.npages - i,
READ,
isect, pages[i], be_read,
- bl_end_io_read, par);
+ bl_end_io_read, par,
+ pg_offset, pg_len);
if (IS_ERR(bio)) {
header->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
}
- isect += PAGE_CACHE_SECTORS;
+ isect += (pg_len >> SECTOR_SHIFT);
extent_length -= PAGE_CACHE_SECTORS;
}
if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
rdata->res.eof = 1;
- rdata->res.count = header->inode->i_size - f_offset;
+ rdata->res.count = header->inode->i_size - rdata->args.offset;
} else {
- rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
+ rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset;
}
out:
bl_put_extent(be);
@@ -461,6 +485,106 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
return;
}
+static void
+bl_read_single_end_io(struct bio *bio, int error)
+{
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct page *page = bvec->bv_page;
+
+ /* Only one page in bvec */
+ unlock_page(page);
+}
+
+static int
+bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
+ unsigned int offset, unsigned int len)
+{
+ struct bio *bio;
+ struct page *shadow_page;
+ sector_t isect;
+ char *kaddr, *kshadow_addr;
+ int ret = 0;
+
+ dprintk("%s: offset %u len %u\n", __func__, offset, len);
+
+ shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (shadow_page == NULL)
+ return -ENOMEM;
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (bio == NULL) {
+ __free_page(shadow_page);
+ return -ENOMEM;
+ }
+
+ isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
+ (offset / SECTOR_SIZE);
+
+ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+ bio->bi_bdev = be->be_mdev;
+ bio->bi_end_io = bl_read_single_end_io;
+
+ lock_page(shadow_page);
+ if (bio_add_page(bio, shadow_page,
+ SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
+ unlock_page(shadow_page);
+ bio_put(bio);
+ return -EIO;
+ }
+
+ submit_bio(READ, bio);
+ wait_on_page_locked(shadow_page);
+ if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
+ ret = -EIO;
+ } else {
+ kaddr = kmap_atomic(page);
+ kshadow_addr = kmap_atomic(shadow_page);
+ memcpy(kaddr + offset, kshadow_addr + offset, len);
+ kunmap_atomic(kshadow_addr);
+ kunmap_atomic(kaddr);
+ }
+ __free_page(shadow_page);
+ bio_put(bio);
+
+ return ret;
+}
+
+static int
+bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
+ unsigned int dirty_offset, unsigned int dirty_len,
+ bool full_page)
+{
+ int ret = 0;
+ unsigned int start, end;
+
+ if (full_page) {
+ start = 0;
+ end = PAGE_CACHE_SIZE;
+ } else {
+ start = round_down(dirty_offset, SECTOR_SIZE);
+ end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
+ }
+
+ dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
+ if (!be) {
+ zero_user_segments(page, start, dirty_offset,
+ dirty_offset + dirty_len, end);
+ if (start == 0 && end == PAGE_CACHE_SIZE &&
+ trylock_page(page)) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ return ret;
+ }
+
+ if (start != dirty_offset)
+ ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
+
+ if (!ret && (dirty_offset + dirty_len < end))
+ ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
+ end - dirty_offset - dirty_len);
+
+ return ret;
+}
+
/* Given an unmapped page, zero it or read in the page for COW; the page
* is locked by the caller.
*/
@@ -494,7 +618,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
SetPageUptodate(page);
cleanup:
- bl_put_extent(cow_read);
if (bh)
free_buffer_head(bh);
if (ret) {
@@ -566,6 +689,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
struct parallel_io *par = NULL;
loff_t offset = wdata->args.offset;
size_t count = wdata->args.count;
+ unsigned int pg_offset, pg_len, saved_len;
struct page **pages = wdata->args.pages;
struct page *page;
pgoff_t index;
@@ -574,10 +698,13 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
- /* Check for alignment first */
- if (!bl_check_alignment(offset, count, PAGE_CACHE_MASK))
- goto out_mds;
+ if (header->dreq != NULL &&
+ (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
+ !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
+ dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
+ goto out_mds;
+ }
/* At this point, wdata->pages is a (sequential) list of nfs_pages.
* We want to write each, and if there is an error set pnfs_error
* to have it redone using nfs.
@@ -674,10 +801,11 @@ next_page:
if (!extent_length) {
/* We've used up the previous extent */
bl_put_extent(be);
+ bl_put_extent(cow_read);
bio = bl_submit_bio(WRITE, bio);
/* Get the next one */
be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
- isect, NULL);
+ isect, &cow_read);
if (!be || !is_writable(be, isect)) {
header->pnfs_error = -EINVAL;
goto out;
@@ -694,7 +822,26 @@ next_page:
extent_length = be->be_length -
(isect - be->be_f_offset);
}
- if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+
+ dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
+ pg_offset = offset & ~PAGE_CACHE_MASK;
+ if (pg_offset + count > PAGE_CACHE_SIZE)
+ pg_len = PAGE_CACHE_SIZE - pg_offset;
+ else
+ pg_len = count;
+
+ saved_len = pg_len;
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
+ !bl_is_sector_init(be->be_inval, isect)) {
+ ret = bl_read_partial_page_sync(pages[i], cow_read,
+ pg_offset, pg_len, true);
+ if (ret) {
+ dprintk("%s bl_read_partial_page_sync fail %d\n",
+ __func__, ret);
+ header->pnfs_error = ret;
+ goto out;
+ }
+
ret = bl_mark_sectors_init(be->be_inval, isect,
PAGE_CACHE_SECTORS);
if (unlikely(ret)) {
@@ -703,15 +850,35 @@ next_page:
header->pnfs_error = ret;
goto out;
}
+
+ /* Expand to full page write */
+ pg_offset = 0;
+ pg_len = PAGE_CACHE_SIZE;
+ } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
+ (pg_len & (SECTOR_SIZE - 1))) {
+ /* Nasty case: the range is not sector-aligned, so we
+ * must do synchronous full-sector read-modify-write
+ * cycles.
+ */
+ unsigned int saved_offset = pg_offset;
+ ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
+ pg_len, false);
+ pg_offset = round_down(pg_offset, SECTOR_SIZE);
+ pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
+ - pg_offset;
}
- bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
+
+ bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
isect, pages[i], be,
- bl_end_io_write, par);
+ bl_end_io_write, par,
+ pg_offset, pg_len);
if (IS_ERR(bio)) {
header->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
+ offset += saved_len;
+ count -= saved_len;
isect += PAGE_CACHE_SECTORS;
last_isect = isect;
extent_length -= PAGE_CACHE_SECTORS;
@@ -729,17 +896,16 @@ next_page:
}
write_done:
- wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
- if (count < wdata->res.count) {
- wdata->res.count = count;
- }
+ wdata->res.count = wdata->args.count;
out:
bl_put_extent(be);
+ bl_put_extent(cow_read);
bl_submit_bio(WRITE, bio);
put_parallel(par);
return PNFS_ATTEMPTED;
out_mds:
bl_put_extent(be);
+ bl_put_extent(cow_read);
kfree(par);
return PNFS_NOT_ATTEMPTED;
}
@@ -874,7 +1040,7 @@ static void free_blk_mountid(struct block_mount_id *mid)
}
}
-/* This is mostly copied from the filelayout's get_device_info function.
+/* This is mostly copied from the filelayout_get_device_info function.
* It seems much of this should be at the generic pnfs level.
*/
static struct pnfs_block_dev *
@@ -1011,33 +1177,95 @@ bl_clear_layoutdriver(struct nfs_server *server)
return 0;
}
+static bool
+is_aligned_req(struct nfs_page *req, unsigned int alignment)
+{
+ return IS_ALIGNED(req->wb_offset, alignment) &&
+ IS_ALIGNED(req->wb_bytes, alignment);
+}
+
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
- if (!bl_check_alignment(req->wb_offset, req->wb_bytes, PAGE_CACHE_MASK))
+ if (pgio->pg_dreq != NULL &&
+ !is_aligned_req(req, SECTOR_SIZE))
nfs_pageio_reset_read_mds(pgio);
else
pnfs_generic_pg_init_read(pgio, req);
}
+static bool
+bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ struct nfs_page *req)
+{
+ if (pgio->pg_dreq != NULL &&
+ !is_aligned_req(req, SECTOR_SIZE))
+ return false;
+
+ return pnfs_generic_pg_test(pgio, prev, req);
+}
+
+/*
+ * Return the number of contiguous bytes for a given inode
+ * starting at page frame idx.
+ */
+static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
+{
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t end;
+
+ /* Optimize the common case of a write from 0 to the end of the file */
+ end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
+ if (end != NFS_I(inode)->npages) {
+ rcu_read_lock();
+ end = radix_tree_next_hole(&mapping->page_tree, idx + 1, ULONG_MAX);
+ rcu_read_unlock();
+ }
+
+ if (!end)
+ return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
+ else
+ return (end - idx) << PAGE_CACHE_SHIFT;
+}
+
static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
- if (!bl_check_alignment(req->wb_offset, req->wb_bytes, PAGE_CACHE_MASK))
+ if (pgio->pg_dreq != NULL &&
+ !is_aligned_req(req, PAGE_CACHE_SIZE)) {
nfs_pageio_reset_write_mds(pgio);
- else
- pnfs_generic_pg_init_write(pgio, req);
+ } else {
+ u64 wb_size;
+ if (pgio->pg_dreq == NULL)
+ wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
+ req->wb_index);
+ else
+ wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+
+ pnfs_generic_pg_init_write(pgio, req, wb_size);
+ }
+}
+
+static bool
+bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ struct nfs_page *req)
+{
+ if (pgio->pg_dreq != NULL &&
+ !is_aligned_req(req, PAGE_CACHE_SIZE))
+ return false;
+
+ return pnfs_generic_pg_test(pgio, prev, req);
}
static const struct nfs_pageio_ops bl_pg_read_ops = {
.pg_init = bl_pg_init_read,
- .pg_test = pnfs_generic_pg_test,
+ .pg_test = bl_pg_test_read,
.pg_doio = pnfs_generic_pg_readpages,
};
static const struct nfs_pageio_ops bl_pg_write_ops = {
.pg_init = bl_pg_init_write,
- .pg_test = pnfs_generic_pg_test,
+ .pg_test = bl_pg_test_write,
.pg_doio = pnfs_generic_pg_writepages,
};
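The blocklayout rework above lets direct I/O proceed when it is sector-aligned but not page-aligned: each page's dirty range is clipped to pg_offset/pg_len, and writes that are not even sector-aligned are first expanded to whole sectors and completed through bl_read_partial_page_sync() as a read-modify-write. A small model of the round-down/round-up expansion used there, assuming the 512-byte SECTOR_SIZE defined in the blocklayout.h hunk that follows:

#include <stdio.h>

#define SECTOR_SIZE 512u

/* Expand a dirty byte range within a page to whole sectors, as the
 * write path does before issuing the sync read-modify-write. */
static void expand_to_sectors(unsigned int off, unsigned int len,
                              unsigned int *start, unsigned int *end)
{
        *start = off & ~(SECTOR_SIZE - 1);                         /* round down */
        *end = (off + len + SECTOR_SIZE - 1) & ~(SECTOR_SIZE - 1); /* round up */
}

int main(void)
{
        unsigned int s, e;

        expand_to_sectors(700, 100, &s, &e);    /* dirty bytes 700..799 */
        printf("read sectors covering [%u, %u)\n", s, e);  /* [512, 1024) */
        return 0;
}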
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 03350690118e..f4891bde8851 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -41,6 +41,7 @@
#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
struct block_mount_id {
spinlock_t bm_lock; /* protects list */
@@ -172,7 +173,6 @@ struct bl_msg_hdr {
/* blocklayoutdev.c */
ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t);
void bl_pipe_destroy_msg(struct rpc_pipe_msg *);
-struct block_device *nfs4_blkdev_get(dev_t dev);
int nfs4_blkdev_put(struct block_device *bdev);
struct pnfs_block_dev *nfs4_blk_decode_device(struct nfs_server *server,
struct pnfs_device *dev);
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
index c96554245ccf..a86c5bdad9e3 100644
--- a/fs/nfs/blocklayout/blocklayoutdev.c
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -53,22 +53,6 @@ static int decode_sector_number(__be32 **rp, sector_t *sp)
return 0;
}
-/* Open a block_device by device number. */
-struct block_device *nfs4_blkdev_get(dev_t dev)
-{
- struct block_device *bd;
-
- dprintk("%s enter\n", __func__);
- bd = blkdev_get_by_dev(dev, FMODE_READ, NULL);
- if (IS_ERR(bd))
- goto fail;
- return bd;
-fail:
- dprintk("%s failed to open device : %ld\n",
- __func__, PTR_ERR(bd));
- return NULL;
-}
-
/*
* Release the block device
*/
@@ -172,11 +156,12 @@ nfs4_blk_decode_device(struct nfs_server *server,
goto out;
}
- bd = nfs4_blkdev_get(MKDEV(reply->major, reply->minor));
+ bd = blkdev_get_by_dev(MKDEV(reply->major, reply->minor),
+ FMODE_READ, NULL);
if (IS_ERR(bd)) {
- rc = PTR_ERR(bd);
- dprintk("%s failed to open device : %d\n", __func__, rc);
- rv = ERR_PTR(rc);
+ dprintk("%s failed to open device : %ld\n", __func__,
+ PTR_ERR(bd));
+ rv = ERR_CAST(bd);
goto out;
}
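The blocklayoutdev.c change drops the nfs4_blkdev_get() wrapper and forwards the error pointer from blkdev_get_by_dev() directly with ERR_CAST(), avoiding a PTR_ERR()/ERR_PTR() round trip and the old wrapper's lossy NULL return. A userspace model of the error-pointer helpers involved (illustrative only; the kernel versions live in <linux/err.h>):

#include <stdio.h>

/* Error pointers: errors are encoded in the top 4095 addresses. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
/* ERR_CAST: pass an error pointer through at a different pointer type. */
static inline void *ERR_CAST(const void *p) { return (void *)p; }

struct block_device;
struct pnfs_block_dev;

static struct pnfs_block_dev *decode(struct block_device *bd)
{
        if (IS_ERR(bd))
                return ERR_CAST(bd);    /* no PTR_ERR/ERR_PTR round trip */
        return NULL;                    /* real decoding happens elsewhere */
}

int main(void)
{
        struct block_device *bd = ERR_PTR(-5L);     /* -EIO */
        printf("%ld\n", PTR_ERR(decode(bd)));       /* prints -5 */
        return 0;
}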
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
index 1f9a6032796b..9c3e117c3ed1 100644
--- a/fs/nfs/blocklayout/extents.c
+++ b/fs/nfs/blocklayout/extents.c
@@ -683,8 +683,7 @@ encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
p = xdr_encode_hyper(p, lce->bse_length << SECTOR_SHIFT);
p = xdr_encode_hyper(p, 0LL);
*p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
- list_del(&lce->bse_node);
- list_add_tail(&lce->bse_node, &bl->bl_committing);
+ list_move_tail(&lce->bse_node, &bl->bl_committing);
bl->bl_count--;
count++;
}
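list_move_tail() is the <linux/list.h> helper equivalent to the deleted list_del()/list_add_tail() pair: unlink the entry from whatever list it is on, then append it to the new head. A simplified re-derivation (not the kernel's implementation):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_INIT(h) { &(h), &(h) }

static void __list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
}

/* list_move_tail(): the two steps above fused into one helper. */
static void list_move_tail(struct list_head *e, struct list_head *head)
{
        __list_del(e);
        list_add_tail(e, head);
}

int main(void)
{
        struct list_head pending = LIST_INIT(pending);
        struct list_head committing = LIST_INIT(committing);
        struct list_head entry;

        list_add_tail(&entry, &pending);
        list_move_tail(&entry, &committing);    /* was: list_del + list_add_tail */
        printf("moved: %d\n", committing.next == &entry);   /* 1 */
        return 0;
}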
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 4c8459e5bdee..5088b57b078a 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -12,6 +12,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/nfs_fs.h>
+#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -23,6 +24,7 @@
#include "nfs4_fs.h"
#include "callback.h"
#include "internal.h"
+#include "netns.h"
#define NFSDBG_FACILITY NFSDBG_CALLBACK
@@ -37,7 +39,32 @@ static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
-unsigned short nfs_callback_tcpport6;
+static int nfs4_callback_up_net(struct svc_serv *serv, struct net *net)
+{
+ int ret;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ ret = svc_create_xprt(serv, "tcp", net, PF_INET,
+ nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
+ if (ret <= 0)
+ goto out_err;
+ nn->nfs_callback_tcpport = ret;
+ dprintk("NFS: Callback listener port = %u (af %u, net %p)\n",
+ nn->nfs_callback_tcpport, PF_INET, net);
+
+ ret = svc_create_xprt(serv, "tcp", net, PF_INET6,
+ nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
+ if (ret > 0) {
+ nn->nfs_callback_tcpport6 = ret;
+ dprintk("NFS: Callback listener port = %u (af %u, net %p)\n",
+ nn->nfs_callback_tcpport6, PF_INET6, net);
+ } else if (ret != -EAFNOSUPPORT)
+ goto out_err;
+ return 0;
+
+out_err:
+ return (ret) ? ret : -ENOMEM;
+}
/*
* This is the NFSv4 callback kernel thread.
@@ -45,7 +72,7 @@ unsigned short nfs_callback_tcpport6;
static int
nfs4_callback_svc(void *vrqstp)
{
- int err, preverr = 0;
+ int err;
struct svc_rqst *rqstp = vrqstp;
set_freezable();
@@ -55,20 +82,8 @@ nfs4_callback_svc(void *vrqstp)
* Listen for a request on the socket
*/
err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
- if (err == -EAGAIN || err == -EINTR) {
- preverr = err;
- continue;
- }
- if (err < 0) {
- if (err != preverr) {
- printk(KERN_WARNING "NFS: %s: unexpected error "
- "from svc_recv (%d)\n", __func__, err);
- preverr = err;
- }
- schedule_timeout_uninterruptible(HZ);
+ if (err == -EAGAIN || err == -EINTR)
continue;
- }
- preverr = err;
svc_process(rqstp);
}
return 0;
@@ -78,38 +93,23 @@ nfs4_callback_svc(void *vrqstp)
* Prepare to bring up the NFSv4 callback service
*/
static struct svc_rqst *
-nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
+nfs4_callback_up(struct svc_serv *serv)
{
- int ret;
-
- ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
- nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
- if (ret <= 0)
- goto out_err;
- nfs_callback_tcpport = ret;
- dprintk("NFS: Callback listener port = %u (af %u)\n",
- nfs_callback_tcpport, PF_INET);
-
- ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
- nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
- if (ret > 0) {
- nfs_callback_tcpport6 = ret;
- dprintk("NFS: Callback listener port = %u (af %u)\n",
- nfs_callback_tcpport6, PF_INET6);
- } else if (ret == -EAFNOSUPPORT)
- ret = 0;
- else
- goto out_err;
-
return svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
-
-out_err:
- if (ret == 0)
- ret = -ENOMEM;
- return ERR_PTR(ret);
}
#if defined(CONFIG_NFS_V4_1)
+static int nfs41_callback_up_net(struct svc_serv *serv, struct net *net)
+{
+ /*
+ * Create an svc_sock for the back channel service that shares the
+ * fore channel connection.
+ * Returns the input port (0) and sets the svc_serv bc_xprt on success
+ */
+ return svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0,
+ SVC_SOCK_ANONYMOUS);
+}
+
/*
* The callback service for NFSv4.1 callbacks
*/
@@ -149,28 +149,9 @@ nfs41_callback_svc(void *vrqstp)
* Bring up the NFSv4.1 callback service
*/
static struct svc_rqst *
-nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
+nfs41_callback_up(struct svc_serv *serv)
{
struct svc_rqst *rqstp;
- int ret;
-
- /*
- * Create an svc_sock for the back channel service that shares the
- * fore channel connection.
- * Returns the input port (0) and sets the svc_serv bc_xprt on success
- */
- ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
- SVC_SOCK_ANONYMOUS);
- if (ret < 0) {
- rqstp = ERR_PTR(ret);
- goto out;
- }
-
- /*
- * Save the svc_serv in the transport so that it can
- * be referenced when the session backchannel is initialized
- */
- xprt->bc_serv = serv;
INIT_LIST_HEAD(&serv->sv_cb_list);
spin_lock_init(&serv->sv_cb_lock);
@@ -180,90 +161,74 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
svc_xprt_put(serv->sv_bc_xprt);
serv->sv_bc_xprt = NULL;
}
-out:
dprintk("--> %s return %ld\n", __func__,
IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
return rqstp;
}
-static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
- struct svc_serv *serv, struct rpc_xprt *xprt,
+static void nfs_minorversion_callback_svc_setup(struct svc_serv *serv,
struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
{
- if (minorversion) {
- *rqstpp = nfs41_callback_up(serv, xprt);
- *callback_svc = nfs41_callback_svc;
- }
- return minorversion;
+ *rqstpp = nfs41_callback_up(serv);
+ *callback_svc = nfs41_callback_svc;
}
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
- struct nfs_callback_data *cb_info)
+ struct svc_serv *serv)
{
if (minorversion)
- xprt->bc_serv = cb_info->serv;
+ /*
+ * Save the svc_serv in the transport so that it can
+ * be referenced when the session backchannel is initialized
+ */
+ xprt->bc_serv = serv;
}
#else
-static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
- struct svc_serv *serv, struct rpc_xprt *xprt,
- struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+static int nfs41_callback_up_net(struct svc_serv *serv, struct net *net)
{
return 0;
}
+static void nfs_minorversion_callback_svc_setup(struct svc_serv *serv,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ *rqstpp = ERR_PTR(-ENOTSUPP);
+ *callback_svc = ERR_PTR(-ENOTSUPP);
+}
+
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
- struct nfs_callback_data *cb_info)
+ struct svc_serv *serv)
{
}
#endif /* CONFIG_NFS_V4_1 */
-/*
- * Bring up the callback thread if it is not already up.
- */
-int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
+static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
+ struct svc_serv *serv)
{
- struct svc_serv *serv = NULL;
struct svc_rqst *rqstp;
int (*callback_svc)(void *vrqstp);
struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
char svc_name[12];
- int ret = 0;
- int minorversion_setup;
- struct net *net = &init_net;
+ int ret;
- mutex_lock(&nfs_callback_mutex);
- if (cb_info->users++ || cb_info->task != NULL) {
- nfs_callback_bc_serv(minorversion, xprt, cb_info);
- goto out;
- }
- serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
- if (!serv) {
- ret = -ENOMEM;
- goto out_err;
- }
- /* As there is only one thread we need to over-ride the
- * default maximum of 80 connections
- */
- serv->sv_maxconn = 1024;
+ nfs_callback_bc_serv(minorversion, xprt, serv);
- ret = svc_bind(serv, net);
- if (ret < 0) {
- printk(KERN_WARNING "NFS: bind callback service failed\n");
- goto out_err;
- }
+ if (cb_info->task)
+ return 0;
- minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion,
- serv, xprt, &rqstp, &callback_svc);
- if (!minorversion_setup) {
+ switch (minorversion) {
+ case 0:
/* v4.0 callback setup */
- rqstp = nfs4_callback_up(serv, xprt);
+ rqstp = nfs4_callback_up(serv);
callback_svc = nfs4_callback_svc;
+ break;
+ default:
+ nfs_minorversion_callback_svc_setup(serv,
+ &rqstp, &callback_svc);
}
- if (IS_ERR(rqstp)) {
- ret = PTR_ERR(rqstp);
- goto out_err;
- }
+ if (IS_ERR(rqstp))
+ return PTR_ERR(rqstp);
svc_sock_update_bufs(serv);
@@ -276,41 +241,165 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
svc_exit_thread(cb_info->rqst);
cb_info->rqst = NULL;
cb_info->task = NULL;
- goto out_err;
+ return ret;
+ }
+ dprintk("nfs_callback_up: service started\n");
+ return 0;
+}
+
+static void nfs_callback_down_net(u32 minorversion, struct svc_serv *serv, struct net *net)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ if (--nn->cb_users[minorversion])
+ return;
+
+ dprintk("NFS: destroy per-net callback data; net=%p\n", net);
+ svc_shutdown_net(serv, net);
+}
+
+static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, struct net *net)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ int ret;
+
+ if (nn->cb_users[minorversion]++)
+ return 0;
+
+ dprintk("NFS: create per-net callback data; net=%p\n", net);
+
+ ret = svc_bind(serv, net);
+ if (ret < 0) {
+ printk(KERN_WARNING "NFS: bind callback service failed\n");
+ goto err_bind;
+ }
+
+ switch (minorversion) {
+ case 0:
+ ret = nfs4_callback_up_net(serv, net);
+ break;
+ case 1:
+ ret = nfs41_callback_up_net(serv, net);
+ break;
+ default:
+ printk(KERN_ERR "NFS: unknown callback version: %d\n",
+ minorversion);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret < 0) {
+ printk(KERN_ERR "NFS: callback service start failed\n");
+ goto err_socks;
+ }
+ return 0;
+
+err_socks:
+ svc_rpcb_cleanup(serv, net);
+err_bind:
+ dprintk("NFS: Couldn't create callback socket: err = %d; "
+ "net = %p\n", ret, net);
+ return ret;
+}
+
+static struct svc_serv *nfs_callback_create_svc(int minorversion)
+{
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+ struct svc_serv *serv;
+
+ /*
+ * Check whether we're already up and running.
+ */
+ if (cb_info->task) {
+ /*
+ * Note: take an extra reference on the service here, because
+ * svc_destroy() will be called later even on the error path.
+ */
+ svc_get(cb_info->serv);
+ return cb_info->serv;
+ }
+
+ /*
+ * Sanity check: if there's no task,
+ * we should be the first user ...
+ */
+ if (cb_info->users)
+ printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
+ cb_info->users);
+
+ serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
+ if (!serv) {
+ printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ /* As there is only one thread we need to over-ride the
+ * default maximum of 80 connections
+ */
+ serv->sv_maxconn = 1024;
+ dprintk("nfs_callback_create_svc: service created\n");
+ return serv;
+}
+
+/*
+ * Bring up the callback thread if it is not already up.
+ */
+int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
+{
+ struct svc_serv *serv;
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+ int ret;
+ struct net *net = xprt->xprt_net;
+
+ mutex_lock(&nfs_callback_mutex);
+
+ serv = nfs_callback_create_svc(minorversion);
+ if (IS_ERR(serv)) {
+ ret = PTR_ERR(serv);
+ goto err_create;
}
-out:
+
+ ret = nfs_callback_up_net(minorversion, serv, net);
+ if (ret < 0)
+ goto err_net;
+
+ ret = nfs_callback_start_svc(minorversion, xprt, serv);
+ if (ret < 0)
+ goto err_start;
+
+ cb_info->users++;
/*
* svc_create creates the svc_serv with sv_nrthreads == 1, and then
* svc_prepare_thread increments that. So we need to call svc_destroy
* on both success and failure so that the refcount is 1 when the
* thread exits.
*/
- if (serv)
- svc_destroy(serv);
+err_net:
+ svc_destroy(serv);
+err_create:
mutex_unlock(&nfs_callback_mutex);
return ret;
-out_err:
- dprintk("NFS: Couldn't create callback socket or server thread; "
- "err = %d\n", ret);
- cb_info->users--;
- if (serv)
- svc_shutdown_net(serv, net);
- goto out;
+
+err_start:
+ nfs_callback_down_net(minorversion, serv, net);
+ dprintk("NFS: Couldn't create server thread; err = %d\n", ret);
+ goto err_net;
}
/*
* Kill the callback thread if it's no longer being used.
*/
-void nfs_callback_down(int minorversion)
+void nfs_callback_down(int minorversion, struct net *net)
{
struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
mutex_lock(&nfs_callback_mutex);
+ nfs_callback_down_net(minorversion, cb_info->serv, net);
cb_info->users--;
if (cb_info->users == 0 && cb_info->task != NULL) {
kthread_stop(cb_info->task);
- svc_shutdown_net(cb_info->serv, &init_net);
+ dprintk("nfs_callback_down: service stopped\n");
svc_exit_thread(cb_info->rqst);
+ dprintk("nfs_callback_down: service destroyed\n");
cb_info->serv = NULL;
cb_info->rqst = NULL;
cb_info->task = NULL;
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index b44d7b128b71..4251c2ae06ad 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -194,7 +194,7 @@ extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
struct cb_process_state *cps);
#if IS_ENABLED(CONFIG_NFS_V4)
extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
-extern void nfs_callback_down(int minorversion);
+extern void nfs_callback_down(int minorversion, struct net *net);
extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
const nfs4_stateid *stateid);
extern int nfs4_set_callback_sessionid(struct nfs_client *clp);
@@ -209,6 +209,5 @@ extern int nfs4_set_callback_sessionid(struct nfs_client *clp);
extern unsigned int nfs_callback_set_tcpport;
extern unsigned short nfs_callback_tcpport;
-extern unsigned short nfs_callback_tcpport6;
#endif /* __LINUX_FS_NFS_CALLBACK_H */
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 1b5d809a105e..76b4a7a3e559 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -122,7 +122,15 @@ static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
ino = igrab(lo->plh_inode);
if (!ino)
continue;
- get_layout_hdr(lo);
+ spin_lock(&ino->i_lock);
+ /* Is this layout in the process of being freed? */
+ if (NFS_I(ino)->layout != lo) {
+ spin_unlock(&ino->i_lock);
+ iput(ino);
+ continue;
+ }
+ pnfs_get_layout_hdr(lo);
+ spin_unlock(&ino->i_lock);
return lo;
}
}
@@ -158,7 +166,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
ino = lo->plh_inode;
spin_lock(&ino->i_lock);
if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
- mark_matching_lsegs_invalid(lo, &free_me_list,
+ pnfs_mark_matching_lsegs_invalid(lo, &free_me_list,
&args->cbl_range))
rv = NFS4ERR_DELAY;
else
@@ -166,7 +174,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&free_me_list);
- put_layout_hdr(lo);
+ pnfs_put_layout_hdr(lo);
iput(ino);
return rv;
}
@@ -196,9 +204,18 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
continue;
list_for_each_entry(lo, &server->layouts, plh_layouts) {
- if (!igrab(lo->plh_inode))
+ ino = igrab(lo->plh_inode);
+ if (!ino)
+ continue;
+ spin_lock(&ino->i_lock);
+ /* Is this layout in the process of being freed? */
+ if (NFS_I(ino)->layout != lo) {
+ spin_unlock(&ino->i_lock);
+ iput(ino);
continue;
- get_layout_hdr(lo);
+ }
+ pnfs_get_layout_hdr(lo);
+ spin_unlock(&ino->i_lock);
BUG_ON(!list_empty(&lo->plh_bulk_recall));
list_add(&lo->plh_bulk_recall, &recall_list);
}
@@ -211,12 +228,12 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
ino = lo->plh_inode;
spin_lock(&ino->i_lock);
set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
- if (mark_matching_lsegs_invalid(lo, &free_me_list, &range))
+ if (pnfs_mark_matching_lsegs_invalid(lo, &free_me_list, &range))
rv = NFS4ERR_DELAY;
list_del_init(&lo->plh_bulk_recall);
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&free_me_list);
- put_layout_hdr(lo);
+ pnfs_put_layout_hdr(lo);
iput(ino);
}
return rv;
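Both callback_proc.c hunks add the same check-after-grab step: once igrab() has pinned the inode, the code re-checks under i_lock that NFS_I(ino)->layout still points at this layout header before taking a reference, so a header that is concurrently being freed is skipped rather than resurrected. A generic userspace sketch of the idiom (a pthread mutex plays the role of i_lock; all names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct object;

struct owner {
        pthread_mutex_t lock;           /* i_lock */
        struct object *current_obj;     /* NFS_I(ino)->layout */
};

struct object {
        struct owner *owner;
        int refcount;
};

/* Pin the owner first (igrab() in the hunks above), then revalidate
 * the back pointer under the owner's lock before taking a reference. */
static bool try_get_object(struct object *obj)
{
        struct owner *o = obj->owner;
        bool ok = false;

        pthread_mutex_lock(&o->lock);
        if (o->current_obj == obj) {    /* not in the process of being freed */
                obj->refcount++;        /* pnfs_get_layout_hdr() */
                ok = true;
        }
        pthread_mutex_unlock(&o->lock);
        return ok;
}

int main(void)
{
        struct owner o = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct object obj = { &o, 0 };

        o.current_obj = &obj;
        printf("%d\n", try_get_object(&obj));   /* 1: still current */
        o.current_obj = NULL;                   /* owner dropped it */
        printf("%d\n", try_get_object(&obj));   /* 0: skip and move on */
        return 0;
}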
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 99694442b93f..8b39a42ac35e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -93,10 +93,10 @@ static struct nfs_subversion *find_nfs_version(unsigned int version)
spin_unlock(&nfs_version_lock);
return nfs;
}
- };
+ }
spin_unlock(&nfs_version_lock);
- return ERR_PTR(-EPROTONOSUPPORT);;
+ return ERR_PTR(-EPROTONOSUPPORT);
}
struct nfs_subversion *get_nfs_version(unsigned int version)
@@ -498,7 +498,8 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
return nfs_found_client(cl_init, clp);
}
if (new) {
- list_add(&new->cl_share_link, &nn->nfs_client_list);
+ list_add_tail(&new->cl_share_link,
+ &nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
new->cl_flags = cl_init->init_flags;
return rpc_ops->init_client(new, timeparms, ip_addr,
@@ -668,7 +669,8 @@ int nfs_init_server_rpcclient(struct nfs_server *server,
{
struct nfs_client *clp = server->nfs_client;
- server->client = rpc_clone_client(clp->cl_rpcclient);
+ server->client = rpc_clone_client_set_auth(clp->cl_rpcclient,
+ pseudoflavour);
if (IS_ERR(server->client)) {
dprintk("%s: couldn't create rpc_client!\n", __func__);
return PTR_ERR(server->client);
@@ -678,16 +680,6 @@ int nfs_init_server_rpcclient(struct nfs_server *server,
timeo,
sizeof(server->client->cl_timeout_default));
server->client->cl_timeout = &server->client->cl_timeout_default;
-
- if (pseudoflavour != clp->cl_rpcclient->cl_auth->au_flavor) {
- struct rpc_auth *auth;
-
- auth = rpcauth_create(pseudoflavour, server->client);
- if (IS_ERR(auth)) {
- dprintk("%s: couldn't create credcache!\n", __func__);
- return PTR_ERR(auth);
- }
- }
server->client->cl_softrtry = 0;
if (server->flags & NFS_MOUNT_SOFT)
server->client->cl_softrtry = 1;
@@ -761,6 +753,8 @@ static int nfs_init_server(struct nfs_server *server,
data->timeo, data->retrans);
if (data->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (server->options & NFS_OPTION_MIGRATION)
+ set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
@@ -855,7 +849,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
if (server->wsize > NFS_MAX_FILE_IO_SIZE)
server->wsize = NFS_MAX_FILE_IO_SIZE;
server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- server->pnfs_blksize = fsinfo->blksize;
server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 627f108ede23..ce8cb926526b 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2072,7 +2072,7 @@ found:
nfs_access_free_entry(entry);
}
-static void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
+void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
{
struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL)
@@ -2098,6 +2098,20 @@ static void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *s
spin_unlock(&nfs_access_lru_lock);
}
}
+EXPORT_SYMBOL_GPL(nfs_access_add_cache);
+
+void nfs_access_set_mask(struct nfs_access_entry *entry, u32 access_result)
+{
+ entry->mask = 0;
+ if (access_result & NFS4_ACCESS_READ)
+ entry->mask |= MAY_READ;
+ if (access_result &
+ (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
+ entry->mask |= MAY_WRITE;
+ if (access_result & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
+ entry->mask |= MAY_EXEC;
+}
+EXPORT_SYMBOL_GPL(nfs_access_set_mask);
static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
{
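nfs_access_set_mask() reduces the ACCESS bits returned by an NFSv4 server to the generic MAY_READ/MAY_WRITE/MAY_EXEC mask the VFS works with: any of MODIFY, EXTEND or DELETE grants write, and either LOOKUP (for directories) or EXECUTE (for files) grants exec. A self-contained usage sketch (the constant values are reproduced here for illustration; the kernel takes them from <linux/nfs4.h> and <linux/fs.h>):

#include <assert.h>

/* Values shown for illustration -- the kernel defines these elsewhere. */
#define NFS4_ACCESS_READ    0x01
#define NFS4_ACCESS_LOOKUP  0x02
#define NFS4_ACCESS_MODIFY  0x04
#define NFS4_ACCESS_EXTEND  0x08
#define NFS4_ACCESS_DELETE  0x10
#define NFS4_ACCESS_EXECUTE 0x20
#define MAY_EXEC  0x1
#define MAY_WRITE 0x2
#define MAY_READ  0x4

static int access_to_may(unsigned int access_result)
{
        int mask = 0;

        if (access_result & NFS4_ACCESS_READ)
                mask |= MAY_READ;
        if (access_result &
            (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
                mask |= MAY_WRITE;
        if (access_result & (NFS4_ACCESS_LOOKUP | NFS4_ACCESS_EXECUTE))
                mask |= MAY_EXEC;
        return mask;
}

int main(void)
{
        /* Server grants READ+MODIFY: client caches MAY_READ|MAY_WRITE. */
        assert(access_to_may(NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY)
               == (MAY_READ | MAY_WRITE));
        return 0;
}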
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 1ba385b7c90d..cae26cbd59ee 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -46,6 +46,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
+#include <linux/module.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
@@ -78,6 +79,7 @@ struct nfs_direct_req {
atomic_t io_count; /* i/os we're waiting for */
spinlock_t lock; /* protect completion state */
ssize_t count, /* bytes actually processed */
+ bytes_left, /* bytes left to be sent */
error; /* any reported error */
struct completion completion; /* wait for i/o completion */
@@ -190,6 +192,12 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
kref_put(&dreq->kref, nfs_direct_req_free);
}
+ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
+{
+ return dreq->bytes_left;
+}
+EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
+
/*
* Collects and returns the final error value/byte-count.
*/
@@ -390,6 +398,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
user_addr += req_len;
pos += req_len;
count -= req_len;
+ dreq->bytes_left -= req_len;
}
/* The nfs_page now hold references to these pages */
nfs_direct_release_pages(pagevec, npages);
@@ -450,23 +459,28 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct nfs_direct_req *dreq;
+ struct nfs_lock_context *l_ctx;
dreq = nfs_direct_req_alloc();
if (dreq == NULL)
goto out;
dreq->inode = inode;
+ dreq->bytes_left = iov_length(iov, nr_segs);
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
- dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
- if (dreq->l_ctx == NULL)
+ l_ctx = nfs_get_lock_context(dreq->ctx);
+ if (IS_ERR(l_ctx)) {
+ result = PTR_ERR(l_ctx);
goto out_release;
+ }
+ dreq->l_ctx = l_ctx;
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
+ NFS_I(inode)->read_io += iov_length(iov, nr_segs);
result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
if (!result)
result = nfs_direct_wait(dreq);
- NFS_I(inode)->read_io += result;
out_release:
nfs_direct_req_release(dreq);
out:
@@ -706,6 +720,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d
user_addr += req_len;
pos += req_len;
count -= req_len;
+ dreq->bytes_left -= req_len;
}
/* The nfs_page now holds references to these pages */
nfs_direct_release_pages(pagevec, npages);
@@ -814,6 +829,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
get_dreq(dreq);
atomic_inc(&inode->i_dio_count);
+ NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
@@ -825,7 +841,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
pos += vec->iov_len;
}
nfs_pageio_complete(&desc);
- NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;
/*
* If no bytes were started, return the error, and let the
@@ -849,16 +864,21 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct nfs_direct_req *dreq;
+ struct nfs_lock_context *l_ctx;
dreq = nfs_direct_req_alloc();
if (!dreq)
goto out;
dreq->inode = inode;
+ dreq->bytes_left = count;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
- dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
- if (dreq->l_ctx == NULL)
+ l_ctx = nfs_get_lock_context(dreq->ctx);
+ if (IS_ERR(l_ctx)) {
+ result = PTR_ERR(l_ctx);
goto out_release;
+ }
+ dreq->l_ctx = l_ctx;
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6a7fcab7ecb3..582bb8866131 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -259,7 +259,7 @@ nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync)
struct dentry *dentry = file->f_path.dentry;
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = dentry->d_inode;
- int have_error, status;
+ int have_error, do_resend, status;
int ret = 0;
dprintk("NFS: fsync file(%s/%s) datasync %d\n",
@@ -267,15 +267,23 @@ nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync)
datasync);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
+ do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
status = nfs_commit_inode(inode, FLUSH_SYNC);
- if (status >= 0 && ret < 0)
- status = ret;
have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
- if (have_error)
+ if (have_error) {
ret = xchg(&ctx->error, 0);
- if (!ret && status < 0)
+ if (ret)
+ goto out;
+ }
+ if (status < 0) {
ret = status;
+ goto out;
+ }
+ do_resend |= test_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
+ if (do_resend)
+ ret = -EAGAIN;
+out:
return ret;
}
EXPORT_SYMBOL_GPL(nfs_file_fsync_commit);
@@ -286,13 +294,22 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
int ret;
struct inode *inode = file->f_path.dentry->d_inode;
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (ret != 0)
- goto out;
- mutex_lock(&inode->i_mutex);
- ret = nfs_file_fsync_commit(file, start, end, datasync);
- mutex_unlock(&inode->i_mutex);
-out:
+ do {
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (ret != 0)
+ break;
+ mutex_lock(&inode->i_mutex);
+ ret = nfs_file_fsync_commit(file, start, end, datasync);
+ mutex_unlock(&inode->i_mutex);
+ /*
+ * If nfs_file_fsync_commit detected a server reboot, then
+ * resend all dirty pages that might have been covered by
+ * the NFS_CONTEXT_RESEND_WRITES flag.
+ */
+ start = 0;
+ end = LLONG_MAX;
+ } while (ret == -EAGAIN);
+
return ret;
}
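/*
 * Illustrative, self-contained sketch (not part of this patch) of the
 * retry pattern nfs_file_fsync() now uses: if the commit step reports
 * -EAGAIN (server reboot detected, writes need resending), widen the
 * range to the whole file and try again. The helper below is a stub
 * invented for the example.
 */
#include <errno.h>
#include <stdio.h>

#define LLONG_MAX_SKETCH 0x7fffffffffffffffLL

static int attempts;

/* stub: pretend the first commit hits a rebooted server */
static int writeback_and_commit(long long start, long long end)
{
	(void)start;
	(void)end;
	return ++attempts == 1 ? -EAGAIN : 0;
}

static int fsync_with_resend(long long start, long long end)
{
	int ret;

	do {
		ret = writeback_and_commit(start, end);
		/* a reboot may invalidate earlier ranges too: resend all */
		start = 0;
		end = LLONG_MAX_SKETCH;
	} while (ret == -EAGAIN);
	return ret;
}

int main(void)
{
	printf("ret=%d attempts=%d\n", fsync_with_resend(0, 4096), attempts);
	return 0;
}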
@@ -578,6 +595,7 @@ out:
static const struct vm_operations_struct nfs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = nfs_vm_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
static int nfs_need_sync_write(struct file *filp, struct inode *inode)
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 4654ced096a6..033803c36644 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -32,6 +32,8 @@
#include <asm/uaccess.h>
+#include "internal.h"
+
#define NFSDBG_FACILITY NFSDBG_CLIENT
/*
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index a850079467d8..9cc4a3fbf4b0 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -55,18 +55,19 @@
static const struct cred *id_resolver_cache;
static struct key_type key_type_id_resolver_legacy;
-struct idmap {
- struct rpc_pipe *idmap_pipe;
- struct key_construction *idmap_key_cons;
- struct mutex idmap_mutex;
-};
-
struct idmap_legacy_upcalldata {
struct rpc_pipe_msg pipe_msg;
struct idmap_msg idmap_msg;
+ struct key_construction *key_cons;
struct idmap *idmap;
};
+struct idmap {
+ struct rpc_pipe *idmap_pipe;
+ struct idmap_legacy_upcalldata *idmap_upcall_data;
+ struct mutex idmap_mutex;
+};
+
/**
* nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
* @fattr: fully initialised struct nfs_fattr
@@ -158,7 +159,7 @@ static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *re
return 0;
memcpy(buf, name, namelen);
buf[namelen] = '\0';
- if (strict_strtoul(buf, 0, &val) != 0)
+ if (kstrtoul(buf, 0, &val) != 0)
return 0;
*res = val;
return 1;
@@ -330,7 +331,6 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
name, namelen, type, data,
data_size, idmap);
- idmap->idmap_key_cons = NULL;
mutex_unlock(&idmap->idmap_mutex);
}
return ret;
@@ -364,7 +364,7 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ
if (data_size <= 0) {
ret = -EINVAL;
} else {
- ret = strict_strtol(id_str, 10, &id_long);
+ ret = kstrtol(id_str, 10, &id_long);
*id = (__u32)id_long;
}
return ret;
@@ -465,8 +465,6 @@ nfs_idmap_new(struct nfs_client *clp)
struct rpc_pipe *pipe;
int error;
- BUG_ON(clp->cl_idmap != NULL);
-
idmap = kzalloc(sizeof(*idmap), GFP_KERNEL);
if (idmap == NULL)
return -ENOMEM;
@@ -510,7 +508,6 @@ static int __rpc_pipefs_event(struct nfs_client *clp, unsigned long event,
switch (event) {
case RPC_PIPEFS_MOUNT:
- BUG_ON(clp->cl_rpcclient->cl_dentry == NULL);
err = __nfs_idmap_register(clp->cl_rpcclient->cl_dentry,
clp->cl_idmap,
clp->cl_idmap->idmap_pipe);
@@ -632,9 +629,6 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
substring_t substr;
int token, ret;
- memset(im, 0, sizeof(*im));
- memset(msg, 0, sizeof(*msg));
-
im->im_type = IDMAP_TYPE_GROUP;
token = match_token(desc, nfs_idmap_tokens, &substr);
@@ -665,6 +659,35 @@ out:
return ret;
}
+static bool
+nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
+ struct idmap_legacy_upcalldata *data)
+{
+ if (idmap->idmap_upcall_data != NULL) {
+ WARN_ON_ONCE(1);
+ return false;
+ }
+ idmap->idmap_upcall_data = data;
+ return true;
+}
+
+static void
+nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
+{
+ struct key_construction *cons = idmap->idmap_upcall_data->key_cons;
+
+ kfree(idmap->idmap_upcall_data);
+ idmap->idmap_upcall_data = NULL;
+ complete_request_key(cons, ret);
+}
+
+static void
+nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
+{
+ if (idmap->idmap_upcall_data != NULL)
+ nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
+}
+
static int nfs_idmap_legacy_upcall(struct key_construction *cons,
const char *op,
void *aux)
@@ -677,29 +700,28 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
int ret = -ENOMEM;
/* msg and im are freed in idmap_pipe_destroy_msg */
- data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out1;
msg = &data->pipe_msg;
im = &data->idmap_msg;
data->idmap = idmap;
+ data->key_cons = cons;
ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
if (ret < 0)
goto out2;
- BUG_ON(idmap->idmap_key_cons != NULL);
- idmap->idmap_key_cons = cons;
+ ret = -EAGAIN;
+ if (!nfs_idmap_prepare_pipe_upcall(idmap, data))
+ goto out2;
ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
if (ret < 0)
- goto out3;
+ nfs_idmap_abort_pipe_upcall(idmap, ret);
return ret;
-
-out3:
- idmap->idmap_key_cons = NULL;
out2:
kfree(data);
out1:
@@ -714,21 +736,32 @@ static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *dat
authkey);
}
-static int nfs_idmap_read_message(struct idmap_msg *im, struct key *key, struct key *authkey)
+static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
+ struct idmap_msg *upcall,
+ struct key *key, struct key *authkey)
{
char id_str[NFS_UINT_MAXLEN];
- int ret = -EINVAL;
+ int ret = -ENOKEY;
+ /* any mismatch below leaves ret == -ENOKEY */
+ if (upcall->im_type != im->im_type || upcall->im_conv != im->im_conv)
+ goto out;
switch (im->im_conv) {
case IDMAP_CONV_NAMETOID:
+ if (strcmp(upcall->im_name, im->im_name) != 0)
+ break;
sprintf(id_str, "%d", im->im_id);
ret = nfs_idmap_instantiate(key, authkey, id_str);
break;
case IDMAP_CONV_IDTONAME:
+ if (upcall->im_id != im->im_id)
+ break;
ret = nfs_idmap_instantiate(key, authkey, im->im_name);
break;
+ default:
+ ret = -EINVAL;
}
-
+out:
return ret;
}
@@ -740,14 +773,16 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
struct key_construction *cons;
struct idmap_msg im;
size_t namelen_in;
- int ret;
+ int ret = -ENOKEY;
/* If instantiation is successful, anyone waiting for key construction
* will have been woken up and someone else may now have used
* idmap_key_cons - so after this point we may no longer touch it.
*/
- cons = ACCESS_ONCE(idmap->idmap_key_cons);
- idmap->idmap_key_cons = NULL;
+ if (idmap->idmap_upcall_data == NULL)
+ goto out_noupcall;
+
+ cons = idmap->idmap_upcall_data->key_cons;
if (mlen != sizeof(im)) {
ret = -ENOSPC;
@@ -768,16 +803,19 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) {
ret = -EINVAL;
goto out;
- }
+ }
- ret = nfs_idmap_read_message(&im, cons->key, cons->authkey);
+ ret = nfs_idmap_read_and_verify_message(&im,
+ &idmap->idmap_upcall_data->idmap_msg,
+ cons->key, cons->authkey);
if (ret >= 0) {
key_set_timeout(cons->key, nfs_idmap_cache_timeout);
ret = mlen;
}
out:
- complete_request_key(cons, ret);
+ nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
+out_noupcall:
return ret;
}
@@ -788,14 +826,9 @@ idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
struct idmap_legacy_upcalldata,
pipe_msg);
struct idmap *idmap = data->idmap;
- struct key_construction *cons;
- if (msg->errno) {
- cons = ACCESS_ONCE(idmap->idmap_key_cons);
- idmap->idmap_key_cons = NULL;
- complete_request_key(cons, msg->errno);
- }
- /* Free memory allocated in nfs_idmap_legacy_upcall() */
- kfree(data);
+
+ if (msg->errno)
+ nfs_idmap_abort_pipe_upcall(idmap, msg->errno);
}
static void
@@ -803,7 +836,8 @@ idmap_release_pipe(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
struct idmap *idmap = (struct idmap *)rpci->private;
- idmap->idmap_key_cons = NULL;
+
+ nfs_idmap_abort_pipe_upcall(idmap, -EPIPE);
}
int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
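/*
 * Illustrative, self-contained sketch (not part of this patch) of the
 * idmapper's new single-slot upcall bookkeeping: at most one legacy
 * upcall may be outstanding, and completing or aborting it frees the
 * slot for the next request. Types and names are local stand-ins.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct upcall {
	int id;
};

static struct upcall *slot;	/* models idmap->idmap_upcall_data */

static int prepare_upcall(struct upcall *u)
{
	if (slot != NULL)	/* a second concurrent upcall is a bug */
		return -EAGAIN;
	slot = u;
	return 0;
}

static void complete_upcall(int status)
{
	printf("upcall %d completed, status %d\n", slot->id, status);
	free(slot);
	slot = NULL;	/* slot is free for the next upcall */
}

static void abort_upcall(int status)
{
	if (slot != NULL)
		complete_upcall(status);
}

int main(void)
{
	struct upcall *u = malloc(sizeof(*u));

	u->id = 1;
	if (prepare_upcall(u) == 0)
		abort_upcall(-EPIPE);	/* e.g. the pipe was torn down */
	return 0;
}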
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e4c716d374a8..5c7325c5c5e6 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -547,8 +547,8 @@ EXPORT_SYMBOL_GPL(nfs_getattr);
static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
{
atomic_set(&l_ctx->count, 1);
- l_ctx->lockowner = current->files;
- l_ctx->pid = current->tgid;
+ l_ctx->lockowner.l_owner = current->files;
+ l_ctx->lockowner.l_pid = current->tgid;
INIT_LIST_HEAD(&l_ctx->list);
}
@@ -557,9 +557,9 @@ static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context
struct nfs_lock_context *pos;
list_for_each_entry(pos, &ctx->lock_context.list, list) {
- if (pos->lockowner != current->files)
+ if (pos->lockowner.l_owner != current->files)
continue;
- if (pos->pid != current->tgid)
+ if (pos->lockowner.l_pid != current->tgid)
continue;
atomic_inc(&pos->count);
return pos;
@@ -578,7 +578,7 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
spin_unlock(&inode->i_lock);
new = kmalloc(sizeof(*new), GFP_KERNEL);
if (new == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
nfs_init_lock_context(new);
spin_lock(&inode->i_lock);
res = __nfs_find_lock_context(ctx);
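/*
 * Illustrative, self-contained sketch (not part of this patch) of the
 * ERR_PTR()/IS_ERR()/PTR_ERR() idiom that nfs_get_lock_context() and
 * its callers switch to above. The macros are re-derived here so the
 * example compiles in userspace; MAX_ERRNO mirrors the kernel value.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long error)
{
	return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* returns a context or an encoded errno, never NULL */
static void *get_lock_context(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(16);
}

int main(void)
{
	void *ctx = get_lock_context(1);

	if (IS_ERR(ctx)) {
		printf("error %ld\n", PTR_ERR(ctx));	/* -12 (ENOMEM) */
		return 1;
	}
	free(ctx);
	return 0;
}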
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 31fdb03225cd..59b133c5d652 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -101,11 +101,11 @@ struct nfs_client_initdata {
*/
struct nfs_parsed_mount_data {
int flags;
- int rsize, wsize;
- int timeo, retrans;
- int acregmin, acregmax,
+ unsigned int rsize, wsize;
+ unsigned int timeo, retrans;
+ unsigned int acregmin, acregmax,
acdirmin, acdirmax;
- int namlen;
+ unsigned int namlen;
unsigned int options;
unsigned int bsize;
unsigned int auth_flavor_len;
@@ -464,6 +464,7 @@ static inline void nfs_inode_dio_wait(struct inode *inode)
{
inode_dio_wait(inode);
}
+extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
/* nfs4proc.c */
extern void __nfs4_read_done_cb(struct nfs_read_data *);
@@ -483,6 +484,12 @@ extern int _nfs4_call_sync_session(struct rpc_clnt *clnt,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply);
+extern int nfs40_walk_client_list(struct nfs_client *clp,
+ struct nfs_client **result,
+ struct rpc_cred *cred);
+extern int nfs41_walk_client_list(struct nfs_client *clp,
+ struct nfs_client **result,
+ struct rpc_cred *cred);
/*
* Determine the device name as a string
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index 0539de1b8d1f..8ee1fab83268 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -5,6 +5,7 @@
#ifndef __NFS_NETNS_H__
#define __NFS_NETNS_H__
+#include <linux/nfs4.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
@@ -22,6 +23,9 @@ struct nfs_net {
struct list_head nfs_volume_list;
#if IS_ENABLED(CONFIG_NFS_V4)
struct idr cb_ident_idr; /* Protected by nfs_client_lock */
+ unsigned short nfs_callback_tcpport;
+ unsigned short nfs_callback_tcpport6;
+ int cb_users[NFS4_MAX_MINOR_VERSION + 1];
#endif
spinlock_t nfs_client_lock;
struct timespec boot_time;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index da0618aeeadb..a525fdefccde 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -132,8 +132,8 @@ struct nfs4_lock_owner {
struct nfs4_lock_state {
struct list_head ls_locks; /* Other lock stateids */
struct nfs4_state * ls_state; /* Pointer to open state */
-#define NFS_LOCK_INITIALIZED 1
- int ls_flags;
+#define NFS_LOCK_INITIALIZED 0
+ unsigned long ls_flags;
struct nfs_seqid_counter ls_seqid;
nfs4_stateid ls_stateid;
atomic_t ls_count;
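/*
 * Illustrative, self-contained sketch (not part of this patch): why
 * NFS_LOCK_INITIALIZED changes from a mask (1) to a bit number (0).
 * set_bit()/test_bit()/clear_bit() take bit *indices* into an
 * unsigned long and are atomic in the kernel; plain mask arithmetic
 * on an int is not. The stand-ins below use GCC/Clang atomic builtins.
 */
#include <stdio.h>

#define NFS_LOCK_INITIALIZED 0	/* a bit number, not a mask */

static void set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static void clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

int main(void)
{
	unsigned long ls_flags = 0;

	set_bit(NFS_LOCK_INITIALIZED, &ls_flags);
	printf("initialized=%d\n", test_bit(NFS_LOCK_INITIALIZED, &ls_flags));
	clear_bit(NFS_LOCK_INITIALIZED, &ls_flags);
	printf("initialized=%d\n", test_bit(NFS_LOCK_INITIALIZED, &ls_flags));
	return 0;
}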
@@ -191,6 +191,8 @@ struct nfs4_state_recovery_ops {
int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
int (*reclaim_complete)(struct nfs_client *);
+ int (*detect_trunking)(struct nfs_client *, struct nfs_client **,
+ struct rpc_cred *);
};
struct nfs4_state_maintenance_ops {
@@ -223,7 +225,7 @@ extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
extern int nfs4_destroy_clientid(struct nfs_client *clp);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
+extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *,
struct nfs4_fs_locations *, struct page *);
@@ -320,9 +322,15 @@ extern void nfs4_renew_state(struct work_struct *);
/* nfs4state.c */
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
+int nfs4_discover_server_trunking(struct nfs_client *clp,
+ struct nfs_client **);
+int nfs40_discover_server_trunking(struct nfs_client *clp,
+ struct nfs_client **, struct rpc_cred *);
#if defined(CONFIG_NFS_V4_1)
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+int nfs41_discover_server_trunking(struct nfs_client *clp,
+ struct nfs_client **, struct rpc_cred *);
extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
#else
static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
@@ -351,7 +359,7 @@ extern void nfs41_handle_server_scope(struct nfs_client *,
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
- fmode_t, fl_owner_t, pid_t);
+ fmode_t, const struct nfs_lockowner *);
extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
@@ -372,6 +380,9 @@ extern bool nfs4_disable_idmapping;
extern unsigned short max_session_slots;
extern unsigned short send_implementation_id;
+#define NFS4_CLIENT_ID_UNIQ_LEN (64)
+extern char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN];
+
/* nfs4sysctl.c */
#ifdef CONFIG_SYSCTL
int nfs4_register_sysctl(void);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 24eb663f8ed5..6bacfde1319a 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -84,7 +84,7 @@ error:
static void nfs4_destroy_callback(struct nfs_client *clp)
{
if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down(clp->cl_mvops->minor_version);
+ nfs_callback_down(clp->cl_mvops->minor_version, clp->cl_net);
}
static void nfs4_shutdown_client(struct nfs_client *clp)
@@ -185,6 +185,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
rpc_authflavor_t authflavour)
{
char buf[INET6_ADDRSTRLEN + 1];
+ struct nfs_client *old;
int error;
if (clp->cl_cons_state == NFS_CS_READY) {
@@ -230,6 +231,17 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
if (!nfs4_has_session(clp))
nfs_mark_client_ready(clp, NFS_CS_READY);
+
+ error = nfs4_discover_server_trunking(clp, &old);
+ if (error < 0)
+ goto error;
+ if (clp != old) {
+ clp->cl_preserve_clid = true;
+ nfs_put_client(clp);
+ clp = old;
+ atomic_inc(&clp->cl_count);
+ }
+
return clp;
error:
@@ -239,6 +251,248 @@ error:
return ERR_PTR(error);
}
+/*
+ * SETCLIENTID just did a callback update with the callback ident in
+ * "drop," but server trunking discovery claims "drop" and "keep" are
+ * actually the same server. Swap the callback IDs so that "keep"
+ * will continue to use the callback ident the server now knows about,
+ * and so that "keep"'s original callback ident is destroyed when
+ * "drop" is freed.
+ */
+static void nfs4_swap_callback_idents(struct nfs_client *keep,
+ struct nfs_client *drop)
+{
+ struct nfs_net *nn = net_generic(keep->cl_net, nfs_net_id);
+ unsigned int save = keep->cl_cb_ident;
+
+ if (keep->cl_cb_ident == drop->cl_cb_ident)
+ return;
+
+ dprintk("%s: keeping callback ident %u and dropping ident %u\n",
+ __func__, keep->cl_cb_ident, drop->cl_cb_ident);
+
+ spin_lock(&nn->nfs_client_lock);
+
+ idr_replace(&nn->cb_ident_idr, keep, drop->cl_cb_ident);
+ keep->cl_cb_ident = drop->cl_cb_ident;
+
+ idr_replace(&nn->cb_ident_idr, drop, save);
+ drop->cl_cb_ident = save;
+
+ spin_unlock(&nn->nfs_client_lock);
+}
+
+/**
+ * nfs40_walk_client_list - Find server that recognizes a client ID
+ *
+ * @new: nfs_client with client ID to test
+ * @result: OUT: found nfs_client, or new
+ * @cred: credential to use for trunking test
+ *
+ * Returns zero, a negative errno, or a negative NFS4ERR status.
+ * If zero is returned, an nfs_client pointer is planted in "result."
+ *
+ * NB: nfs40_walk_client_list() relies on the new nfs_client being
+ * the last nfs_client on the list.
+ */
+int nfs40_walk_client_list(struct nfs_client *new,
+ struct nfs_client **result,
+ struct rpc_cred *cred)
+{
+ struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
+ struct nfs_client *pos, *n, *prev = NULL;
+ struct nfs4_setclientid_res clid = {
+ .clientid = new->cl_clientid,
+ .confirm = new->cl_confirm,
+ };
+ int status;
+
+ spin_lock(&nn->nfs_client_lock);
+ list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
+ /* If "pos" isn't marked ready, we can't trust the
+ * remaining fields in "pos" */
+ if (pos->cl_cons_state < NFS_CS_READY)
+ continue;
+
+ if (pos->rpc_ops != new->rpc_ops)
+ continue;
+
+ if (pos->cl_proto != new->cl_proto)
+ continue;
+
+ if (pos->cl_minorversion != new->cl_minorversion)
+ continue;
+
+ if (pos->cl_clientid != new->cl_clientid)
+ continue;
+
+ atomic_inc(&pos->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+
+ if (prev)
+ nfs_put_client(prev);
+
+ status = nfs4_proc_setclientid_confirm(pos, &clid, cred);
+ if (status == 0) {
+ nfs4_swap_callback_idents(pos, new);
+
+ nfs_put_client(pos);
+ *result = pos;
+ dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
+ __func__, pos, atomic_read(&pos->cl_count));
+ return 0;
+ }
+ if (status != -NFS4ERR_STALE_CLIENTID) {
+ nfs_put_client(pos);
+ dprintk("NFS: <-- %s status = %d, no result\n",
+ __func__, status);
+ return status;
+ }
+
+ spin_lock(&nn->nfs_client_lock);
+ prev = pos;
+ }
+
+ /*
+ * No matching nfs_client found. This should be impossible,
+ * because the new nfs_client has already been added to
+ * nfs_client_list by nfs_get_client().
+ *
+ * Don't BUG(), since the caller is holding a mutex.
+ */
+ if (prev)
+ nfs_put_client(prev);
+ spin_unlock(&nn->nfs_client_lock);
+ pr_err("NFS: %s Error: no matching nfs_client found\n", __func__);
+ return -NFS4ERR_STALE_CLIENTID;
+}
+
+#ifdef CONFIG_NFS_V4_1
+/*
+ * Returns true if the client IDs match
+ */
+static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
+{
+ if (a->cl_clientid != b->cl_clientid) {
+ dprintk("NFS: --> %s client ID %llx does not match %llx\n",
+ __func__, a->cl_clientid, b->cl_clientid);
+ return false;
+ }
+ dprintk("NFS: --> %s client ID %llx matches %llx\n",
+ __func__, a->cl_clientid, b->cl_clientid);
+ return true;
+}
+
+/*
+ * Returns true if the server owners match
+ */
+static bool
+nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+{
+ struct nfs41_server_owner *o1 = a->cl_serverowner;
+ struct nfs41_server_owner *o2 = b->cl_serverowner;
+
+ if (o1->minor_id != o2->minor_id) {
+ dprintk("NFS: --> %s server owner minor IDs do not match\n",
+ __func__);
+ return false;
+ }
+
+ if (o1->major_id_sz != o2->major_id_sz)
+ goto out_major_mismatch;
+ if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
+ goto out_major_mismatch;
+
+ dprintk("NFS: --> %s server owners match\n", __func__);
+ return true;
+
+out_major_mismatch:
+ dprintk("NFS: --> %s server owner major IDs do not match\n",
+ __func__);
+ return false;
+}
+
+/**
+ * nfs41_walk_client_list - Find nfs_client that matches a client/server owner
+ *
+ * @new: nfs_client with client ID to test
+ * @result: OUT: found nfs_client, or new
+ * @cred: credential to use for trunking test
+ *
+ * Returns zero, a negative errno, or a negative NFS4ERR status.
+ * If zero is returned, an nfs_client pointer is planted in "result."
+ *
+ * NB: nfs41_walk_client_list() relies on the new nfs_client being
+ * the last nfs_client on the list.
+ */
+int nfs41_walk_client_list(struct nfs_client *new,
+ struct nfs_client **result,
+ struct rpc_cred *cred)
+{
+ struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
+ struct nfs_client *pos, *n, *prev = NULL;
+ int error;
+
+ spin_lock(&nn->nfs_client_lock);
+ list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
+ /* If "pos" isn't marked ready, we can't trust the
+ * remaining fields in "pos", especially the client
+ * ID and serverowner fields. Wait for CREATE_SESSION
+ * to finish. */
+ if (pos->cl_cons_state < NFS_CS_READY) {
+ atomic_inc(&pos->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+
+ if (prev)
+ nfs_put_client(prev);
+ prev = pos;
+
+ error = nfs_wait_client_init_complete(pos);
+ if (error < 0) {
+ nfs_put_client(pos);
+ spin_lock(&nn->nfs_client_lock);
+ continue;
+ }
+
+ spin_lock(&nn->nfs_client_lock);
+ }
+
+ if (pos->rpc_ops != new->rpc_ops)
+ continue;
+
+ if (pos->cl_proto != new->cl_proto)
+ continue;
+
+ if (pos->cl_minorversion != new->cl_minorversion)
+ continue;
+
+ if (!nfs4_match_clientids(pos, new))
+ continue;
+
+ if (!nfs4_match_serverowners(pos, new))
+ continue;
+
+ spin_unlock(&nn->nfs_client_lock);
+ dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
+ __func__, pos, atomic_read(&pos->cl_count));
+
+ *result = pos;
+ return 0;
+ }
+
+ /*
+ * No matching nfs_client found. This should be impossible,
+ * because the new nfs_client has already been added to
+ * nfs_client_list by nfs_get_client().
+ *
+ * Don't BUG(), since the caller is holding a mutex.
+ */
+ spin_unlock(&nn->nfs_client_lock);
+ pr_err("NFS: %s Error: no matching nfs_client found\n", __func__);
+ return -NFS4ERR_STALE_CLIENTID;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
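/*
 * Illustrative, self-contained sketch (not part of this patch) of the
 * NFSv4.1 trunking test above: two connections lead to the same server
 * only if both the client ID and the server owner (minor ID plus a
 * variable-length major ID) match. Types here are local stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct server_owner {
	uint64_t minor_id;
	size_t major_id_sz;
	char major_id[16];
};

static bool same_server(uint64_t clientid_a, uint64_t clientid_b,
			const struct server_owner *a,
			const struct server_owner *b)
{
	if (clientid_a != clientid_b)
		return false;
	if (a->minor_id != b->minor_id)
		return false;
	if (a->major_id_sz != b->major_id_sz)
		return false;
	return memcmp(a->major_id, b->major_id, a->major_id_sz) == 0;
}

int main(void)
{
	struct server_owner x = { 1, 4, "srvA" };
	struct server_owner y = { 1, 4, "srvA" };

	printf("trunked=%d\n", same_server(42, 42, &x, &y));	/* 1 */
	return 0;
}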
static void nfs4_destroy_server(struct nfs_server *server)
{
nfs_server_return_all_delegations(server);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index eb5eb8eef4d3..afddd6639afb 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -95,16 +95,25 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
int ret;
struct inode *inode = file->f_path.dentry->d_inode;
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (ret != 0)
- goto out;
- mutex_lock(&inode->i_mutex);
- ret = nfs_file_fsync_commit(file, start, end, datasync);
- if (!ret && !datasync)
- /* application has asked for meta-data sync */
- ret = pnfs_layoutcommit_inode(inode, true);
- mutex_unlock(&inode->i_mutex);
-out:
+ do {
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (ret != 0)
+ break;
+ mutex_lock(&inode->i_mutex);
+ ret = nfs_file_fsync_commit(file, start, end, datasync);
+ if (!ret && !datasync)
+ /* application has asked for meta-data sync */
+ ret = pnfs_layoutcommit_inode(inode, true);
+ mutex_unlock(&inode->i_mutex);
+ /*
+ * If nfs_file_fsync_commit detected a server reboot, then
+ * resend all dirty pages that might have been covered by
+ * the NFS_CONTEXT_RESEND_WRITES flag.
+ */
+ start = 0;
+ end = LLONG_MAX;
+ } while (ret == -EAGAIN);
+
return ret;
}
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 53f94d915bd1..2e45fd9c02a3 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -122,12 +122,21 @@ static void filelayout_reset_read(struct nfs_read_data *data)
}
}
+static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
+{
+ if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+ return;
+ clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
+ pnfs_return_layout(inode);
+}
+
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
struct pnfs_layout_segment *lseg)
{
- struct inode *inode = lseg->pls_layout->plh_inode;
+ struct pnfs_layout_hdr *lo = lseg->pls_layout;
+ struct inode *inode = lo->plh_inode;
struct nfs_server *mds_server = NFS_SERVER(inode);
struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
struct nfs_client *mds_client = mds_server->nfs_client;
@@ -190,8 +199,6 @@ static int filelayout_async_handle_error(struct rpc_task *task,
* i/o and all i/o waiting on the slot table to the MDS until
* layout is destroyed and a new valid layout is obtained.
*/
- set_bit(NFS_LAYOUT_INVALID,
- &NFS_I(inode)->layout->plh_flags);
pnfs_destroy_layout(NFS_I(inode));
rpc_wake_up(&tbl->slot_tbl_waitq);
goto reset;
@@ -205,11 +212,9 @@ static int filelayout_async_handle_error(struct rpc_task *task,
case -EPIPE:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
- filelayout_mark_devid_invalid(devid);
- clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
- _pnfs_return_layout(inode);
+ nfs4_mark_deviceid_unavailable(devid);
+ set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
rpc_wake_up(&tbl->slot_tbl_waitq);
- nfs4_ds_disconnect(clp);
/* fall through */
default:
reset:
@@ -269,6 +274,21 @@ filelayout_set_layoutcommit(struct nfs_write_data *wdata)
(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
+bool
+filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node)
+{
+ return filelayout_test_devid_invalid(node) ||
+ nfs4_test_deviceid_unavailable(node);
+}
+
+static bool
+filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_deviceid_node *node = FILELAYOUT_DEVID_NODE(lseg);
+
+ return filelayout_test_devid_unavailable(node);
+}
+
/*
* Call ops for the async read/write cases
* In the case of dense layouts, the offset needs to be reset to its
@@ -318,7 +338,9 @@ static void filelayout_read_count_stats(struct rpc_task *task, void *data)
static void filelayout_read_release(void *data)
{
struct nfs_read_data *rdata = data;
+ struct pnfs_layout_hdr *lo = rdata->header->lseg->pls_layout;
+ filelayout_fenceme(lo->plh_inode, lo);
nfs_put_client(rdata->ds_clp);
rdata->header->mds_ops->rpc_release(data);
}
@@ -416,7 +438,9 @@ static void filelayout_write_count_stats(struct rpc_task *task, void *data)
static void filelayout_write_release(void *data)
{
struct nfs_write_data *wdata = data;
+ struct pnfs_layout_hdr *lo = wdata->header->lseg->pls_layout;
+ filelayout_fenceme(lo->plh_inode, lo);
nfs_put_client(wdata->ds_clp);
wdata->header->mds_ops->rpc_release(data);
}
@@ -453,7 +477,7 @@ static void filelayout_commit_release(void *calldata)
struct nfs_commit_data *data = calldata;
data->completion_ops->completion(data);
- put_lseg(data->lseg);
+ pnfs_put_lseg(data->lseg);
nfs_put_client(data->ds_clp);
nfs_commitdata_release(data);
}
@@ -608,13 +632,13 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode)->pnfs_curr_ld,
NFS_SERVER(lo->plh_inode)->nfs_client, id);
if (d == NULL) {
- dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
+ dsaddr = filelayout_get_device_info(lo->plh_inode, id, gfp_flags);
if (dsaddr == NULL)
goto out;
} else
dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
- /* Found deviceid is being reaped */
- if (test_bit(NFS_DEVICEID_INVALID, &dsaddr->id_node.flags))
+ /* Found deviceid is unavailable */
+ if (filelayout_test_devid_unavailable(&dsaddr->id_node))
goto out_put;
fl->dsaddr = dsaddr;
@@ -726,7 +750,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
goto out_err;
if (fl->num_fh > 0) {
- fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
+ fl->fh_array = kcalloc(fl->num_fh, sizeof(fl->fh_array[0]),
gfp_flags);
if (!fl->fh_array)
goto out_err;
@@ -931,7 +955,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
if (status < 0) {
- put_lseg(pgio->pg_lseg);
+ pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
goto out_mds;
}
@@ -985,7 +1009,7 @@ filelayout_clear_request_commit(struct nfs_page *req,
out:
nfs_request_remove_commit_list(req, cinfo);
spin_unlock(cinfo->lock);
- put_lseg(freeme);
+ pnfs_put_lseg(freeme);
}
static struct list_head *
@@ -1018,7 +1042,7 @@ filelayout_choose_commit_list(struct nfs_page *req,
* off due to a rewrite, in which case it will be done in
* filelayout_clear_request_commit
*/
- buckets[i].wlseg = get_lseg(lseg);
+ buckets[i].wlseg = pnfs_get_lseg(lseg);
}
set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
cinfo->ds->nwritten++;
@@ -1128,7 +1152,7 @@ filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
if (list_empty(src))
bucket->wlseg = NULL;
else
- get_lseg(bucket->clseg);
+ pnfs_get_lseg(bucket->clseg);
}
return ret;
}
@@ -1159,12 +1183,12 @@ static void filelayout_recover_commit_reqs(struct list_head *dst,
/* NOTE cinfo->lock is NOT held, relying on fact that this is
* only called on single thread per dreq.
- * Can't take the lock because need to do put_lseg
+ * Can't take the lock because need to do pnfs_put_lseg
*/
for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
BUG_ON(!list_empty(&b->written));
- put_lseg(b->wlseg);
+ pnfs_put_lseg(b->wlseg);
b->wlseg = NULL;
}
}
@@ -1200,7 +1224,7 @@ alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
if (list_empty(&bucket->committing))
continue;
nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
- put_lseg(bucket->clseg);
+ pnfs_put_lseg(bucket->clseg);
bucket->clseg = NULL;
}
/* Caller will clean up entries put on list */
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 43fe802dd678..8c07241fe52b 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -129,23 +129,13 @@ filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node)
}
static inline bool
-filelayout_test_layout_invalid(struct pnfs_layout_hdr *lo)
-{
- return test_bit(NFS_LAYOUT_INVALID, &lo->plh_flags);
-}
-
-static inline bool
filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
{
return test_bit(NFS_DEVICEID_INVALID, &node->flags);
}
-static inline bool
-filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
-{
- return filelayout_test_devid_invalid(FILELAYOUT_DEVID_NODE(lseg)) ||
- filelayout_test_layout_invalid(lseg->pls_layout);
-}
+extern bool
+filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node);
extern struct nfs_fh *
nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
@@ -158,7 +148,6 @@ struct nfs4_pnfs_ds *nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg,
extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
struct nfs4_file_layout_dsaddr *
-get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
-void nfs4_ds_disconnect(struct nfs_client *clp);
+filelayout_get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
#endif /* FS_NFS_NFS4FILELAYOUT_H */
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index f81231f30d94..a8eaa9b7bb0f 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -149,28 +149,6 @@ _data_server_lookup_locked(const struct list_head *dsaddrs)
}
/*
- * Lookup DS by nfs_client pointer. Zero data server client pointer
- */
-void nfs4_ds_disconnect(struct nfs_client *clp)
-{
- struct nfs4_pnfs_ds *ds;
- struct nfs_client *found = NULL;
-
- dprintk("%s clp %p\n", __func__, clp);
- spin_lock(&nfs4_ds_cache_lock);
- list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
- if (ds->ds_clp && ds->ds_clp == clp) {
- found = ds->ds_clp;
- ds->ds_clp = NULL;
- }
- spin_unlock(&nfs4_ds_cache_lock);
- if (found) {
- set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
- nfs_put_client(clp);
- }
-}
-
-/*
* Create an rpc connection to the nfs4_pnfs_ds data server
* Currently only supports IPv4 and IPv6 addresses
*/
@@ -690,7 +668,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_fl
* of available devices, and return it.
*/
struct nfs4_file_layout_dsaddr *
-get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
+filelayout_get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
{
struct pnfs_device *pdev = NULL;
u32 max_resp_sz;
@@ -804,13 +782,14 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
- if (filelayout_test_devid_invalid(devid))
+ if (filelayout_test_devid_unavailable(devid))
return NULL;
if (ds == NULL) {
printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
__func__, ds_idx);
- goto mark_dev_invalid;
+ filelayout_mark_devid_invalid(devid);
+ return NULL;
}
if (!ds->ds_clp) {
@@ -818,14 +797,12 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
int err;
err = nfs4_ds_connect(s, ds);
- if (err)
- goto mark_dev_invalid;
+ if (err) {
+ nfs4_mark_deviceid_unavailable(devid);
+ return NULL;
+ }
}
return ds;
-
-mark_dev_invalid:
- filelayout_mark_devid_invalid(devid);
- return NULL;
}
module_param(dataserver_retrans, uint, 0644);
diff --git a/fs/nfs/nfs4getroot.c b/fs/nfs/nfs4getroot.c
index 6a83780e0ce6..549462e5b9b0 100644
--- a/fs/nfs/nfs4getroot.c
+++ b/fs/nfs/nfs4getroot.c
@@ -5,6 +5,7 @@
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
+#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 017b4b01a69c..79fbb61ce202 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -192,25 +192,13 @@ out:
struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode,
struct qstr *name)
{
- struct rpc_clnt *clone;
- struct rpc_auth *auth;
rpc_authflavor_t flavor;
flavor = nfs4_negotiate_security(inode, name);
if ((int)flavor < 0)
- return ERR_PTR(flavor);
+ return ERR_PTR((int)flavor);
- clone = rpc_clone_client(clnt);
- if (IS_ERR(clone))
- return clone;
-
- auth = rpcauth_create(flavor, clone);
- if (!auth) {
- rpc_shutdown_client(clone);
- clone = ERR_PTR(-EIO);
- }
-
- return clone;
+ return rpc_clone_client_set_auth(clnt, flavor);
}
static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1e50326d00dd..68b21d81b7ac 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -104,6 +104,8 @@ static int nfs4_map_errors(int err)
return -EACCES;
case -NFS4ERR_MINOR_VERS_MISMATCH:
return -EPROTONOSUPPORT;
+ case -NFS4ERR_ACCESS:
+ return -EACCES;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -150,6 +152,12 @@ static const u32 nfs4_pnfs_open_bitmap[3] = {
FATTR4_WORD2_MDSTHRESHOLD
};
+static const u32 nfs4_open_noattr_bitmap[3] = {
+ FATTR4_WORD0_TYPE
+ | FATTR4_WORD0_CHANGE
+ | FATTR4_WORD0_FILEID,
+};
+
const u32 nfs4_statfs_bitmap[2] = {
FATTR4_WORD0_FILES_AVAIL
| FATTR4_WORD0_FILES_FREE
@@ -832,6 +840,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
p->o_res.seqid = p->o_arg.seqid;
p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
+ p->o_res.access_request = p->o_arg.access;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
@@ -860,6 +869,14 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
p->o_arg.fh = NFS_FH(dir);
p->o_arg.open_flags = flags;
p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
+ /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
+ * will return permission denied for all bits until close */
+ if (!(flags & O_EXCL)) {
+ /* ask server to check for all possible rights as results
+ * are cached */
+ p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
+ NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
+ }
p->o_arg.clientid = server->nfs_client->cl_clientid;
p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
@@ -1115,11 +1132,80 @@ out_return_state:
return state;
}
-static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
+static void
+nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
+{
+ struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
+ struct nfs_delegation *delegation;
+ int delegation_flags = 0;
+
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(state->inode)->delegation);
+ if (delegation)
+ delegation_flags = delegation->flags;
+ rcu_read_unlock();
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+ pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+ "returning a delegation for "
+ "OPEN(CLAIM_DELEGATE_CUR)\n",
+ clp->cl_hostname);
+ } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+ nfs_inode_set_delegation(state->inode,
+ data->owner->so_cred,
+ &data->o_res);
+ else
+ nfs_inode_reclaim_delegation(state->inode,
+ data->owner->so_cred,
+ &data->o_res);
+}
+
+/*
+ * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
+ * and update the nfs4_state.
+ */
+static struct nfs4_state *
+_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+{
+ struct inode *inode = data->state->inode;
+ struct nfs4_state *state = data->state;
+ int ret;
+
+ if (!data->rpc_done) {
+ ret = data->rpc_status;
+ goto err;
+ }
+
+ ret = -ESTALE;
+ if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
+ !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
+ !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
+ goto err;
+
+ ret = -ENOMEM;
+ state = nfs4_get_open_state(inode, data->owner);
+ if (state == NULL)
+ goto err;
+
+ ret = nfs_refresh_inode(inode, &data->f_attr);
+ if (ret)
+ goto err;
+
+ if (data->o_res.delegation_type != 0)
+ nfs4_opendata_check_deleg(data, state);
+ update_open_stateid(state, &data->o_res.stateid, NULL,
+ data->o_arg.fmode);
+
+ return state;
+err:
+ return ERR_PTR(ret);
+
+}
+
+static struct nfs4_state *
+_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
struct inode *inode;
struct nfs4_state *state = NULL;
- struct nfs_delegation *delegation;
int ret;
if (!data->rpc_done) {
@@ -1138,30 +1224,8 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
state = nfs4_get_open_state(inode, data->owner);
if (state == NULL)
goto err_put_inode;
- if (data->o_res.delegation_type != 0) {
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
- int delegation_flags = 0;
-
- rcu_read_lock();
- delegation = rcu_dereference(NFS_I(inode)->delegation);
- if (delegation)
- delegation_flags = delegation->flags;
- rcu_read_unlock();
- if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
- pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
- "returning a delegation for "
- "OPEN(CLAIM_DELEGATE_CUR)\n",
- clp->cl_hostname);
- } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
- nfs_inode_set_delegation(state->inode,
- data->owner->so_cred,
- &data->o_res);
- else
- nfs_inode_reclaim_delegation(state->inode,
- data->owner->so_cred,
- &data->o_res);
- }
-
+ if (data->o_res.delegation_type != 0)
+ nfs4_opendata_check_deleg(data, state);
update_open_stateid(state, &data->o_res.stateid, NULL,
data->o_arg.fmode);
iput(inode);
@@ -1173,6 +1237,14 @@ err:
return ERR_PTR(ret);
}
+static struct nfs4_state *
+nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
+{
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
+ return _nfs4_opendata_reclaim_to_nfs4_state(data);
+ return _nfs4_opendata_to_nfs4_state(data);
+}
+
static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
@@ -1494,6 +1566,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
+ data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
}
data->timestamp = jiffies;
@@ -1526,7 +1599,8 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
return;
if (task->tk_status == 0) {
- switch (data->o_res.f_attr->mode & S_IFMT) {
+ if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
+ switch (data->o_res.f_attr->mode & S_IFMT) {
case S_IFREG:
break;
case S_IFLNK:
@@ -1537,6 +1611,7 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
break;
default:
data->rpc_status = -ENOTDIR;
+ }
}
renew_lease(data->o_res.server, data->timestamp);
if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
@@ -1643,6 +1718,39 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
return status;
}
+static int nfs4_opendata_access(struct rpc_cred *cred,
+ struct nfs4_opendata *opendata,
+ struct nfs4_state *state, fmode_t fmode)
+{
+ struct nfs_access_entry cache;
+ u32 mask;
+
+ /* access call failed or for some reason the server doesn't
+ * support any access modes -- defer access call until later */
+ if (opendata->o_res.access_supported == 0)
+ return 0;
+
+ mask = 0;
+ /* don't check MAY_WRITE - a newly created file may not have
+ * write mode bits, but POSIX allows the creating process to write */
+ if (fmode & FMODE_READ)
+ mask |= MAY_READ;
+ if (fmode & FMODE_EXEC)
+ mask |= MAY_EXEC;
+
+ cache.cred = cred;
+ cache.jiffies = jiffies;
+ nfs_access_set_mask(&cache, opendata->o_res.access_result);
+ nfs_access_add_cache(state->inode, &cache);
+
+ if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
+ return 0;
+
+ /* even though OPEN succeeded, access is denied. Close the file */
+ nfs4_close_state(state, fmode);
+ return -NFS4ERR_ACCESS;
+}
+
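/*
 * Illustrative, self-contained sketch (not part of this patch) of the
 * deny check in nfs4_opendata_access() above: the open fails only if a
 * right that was actually requested (read/exec) is missing from what
 * the server granted; write is deliberately not checked. MAY_* values
 * are redefined locally so the example compiles standalone.
 */
#include <stdio.h>

#define MAY_EXEC	0x1
#define MAY_WRITE	0x2
#define MAY_READ	0x4

static int access_denied(unsigned int wanted, unsigned int granted)
{
	/* bits in wanted but not granted, restricted to read/exec */
	return (wanted & ~granted & (MAY_READ | MAY_EXEC)) != 0;
}

int main(void)
{
	/* asked to read, server granted only write: denied */
	printf("%d\n", access_denied(MAY_READ, MAY_WRITE));		/* 1 */
	/* asked to read, server granted read+exec: allowed */
	printf("%d\n", access_denied(MAY_READ, MAY_READ | MAY_EXEC));	/* 0 */
	return 0;
}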
/*
* Note: On error, nfs4_proc_open will free the struct nfs4_opendata
*/
@@ -1774,7 +1882,11 @@ static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
* informs us the stateid is unrecognized. */
if (status != -NFS4ERR_BAD_STATEID)
nfs41_free_stateid(server, stateid);
+ nfs_remove_bad_delegation(state->inode);
+ write_seqlock(&state->seqlock);
+ nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+ write_sequnlock(&state->seqlock);
clear_bit(NFS_DELEGATED_STATE, &state->flags);
}
}
@@ -1790,7 +1902,7 @@ static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
static int nfs41_check_open_stateid(struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
- nfs4_stateid *stateid = &state->stateid;
+ nfs4_stateid *stateid = &state->open_stateid;
int status;
/* If a state reset has been done, test_stateid is unneeded */
@@ -1896,6 +2008,10 @@ static int _nfs4_do_open(struct inode *dir,
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+ status = nfs4_opendata_access(cred, opendata, state, fmode);
+ if (status != 0)
+ goto err_opendata_put;
+
if (opendata->o_arg.open_flags & O_EXCL) {
nfs4_exclusive_attrset(opendata, sattr);
@@ -1941,7 +2057,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
struct nfs4_state *res;
int status;
- fmode &= FMODE_READ|FMODE_WRITE;
+ fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC;
do {
status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
&res, ctx_th);
@@ -2013,8 +2129,12 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
nfs_fattr_init(fattr);
if (state != NULL) {
+ struct nfs_lockowner lockowner = {
+ .l_owner = current->files,
+ .l_pid = current->tgid,
+ };
nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
- current->files, current->tgid);
+ &lockowner);
} else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
FMODE_WRITE)) {
/* Use that stateid */
@@ -2133,6 +2253,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
+ struct inode *inode = calldata->inode;
int call_close = 0;
dprintk("%s: begin!\n", __func__);
@@ -2166,16 +2287,13 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
if (calldata->arg.fmode == 0) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
if (calldata->roc &&
- pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
- rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
- task, NULL);
+ pnfs_roc_drain(inode, &calldata->roc_barrier, task))
goto out;
- }
}
nfs_fattr_init(calldata->res.fattr);
calldata->timestamp = jiffies;
- if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
+ if (nfs4_setup_sequence(NFS_SERVER(inode),
&calldata->arg.seq_args,
&calldata->res.seq_res,
task))
@@ -2202,7 +2320,7 @@ static const struct rpc_call_ops nfs4_close_ops = {
*
* NOTE: Caller must be holding the sp->so_owner semaphore!
*/
-int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
+int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_closedata *calldata;
@@ -2238,7 +2356,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
- calldata->roc = roc;
+ calldata->roc = pnfs_roc(state->inode);
nfs_sb_active(calldata->inode->i_sb);
msg.rpc_argp = &calldata->arg;
@@ -2255,8 +2373,6 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
out_free_calldata:
kfree(calldata);
out:
- if (roc)
- pnfs_roc_release(state->inode);
nfs4_put_open_state(state);
nfs4_put_state_owner(sp);
return status;
@@ -2399,7 +2515,7 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl
int ret;
auth = rpcauth_create(flavor, server->client);
- if (!auth) {
+ if (IS_ERR(auth)) {
ret = -EIO;
goto out;
}
@@ -2767,13 +2883,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
if (!status) {
- entry->mask = 0;
- if (res.access & NFS4_ACCESS_READ)
- entry->mask |= MAY_READ;
- if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
- entry->mask |= MAY_WRITE;
- if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
- entry->mask |= MAY_EXEC;
+ nfs_access_set_mask(entry, res.access);
nfs_refresh_inode(inode, res.fattr);
}
nfs_free_fattr(res.fattr);
@@ -3362,8 +3472,11 @@ static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, s
nfs_fattr_init(fsinfo->fattr);
error = nfs4_do_fsinfo(server, fhandle, fsinfo);
- if (error == 0)
+ if (error == 0) {
+ /* block layout checks this! */
+ server->pnfs_blksize = fsinfo->blksize;
set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
+ }
return error;
}
@@ -4007,6 +4120,36 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
memcpy(bootverf->data, verf, sizeof(bootverf->data));
}
+static unsigned int
+nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
+ char *buf, size_t len)
+{
+ unsigned int result;
+
+ rcu_read_lock();
+ result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
+ clp->cl_ipaddr,
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR),
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_PROTO));
+ rcu_read_unlock();
+ return result;
+}
+
+static unsigned int
+nfs4_init_uniform_client_string(const struct nfs_client *clp,
+ char *buf, size_t len)
+{
+ char *nodename = clp->cl_rpcclient->cl_nodename;
+
+ if (nfs4_client_id_uniquifier[0] != '\0')
+ nodename = nfs4_client_id_uniquifier;
+ return scnprintf(buf, len, "Linux NFSv%u.%u %s",
+ clp->rpc_ops->version, clp->cl_minorversion,
+ nodename);
+}
+
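/*
 * Illustrative, self-contained sketch (not part of this patch) of the
 * two client-identifier string shapes built above. The addresses and
 * nodename below are made up for the example.
 */
#include <stdio.h>

int main(void)
{
	char buf[128];

	/* nonuniform: tied to the client/server address pair */
	snprintf(buf, sizeof(buf), "Linux NFSv4.0 %s/%s %s",
		 "192.0.2.10", "198.51.100.1", "tcp");
	printf("%s\n", buf);

	/* uniform: stable across addresses, needed for migration */
	snprintf(buf, sizeof(buf), "Linux NFSv%u.%u %s", 4, 0, "client-a");
	printf("%s\n", buf);
	return 0;
}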
/**
* nfs4_proc_setclientid - Negotiate client ID
* @clp: state data structure
@@ -4037,15 +4180,18 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
/* nfs_client_id4 */
nfs4_init_boot_verifier(clp, &sc_verifier);
- rcu_read_lock();
- setclientid.sc_name_len = scnprintf(setclientid.sc_name,
- sizeof(setclientid.sc_name), "%s/%s %s",
- clp->cl_ipaddr,
- rpc_peeraddr2str(clp->cl_rpcclient,
- RPC_DISPLAY_ADDR),
- rpc_peeraddr2str(clp->cl_rpcclient,
- RPC_DISPLAY_PROTO));
+ if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
+ setclientid.sc_name_len =
+ nfs4_init_uniform_client_string(clp,
+ setclientid.sc_name,
+ sizeof(setclientid.sc_name));
+ else
+ setclientid.sc_name_len =
+ nfs4_init_nonuniform_client_string(clp,
+ setclientid.sc_name,
+ sizeof(setclientid.sc_name));
/* cb_client4 */
+ rcu_read_lock();
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
sizeof(setclientid.sc_netid),
rpc_peeraddr2str(clp->cl_rpcclient,
@@ -4391,7 +4537,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
- if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
+ if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
task->tk_action = NULL;
return;
@@ -4585,7 +4731,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
}
if (data->rpc_status == 0) {
nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
- data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+ set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags);
renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
}
out:
@@ -4632,7 +4778,7 @@ static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_
case -NFS4ERR_BAD_STATEID:
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
if (new_lock_owner != 0 ||
- (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+ test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
nfs4_schedule_stateid_recovery(server, lsp->ls_state);
break;
case -NFS4ERR_STALE_STATEID:
@@ -4756,7 +4902,7 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
struct nfs_server *server = NFS_SERVER(state->inode);
list_for_each_entry(lsp, &state->lock_states, ls_locks) {
- if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
+ if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
status = nfs41_test_stateid(server, &lsp->ls_stateid);
if (status != NFS_OK) {
/* Free the stateid unless the server
@@ -4764,7 +4910,7 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
if (status != -NFS4ERR_BAD_STATEID)
nfs41_free_stateid(server,
&lsp->ls_stateid);
- lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
+ clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
ret = status;
}
}
@@ -5267,10 +5413,8 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
};
nfs4_init_boot_verifier(clp, &verifier);
- args.id_len = scnprintf(args.id, sizeof(args.id),
- "%s/%s",
- clp->cl_ipaddr,
- clp->cl_rpcclient->cl_nodename);
+ args.id_len = nfs4_init_uniform_client_string(clp, args.id,
+ sizeof(args.id));
dprintk("NFS call exchange_id auth=%s, '%.*s'\n",
clp->cl_rpcclient->cl_auth->au_ops->au_name,
args.id_len, args.id);
@@ -5391,6 +5535,8 @@ int nfs4_destroy_clientid(struct nfs_client *clp)
goto out;
if (clp->cl_exchange_flags == 0)
goto out;
+ if (clp->cl_preserve_clid)
+ goto out;
cred = nfs4_get_exchange_id_cred(clp);
ret = nfs4_proc_destroy_clientid(clp, cred);
if (cred)
@@ -6196,26 +6342,44 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
- struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct inode *inode = lgp->args.inode;
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct pnfs_layout_hdr *lo;
+ struct nfs4_state *state = NULL;
dprintk("--> %s\n", __func__);
if (!nfs4_sequence_done(task, &lgp->res.seq_res))
- return;
+ goto out;
switch (task->tk_status) {
case 0:
- break;
+ goto out;
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
task->tk_status = -NFS4ERR_DELAY;
- /* Fall through */
- default:
- if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
- rpc_restart_call_prepare(task);
- return;
+ break;
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_BAD_STATEID:
+ spin_lock(&inode->i_lock);
+ lo = NFS_I(inode)->layout;
+ if (!lo || list_empty(&lo->plh_segs)) {
+ spin_unlock(&inode->i_lock);
+ /* If the open stateid was bad, then recover it. */
+ state = lgp->args.ctx->state;
+ } else {
+ LIST_HEAD(head);
+
+ pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
+ spin_unlock(&inode->i_lock);
+ /* Mark the bad layout state as invalid, then
+ * retry using the open stateid. */
+ pnfs_free_lseg_list(&head);
}
}
+ if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
+ rpc_restart_call_prepare(task);
+out:
dprintk("<-- %s\n", __func__);
}
@@ -6282,7 +6446,8 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
.rpc_release = nfs4_layoutget_release,
};
-void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+struct pnfs_layout_segment *
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
{
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
size_t max_pages = max_response_pages(server);
@@ -6299,6 +6464,7 @@ void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
.callback_data = lgp,
.flags = RPC_TASK_ASYNC,
};
+ struct pnfs_layout_segment *lseg = NULL;
int status = 0;
dprintk("--> %s\n", __func__);
@@ -6306,7 +6472,7 @@ void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
if (!lgp->args.layout.pages) {
nfs4_layoutget_release(lgp);
- return;
+ return ERR_PTR(-ENOMEM);
}
lgp->args.layout.pglen = max_pages * PAGE_SIZE;
@@ -6315,15 +6481,17 @@ void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
- return;
+ return ERR_CAST(task);
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
if (status == 0)
- status = pnfs_layout_process(lgp);
+ lseg = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
- return;
+ if (status)
+ return ERR_PTR(status);
+ return lseg;
}
static void
@@ -6342,7 +6510,6 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
struct nfs_server *server;
- struct pnfs_layout_hdr *lo = lrp->args.layout;
dprintk("--> %s\n", __func__);
@@ -6354,20 +6521,21 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
rpc_restart_call_prepare(task);
return;
}
- spin_lock(&lo->plh_inode->i_lock);
- if (task->tk_status == 0 && lrp->res.lrs_present)
- pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
- lo->plh_block_lgets--;
- spin_unlock(&lo->plh_inode->i_lock);
dprintk("<-- %s\n", __func__);
}
static void nfs4_layoutreturn_release(void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
+ struct pnfs_layout_hdr *lo = lrp->args.layout;
dprintk("--> %s\n", __func__);
- put_layout_hdr(lrp->args.layout);
+ spin_lock(&lo->plh_inode->i_lock);
+ if (lrp->res.lrs_present)
+ pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+ lo->plh_block_lgets--;
+ spin_unlock(&lo->plh_inode->i_lock);
+ pnfs_put_layout_hdr(lrp->args.layout);
kfree(calldata);
dprintk("<-- %s\n", __func__);
}
@@ -6541,7 +6709,7 @@ static void nfs4_layoutcommit_release(void *calldata)
list_del_init(&lseg->pls_lc_list);
if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
&lseg->pls_flags))
- put_lseg(lseg);
+ pnfs_put_lseg(lseg);
}
clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
@@ -6800,6 +6968,7 @@ static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
.recover_lock = nfs4_lock_reclaim,
.establish_clid = nfs4_init_clientid,
.get_clid_cred = nfs4_get_setclientid_cred,
+ .detect_trunking = nfs40_discover_server_trunking,
};
#if defined(CONFIG_NFS_V4_1)
@@ -6811,6 +6980,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
.establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
.reclaim_complete = nfs41_proc_reclaim_complete,
+ .detect_trunking = nfs41_discover_server_trunking,
};
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 55148def5540..c351e6b39838 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -51,18 +51,21 @@
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/sunrpc/clnt.h>
+
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
+#include "netns.h"
#define NFSDBG_FACILITY NFSDBG_STATE
#define OPENOWNER_POOL_SIZE 8
const nfs4_stateid zero_stateid;
-
+static DEFINE_MUTEX(nfs_clid_init_mutex);
static LIST_HEAD(nfs4_clientid_list);
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
@@ -73,12 +76,13 @@ int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
};
unsigned short port;
int status;
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
goto do_confirm;
- port = nfs_callback_tcpport;
+ port = nn->nfs_callback_tcpport;
if (clp->cl_addr.ss_family == AF_INET6)
- port = nfs_callback_tcpport6;
+ port = nn->nfs_callback_tcpport6;
status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
if (status != 0)
@@ -96,6 +100,56 @@ out:
return status;
}
+/**
+ * nfs40_discover_server_trunking - Detect server IP address trunking (mv0)
+ *
+ * @clp: nfs_client under test
+ * @result: OUT: found nfs_client, or clp
+ * @cred: credential to use for trunking test
+ *
+ * Returns zero, a negative errno, or a negative NFS4ERR status.
+ * If zero is returned, an nfs_client pointer is planted in
+ * "result".
+ *
+ * Note: The returned client may not yet be marked ready.
+ */
+int nfs40_discover_server_trunking(struct nfs_client *clp,
+ struct nfs_client **result,
+ struct rpc_cred *cred)
+{
+ struct nfs4_setclientid_res clid = {
+ .clientid = clp->cl_clientid,
+ .confirm = clp->cl_confirm,
+ };
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
+ unsigned short port;
+ int status;
+
+ port = nn->nfs_callback_tcpport;
+ if (clp->cl_addr.ss_family == AF_INET6)
+ port = nn->nfs_callback_tcpport6;
+
+ status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
+ if (status != 0)
+ goto out;
+ clp->cl_clientid = clid.clientid;
+ clp->cl_confirm = clid.confirm;
+
+ status = nfs40_walk_client_list(clp, result, cred);
+ switch (status) {
+ case -NFS4ERR_STALE_CLIENTID:
+ set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
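+ /* Fall through */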
+ case 0:
+ /* Sustain the lease, even if it's empty. If the clientid4
+ * goes stale it's of no use for trunking discovery. */
+ nfs4_schedule_state_renewal(*result);
+ break;
+ }
+
+out:
+ return status;
+}
+
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
struct rpc_cred *cred = NULL;
@@ -275,6 +329,33 @@ out:
return status;
}
+/**
+ * nfs41_discover_server_trunking - Detect server IP address trunking (mv1)
+ *
+ * @clp: nfs_client under test
+ * @result: OUT: found nfs_client, or clp
+ * @cred: credential to use for trunking test
+ *
+ * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status.
+ * If NFS4_OK is returned, an nfs_client pointer is planted in
+ * "result".
+ *
+ * Note: The returned client may not yet be marked ready.
+ */
+int nfs41_discover_server_trunking(struct nfs_client *clp,
+ struct nfs_client **result,
+ struct rpc_cred *cred)
+{
+ int status;
+
+ status = nfs4_proc_exchange_id(clp, cred);
+ if (status != NFS4_OK)
+ return status;
+ set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+
+ return nfs41_walk_client_list(clp, result, cred);
+}
+
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
@@ -729,11 +810,8 @@ static void __nfs4_close(struct nfs4_state *state,
if (!call_close) {
nfs4_put_open_state(state);
nfs4_put_state_owner(owner);
- } else {
- bool roc = pnfs_roc(state->inode);
-
- nfs4_do_close(state, gfp_mask, wait, roc);
- }
+ } else
+ nfs4_do_close(state, gfp_mask, wait);
}
void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
@@ -865,7 +943,7 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
if (list_empty(&state->lock_states))
clear_bit(LK_STATE_IN_USE, &state->flags);
spin_unlock(&state->state_lock);
- if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
+ if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
if (nfs4_release_lockowner(lsp) == 0)
return;
}
@@ -911,17 +989,25 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
}
static bool nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state,
- fl_owner_t fl_owner, pid_t fl_pid)
+ const struct nfs_lockowner *lockowner)
{
struct nfs4_lock_state *lsp;
+ fl_owner_t fl_owner;
+ pid_t fl_pid;
bool ret = false;
+
+ if (lockowner == NULL)
+ goto out;
+
if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
goto out;
+ fl_owner = lockowner->l_owner;
+ fl_pid = lockowner->l_pid;
spin_lock(&state->state_lock);
lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
- if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
+ if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
nfs4_stateid_copy(dst, &lsp->ls_stateid);
ret = true;
}
@@ -946,11 +1032,11 @@ static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
* requests.
*/
void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
- fmode_t fmode, fl_owner_t fl_owner, pid_t fl_pid)
+ fmode_t fmode, const struct nfs_lockowner *lockowner)
{
if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
return;
- if (nfs4_copy_lock_stateid(dst, state, fl_owner, fl_pid))
+ if (nfs4_copy_lock_stateid(dst, state, lockowner))
return;
nfs4_copy_open_stateid(dst, state);
}
@@ -1289,7 +1375,7 @@ restart:
if (status >= 0) {
spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
- if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
+ if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
pr_warn_ratelimited("NFS: "
"%s: Lock reclaim "
"failed!\n", __func__);
@@ -1361,7 +1447,7 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
spin_lock(&state->state_lock);
list_for_each_entry(lock, &state->lock_states, ls_locks) {
lock->ls_seqid.flags = 0;
- lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
+ clear_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags);
}
spin_unlock(&state->state_lock);
}
@@ -1595,8 +1681,8 @@ out:
return nfs4_recovery_handle_error(clp, status);
}
-/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
- * on EXCHANGE_ID for v4.1
+/* Set NFS4CLNT_LEASE_EXPIRED and reclaim reboot state for all v4.0 errors
+ * and for recoverable errors on EXCHANGE_ID for v4.1
*/
static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
{
@@ -1606,8 +1692,12 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
return -ESERVERFAULT;
/* Lease confirmation error: retry after purging the lease */
ssleep(1);
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ break;
case -NFS4ERR_STALE_CLIENTID:
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ nfs4_state_clear_reclaim_reboot(clp);
+ nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_CLID_INUSE:
pr_err("NFS: Server %s reports our clientid is in use\n",
@@ -1698,6 +1788,109 @@ static int nfs4_purge_lease(struct nfs_client *clp)
return 0;
}
+/**
+ * nfs4_discover_server_trunking - Detect server IP address trunking
+ *
+ * @clp: nfs_client under test
+ * @result: OUT: found nfs_client, or clp
+ *
+ * Returns zero or a negative errno. If zero is returned,
+ * an nfs_client pointer is planted in "result".
+ *
+ * Note: since we are invoked in process context, and
+ * not from inside the state manager, we cannot use
+ * nfs4_handle_reclaim_lease_error().
+ */
+int nfs4_discover_server_trunking(struct nfs_client *clp,
+ struct nfs_client **result)
+{
+ const struct nfs4_state_recovery_ops *ops =
+ clp->cl_mvops->reboot_recovery_ops;
+ rpc_authflavor_t *flavors, flav, save;
+ struct rpc_clnt *clnt;
+ struct rpc_cred *cred;
+ int i, len, status;
+
+ dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname);
+
+ len = NFS_MAX_SECFLAVORS;
+ flavors = kcalloc(len, sizeof(*flavors), GFP_KERNEL);
+ if (flavors == NULL) {
+ status = -ENOMEM;
+ goto out;
+ }
+ len = rpcauth_list_flavors(flavors, len);
+ if (len < 0) {
+ status = len;
+ goto out_free;
+ }
+ clnt = clp->cl_rpcclient;
+ save = clnt->cl_auth->au_flavor;
+ i = 0;
+
+ mutex_lock(&nfs_clid_init_mutex);
+ status = -ENOENT;
+again:
+ cred = ops->get_clid_cred(clp);
+ if (cred == NULL)
+ goto out_unlock;
+
+ status = ops->detect_trunking(clp, result, cred);
+ put_rpccred(cred);
+ switch (status) {
+ case 0:
+ break;
+
+ case -EACCES:
+ if (clp->cl_machine_cred == NULL)
+ break;
+ /* Handle case where the user hasn't set up machine creds */
+ nfs4_clear_machine_cred(clp);
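+ /* Fall through */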
+ case -NFS4ERR_DELAY:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ ssleep(1);
+ dprintk("NFS: %s after status %d, retrying\n",
+ __func__, status);
+ goto again;
+
+ case -NFS4ERR_CLID_INUSE:
+ case -NFS4ERR_WRONGSEC:
+ status = -EPERM;
+ if (i >= len)
+ break;
+
+ flav = flavors[i++];
+ if (flav == save && i < len)
+ flav = flavors[i++];
+ clnt = rpc_clone_client_set_auth(clnt, flav);
+ if (IS_ERR(clnt)) {
+ status = PTR_ERR(clnt);
+ break;
+ }
+ clp->cl_rpcclient = clnt;
+ goto again;
+
+ case -NFS4ERR_MINOR_VERS_MISMATCH:
+ status = -EPROTONOSUPPORT;
+ break;
+
+ case -EKEYEXPIRED:
+ nfs4_warn_keyexpired(clp->cl_hostname);
+ case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+ * in nfs4_exchange_id */
+ status = -EKEYEXPIRED;
+ }
+
+out_unlock:
+ mutex_unlock(&nfs_clid_init_mutex);
+out_free:
+ kfree(flavors);
+out:
+ dprintk("NFS: %s: status = %d\n", __func__, status);
+ return status;
+}
+
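
A typical consumer of this new entry point sits in the client-initialisation path (a minimal sketch; everything here other than nfs4_discover_server_trunking() and nfs_put_client() is illustrative):

	struct nfs_client *old;
	int error;

	/* Is "clp" really a second address of a server we already know? */
	error = nfs4_discover_server_trunking(clp, &old);
	if (error < 0)
		goto error;
	if (old != clp) {
		/* Trunking detected: keep the established client */
		nfs_put_client(clp);
		clp = old;
	}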
#ifdef CONFIG_NFS_V4_1
void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
@@ -2008,6 +2201,7 @@ out_error:
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);
+ ssleep(1);
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
}
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index 5729bc8aa75d..2628d921b7e3 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -9,6 +9,7 @@
#include <linux/nfs_idmap.h>
#include <linux/nfs_fs.h>
+#include "nfs4_fs.h"
#include "callback.h"
static const int nfs_set_port_min = 0;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 8dba6bd48557..40836ee5dc3a 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -447,12 +447,14 @@ static int nfs4_stat_to_errno(int);
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_maxsz + \
+ encode_access_maxsz + \
encode_getfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_maxsz + \
+ decode_access_maxsz + \
decode_getfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_confirm_sz \
@@ -467,11 +469,13 @@ static int nfs4_stat_to_errno(int);
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_maxsz + \
+ encode_access_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_maxsz + \
+ decode_access_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_downgrade_sz \
(compound_encode_hdr_maxsz + \
@@ -1509,8 +1513,12 @@ static void encode_open_stateid(struct xdr_stream *xdr,
nfs4_stateid stateid;
if (ctx->state != NULL) {
+ const struct nfs_lockowner *lockowner = NULL;
+
+ if (l_ctx != NULL)
+ lockowner = &l_ctx->lockowner;
nfs4_select_rw_stateid(&stateid, ctx->state,
- fmode, l_ctx->lockowner, l_ctx->pid);
+ fmode, lockowner);
if (zero_seqid)
stateid.seqid = 0;
encode_nfs4_stateid(xdr, &stateid);
@@ -2216,6 +2224,8 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_putfh(xdr, args->fh, &hdr);
encode_open(xdr, args, &hdr);
encode_getfh(xdr, &hdr);
+ if (args->access)
+ encode_access(xdr, args->access, &hdr);
encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr);
encode_nops(&hdr);
}
@@ -2252,7 +2262,9 @@ static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_open(xdr, args, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
+ if (args->access)
+ encode_access(xdr, args->access, &hdr);
+ encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr);
encode_nops(&hdr);
}
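
With both encoders updated, the OPEN compound now carries an optional ACCESS operation (a sketch of the resulting operation order; ACCESS is emitted only when args->access is non-zero):

	/*
	 * SEQUENCE; PUTFH; OPEN; [ACCESS]; GETFH; GETATTR   (enc_open)
	 * SEQUENCE; PUTFH; OPEN; [ACCESS];        GETATTR   (enc_open_noattr)
	 *
	 * Piggybacking ACCESS on OPEN saves a separate round trip to learn
	 * the caller's access rights on the file just opened.
	 */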
@@ -4095,7 +4107,7 @@ out_overflow:
return -EIO;
}
-static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
+static int decode_access(struct xdr_stream *xdr, u32 *supported, u32 *access)
{
__be32 *p;
uint32_t supp, acc;
@@ -4109,8 +4121,8 @@ static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
goto out_overflow;
supp = be32_to_cpup(p++);
acc = be32_to_cpup(p);
- access->supported = supp;
- access->access = acc;
+ *supported = supp;
+ *access = acc;
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -5642,7 +5654,8 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr,
* and places the remaining xdr data in xdr_buf->tail
*/
pdev->mincount = be32_to_cpup(p);
- xdr_read_pages(xdr, pdev->mincount); /* include space for the length */
+ if (xdr_read_pages(xdr, pdev->mincount) != pdev->mincount)
+ goto out_overflow;
/* Parse notification bitmap, verifying that it is zero. */
p = xdr_inline_decode(xdr, 4);
@@ -5887,7 +5900,7 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status != 0)
goto out;
- status = decode_access(xdr, res);
+ status = decode_access(xdr, &res->supported, &res->access);
if (status != 0)
goto out;
decode_getfattr(xdr, res->fattr, res->server);
@@ -6228,6 +6241,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_getfh(xdr, &res->fh);
if (status)
goto out;
+ if (res->access_request)
+ decode_access(xdr, &res->access_supported, &res->access_result);
decode_getfattr(xdr, res->f_attr, res->server);
out:
return status;
@@ -6276,6 +6291,8 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
status = decode_open(xdr, res);
if (status)
goto out;
+ if (res->access_request)
+ decode_access(xdr, &res->access_supported, &res->access_result);
decode_getfattr(xdr, res->f_attr, res->server);
out:
return status;
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index ea6d111b03e9..c6f990656f89 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -41,6 +41,7 @@
#include <scsi/osd_ore.h>
#include "objlayout.h"
+#include "../internal.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
@@ -368,7 +369,7 @@ void objio_free_result(struct objlayout_io_res *oir)
kfree(objios);
}
-enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
+static enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
{
switch (oep) {
case OSD_ERR_PRI_NO_ERROR:
@@ -573,7 +574,7 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
(unsigned long)pgio->pg_layout_private;
}
-void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+static void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
pnfs_generic_pg_init_read(pgio, req);
if (unlikely(pgio->pg_lseg == NULL))
@@ -603,11 +604,17 @@ static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout,
return false;
}
-void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+static void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
unsigned long stripe_end = 0;
+ u64 wb_size;
- pnfs_generic_pg_init_write(pgio, req);
+ if (pgio->pg_dreq == NULL)
+ wb_size = i_size_read(pgio->pg_inode) - req_offset(req);
+ else
+ wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+
+ pnfs_generic_pg_init_write(pgio, req, wb_size);
if (unlikely(pgio->pg_lseg == NULL))
return; /* Not pNFS */
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 311a79681e2b..e56e846e9d2d 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -102,6 +102,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
unsigned int offset, unsigned int count)
{
struct nfs_page *req;
+ struct nfs_lock_context *l_ctx;
/* try to allocate the request struct */
req = nfs_page_alloc();
@@ -109,11 +110,12 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
return ERR_PTR(-ENOMEM);
/* get lock context early so we can deal with alloc failures */
- req->wb_lock_context = nfs_get_lock_context(ctx);
- if (req->wb_lock_context == NULL) {
+ l_ctx = nfs_get_lock_context(ctx);
+ if (IS_ERR(l_ctx)) {
nfs_page_free(req);
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(l_ctx);
}
+ req->wb_lock_context = l_ctx;
/* Initialize the request struct. Initially, we assume a
* long write-back delay. This will be adjusted in
@@ -290,7 +292,9 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
{
if (req->wb_context->cred != prev->wb_context->cred)
return false;
- if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
+ if (req->wb_lock_context->lockowner.l_owner != prev->wb_lock_context->lockowner.l_owner)
+ return false;
+ if (req->wb_lock_context->lockowner.l_pid != prev->wb_lock_context->lockowner.l_pid)
return false;
if (req->wb_context->state != prev->wb_context->state)
return false;
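
The two-field comparison assumes a lockowner type introduced elsewhere in this series; it is presumably along these lines (sketch only):

	struct nfs_lockowner {
		fl_owner_t	l_owner;
		pid_t		l_pid;
	};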
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 2e00feacd4be..fe624c91bd00 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -35,6 +35,7 @@
#include "iostat.h"
#define NFSDBG_FACILITY NFSDBG_PNFS
+#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)
/* Locking:
*
@@ -190,7 +191,7 @@ EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
/* Need to hold i_lock if caller does not already hold reference */
void
-get_layout_hdr(struct pnfs_layout_hdr *lo)
+pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
atomic_inc(&lo->plh_refcount);
}
@@ -199,43 +200,107 @@ static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
- return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
- kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
+ return ld->alloc_layout_hdr(ino, gfp_flags);
}
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
- struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
+ struct nfs_server *server = NFS_SERVER(lo->plh_inode);
+ struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
+
+ if (!list_empty(&lo->plh_layouts)) {
+ struct nfs_client *clp = server->nfs_client;
+
+ spin_lock(&clp->cl_lock);
+ list_del_init(&lo->plh_layouts);
+ spin_unlock(&clp->cl_lock);
+ }
put_rpccred(lo->plh_lc_cred);
- return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
+ return ld->free_layout_hdr(lo);
}
static void
-destroy_layout_hdr(struct pnfs_layout_hdr *lo)
+pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
+ struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
dprintk("%s: freeing layout cache %p\n", __func__, lo);
- BUG_ON(!list_empty(&lo->plh_layouts));
- NFS_I(lo->plh_inode)->layout = NULL;
- pnfs_free_layout_hdr(lo);
+ nfsi->layout = NULL;
+ /* Reset MDS Threshold I/O counters */
+ nfsi->write_io = 0;
+ nfsi->read_io = 0;
+}
+
+void
+pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct inode *inode = lo->plh_inode;
+
+ if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+ pnfs_detach_layout_hdr(lo);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_layout_hdr(lo);
+ }
+}
+
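
pnfs_put_layout_hdr() is the classic dec-and-lock idiom: i_lock is taken only when the final reference is dropped, so a lookup performed under the same lock can never observe a header whose refcount has already hit zero. The generic shape of the pattern (illustrative names throughout):

	static void obj_put(struct obj *o)
	{
		/* Take the lock only on the last put; lookups run under
		 * the same lock, so they either see refcount > 0 or do
		 * not see the object at all. */
		if (atomic_dec_and_lock(&o->refcount, &o->parent->lock)) {
			detach_from_parent(o);
			spin_unlock(&o->parent->lock);
			free_obj(o);
		}
	}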
+static int
+pnfs_iomode_to_fail_bit(u32 iomode)
+{
+ return iomode == IOMODE_RW ?
+ NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}
static void
-put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
- if (atomic_dec_and_test(&lo->plh_refcount))
- destroy_layout_hdr(lo);
+ lo->plh_retry_timestamp = jiffies;
+ if (test_and_set_bit(fail_bit, &lo->plh_flags))
+ atomic_inc(&lo->plh_refcount);
}
-void
-put_layout_hdr(struct pnfs_layout_hdr *lo)
+static void
+pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
+{
+ if (test_and_clear_bit(fail_bit, &lo->plh_flags))
+ atomic_dec(&lo->plh_refcount);
+}
+
+static void
+pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
struct inode *inode = lo->plh_inode;
+ struct pnfs_layout_range range = {
+ .iomode = iomode,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+ LIST_HEAD(head);
- if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
- destroy_layout_hdr(lo);
- spin_unlock(&inode->i_lock);
+ spin_lock(&inode->i_lock);
+ pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
+ pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&head);
+ dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
+ iomode == IOMODE_RW ? "RW" : "READ");
+}
+
+static bool
+pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
+{
+ unsigned long start, end;
+ int fail_bit = pnfs_iomode_to_fail_bit(iomode);
+
+ if (test_bit(fail_bit, &lo->plh_flags) == 0)
+ return false;
+ end = jiffies;
+ start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
+ if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
+ /* It is time to retry the failed layoutgets */
+ pnfs_layout_clear_fail_bit(lo, fail_bit);
+ return false;
}
+ return true;
}
static void
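
The fail bit is no longer sticky: a failed iomode becomes eligible for retry once PNFS_LAYOUTGET_RETRY_TIMEOUT (two minutes) has elapsed. The window test reduces to (a sketch mirroring the code above):

	unsigned long end = jiffies;
	unsigned long start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;

	/* Failed iff the timestamp lies in [jiffies - timeout, jiffies];
	 * time_in_range() handles jiffies wraparound. */
	failed = time_in_range(lo->plh_retry_timestamp, start, end);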
@@ -249,33 +314,32 @@ init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
lseg->pls_layout = lo;
}
-static void free_lseg(struct pnfs_layout_segment *lseg)
+static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
struct inode *ino = lseg->pls_layout->plh_inode;
NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
- /* Matched by get_layout_hdr in pnfs_insert_layout */
- put_layout_hdr(NFS_I(ino)->layout);
}
static void
-put_lseg_common(struct pnfs_layout_segment *lseg)
+pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg)
{
- struct inode *inode = lseg->pls_layout->plh_inode;
+ struct inode *inode = lo->plh_inode;
WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
list_del_init(&lseg->pls_list);
- if (list_empty(&lseg->pls_layout->plh_segs)) {
- set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
- /* Matched by initial refcount set in alloc_init_layout_hdr */
- put_layout_hdr_locked(lseg->pls_layout);
- }
+ /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
+ atomic_dec(&lo->plh_refcount);
+ if (list_empty(&lo->plh_segs))
+ clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
void
-put_lseg(struct pnfs_layout_segment *lseg)
+pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
+ struct pnfs_layout_hdr *lo;
struct inode *inode;
if (!lseg)
@@ -284,17 +348,17 @@ put_lseg(struct pnfs_layout_segment *lseg)
dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount),
test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
- inode = lseg->pls_layout->plh_inode;
+ lo = lseg->pls_layout;
+ inode = lo->plh_inode;
if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
- LIST_HEAD(free_me);
-
- put_lseg_common(lseg);
- list_add(&lseg->pls_list, &free_me);
+ pnfs_get_layout_hdr(lo);
+ pnfs_layout_remove_lseg(lo, lseg);
spin_unlock(&inode->i_lock);
- pnfs_free_lseg_list(&free_me);
+ pnfs_free_lseg(lseg);
+ pnfs_put_layout_hdr(lo);
}
}
-EXPORT_SYMBOL_GPL(put_lseg);
+EXPORT_SYMBOL_GPL(pnfs_put_lseg);
static inline u64
end_offset(u64 start, u64 len)
@@ -378,7 +442,7 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
dprintk("%s: lseg %p ref %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount));
if (atomic_dec_and_test(&lseg->pls_refcount)) {
- put_lseg_common(lseg);
+ pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
list_add(&lseg->pls_list, tmp_list);
rv = 1;
}
@@ -390,7 +454,7 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
* after call.
*/
int
-mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
struct pnfs_layout_range *recall_range)
{
@@ -399,14 +463,8 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
dprintk("%s:Begin lo %p\n", __func__, lo);
- if (list_empty(&lo->plh_segs)) {
- /* Reset MDS Threshold I/O counters */
- NFS_I(lo->plh_inode)->write_io = 0;
- NFS_I(lo->plh_inode)->read_io = 0;
- if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
- put_layout_hdr_locked(lo);
+ if (list_empty(&lo->plh_segs))
return 0;
- }
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
if (!recall_range ||
should_free_lseg(&lseg->pls_range, recall_range)) {
@@ -426,25 +484,13 @@ void
pnfs_free_lseg_list(struct list_head *free_me)
{
struct pnfs_layout_segment *lseg, *tmp;
- struct pnfs_layout_hdr *lo;
if (list_empty(free_me))
return;
- lo = list_first_entry(free_me, struct pnfs_layout_segment,
- pls_list)->pls_layout;
-
- if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
- struct nfs_client *clp;
-
- clp = NFS_SERVER(lo->plh_inode)->nfs_client;
- spin_lock(&clp->cl_lock);
- list_del_init(&lo->plh_layouts);
- spin_unlock(&clp->cl_lock);
- }
list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
list_del(&lseg->pls_list);
- free_lseg(lseg);
+ pnfs_free_lseg(lseg);
}
}
@@ -458,10 +504,15 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
lo = nfsi->layout;
if (lo) {
lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
- mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
- }
- spin_unlock(&nfsi->vfs_inode.i_lock);
- pnfs_free_lseg_list(&tmp_list);
+ pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+ pnfs_get_layout_hdr(lo);
+ pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
+ pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
+ spin_unlock(&nfsi->vfs_inode.i_lock);
+ pnfs_free_lseg_list(&tmp_list);
+ pnfs_put_layout_hdr(lo);
+ } else
+ spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
@@ -498,46 +549,54 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
}
}
+/*
+ * Compare 2 layout stateid sequence ids, to see which is newer,
+ * taking into account wraparound issues.
+ */
+static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
+{
+ return (s32)s1 - (s32)s2 > 0;
+}
+
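
pnfs_seqid_is_newer() is serial-number arithmetic: the difference is reinterpreted as signed, so comparisons stay correct across u32 wraparound as long as the two seqids are within 2^31 of each other. Illustrative values:

	pnfs_seqid_is_newer(2, 1);           /* true: ordinary case      */
	pnfs_seqid_is_newer(0, 0xffffffff);  /* true: 0 follows the wrap */
	pnfs_seqid_is_newer(1, 2);           /* false: 1 is older        */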
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
bool update_barrier)
{
- u32 oldseq, newseq;
+ u32 oldseq, newseq, new_barrier;
+ int empty = list_empty(&lo->plh_segs);
oldseq = be32_to_cpu(lo->plh_stateid.seqid);
newseq = be32_to_cpu(new->seqid);
- if ((int)(newseq - oldseq) > 0) {
+ if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
nfs4_stateid_copy(&lo->plh_stateid, new);
if (update_barrier) {
- u32 new_barrier = be32_to_cpu(new->seqid);
-
- if ((int)(new_barrier - lo->plh_barrier))
- lo->plh_barrier = new_barrier;
+ new_barrier = be32_to_cpu(new->seqid);
} else {
/* Because of wraparound, we want to keep the barrier
- * "close" to the current seqids. It needs to be
- * within 2**31 to count as "behind", so if it
- * gets too near that limit, give us a litle leeway
- * and bring it to within 2**30.
- * NOTE - and yes, this is all unsigned arithmetic.
+ * "close" to the current seqids.
*/
- if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
- lo->plh_barrier = newseq - (1 << 30);
+ new_barrier = newseq - atomic_read(&lo->plh_outstanding);
}
+ if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
+ lo->plh_barrier = new_barrier;
}
}
+static bool
+pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *stateid)
+{
+ u32 seqid = be32_to_cpu(stateid->seqid);
+
+ return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
+}
+
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
-pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
- int lget)
+pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
- if ((stateid) &&
- (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0)
- return true;
return lo->plh_block_lgets ||
- test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
(list_empty(&lo->plh_segs) &&
(atomic_read(&lo->plh_outstanding) > lget));
@@ -551,7 +610,7 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
dprintk("--> %s\n", __func__);
spin_lock(&lo->plh_inode->i_lock);
- if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
+ if (pnfs_layoutgets_blocked(lo, 1)) {
status = -EAGAIN;
} else if (list_empty(&lo->plh_segs)) {
int seq;
@@ -582,7 +641,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
struct inode *ino = lo->plh_inode;
struct nfs_server *server = NFS_SERVER(ino);
struct nfs4_layoutget *lgp;
- struct pnfs_layout_segment *lseg = NULL;
+ struct pnfs_layout_segment *lseg;
dprintk("--> %s\n", __func__);
@@ -599,16 +658,22 @@ send_layoutget(struct pnfs_layout_hdr *lo,
lgp->args.type = server->pnfs_curr_ld->id;
lgp->args.inode = ino;
lgp->args.ctx = get_nfs_open_context(ctx);
- lgp->lsegpp = &lseg;
lgp->gfp_flags = gfp_flags;
/* Synchronously retrieve layout information from server and
* store in lseg.
*/
- nfs4_proc_layoutget(lgp, gfp_flags);
- if (!lseg) {
- /* remember that LAYOUTGET failed and suspend trying */
- set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
+ lseg = nfs4_proc_layoutget(lgp, gfp_flags);
+ if (IS_ERR(lseg)) {
+ switch (PTR_ERR(lseg)) {
+ case -ENOMEM:
+ case -ERESTARTSYS:
+ break;
+ default:
+ /* remember that LAYOUTGET failed and suspend trying */
+ pnfs_layout_io_set_failed(lo, range->iomode);
+ }
+ return NULL;
}
return lseg;
@@ -636,25 +701,24 @@ _pnfs_return_layout(struct inode *ino)
spin_lock(&ino->i_lock);
lo = nfsi->layout;
- if (!lo || pnfs_test_layout_returned(lo)) {
+ if (!lo) {
spin_unlock(&ino->i_lock);
dprintk("NFS: %s no layout to return\n", __func__);
goto out;
}
stateid = nfsi->layout->plh_stateid;
/* Reference matched in nfs4_layoutreturn_release */
- get_layout_hdr(lo);
+ pnfs_get_layout_hdr(lo);
empty = list_empty(&lo->plh_segs);
- mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+ pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
/* Don't send a LAYOUTRETURN if list was initially empty */
if (empty) {
spin_unlock(&ino->i_lock);
- put_layout_hdr(lo);
+ pnfs_put_layout_hdr(lo);
dprintk("NFS: %s no layout segments to return\n", __func__);
goto out;
}
lo->plh_block_lgets++;
- pnfs_mark_layout_returned(lo);
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
@@ -663,10 +727,10 @@ _pnfs_return_layout(struct inode *ino)
lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
if (unlikely(lrp == NULL)) {
status = -ENOMEM;
- set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
- set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
- pnfs_clear_layout_returned(lo);
- put_layout_hdr(lo);
+ spin_lock(&ino->i_lock);
+ lo->plh_block_lgets--;
+ spin_unlock(&ino->i_lock);
+ pnfs_put_layout_hdr(lo);
goto out;
}
@@ -703,7 +767,7 @@ bool pnfs_roc(struct inode *ino)
if (!found)
goto out_nolayout;
lo->plh_block_lgets++;
- get_layout_hdr(lo); /* matched in pnfs_roc_release */
+ pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
return true;
@@ -720,8 +784,12 @@ void pnfs_roc_release(struct inode *ino)
spin_lock(&ino->i_lock);
lo = NFS_I(ino)->layout;
lo->plh_block_lgets--;
- put_layout_hdr_locked(lo);
- spin_unlock(&ino->i_lock);
+ if (atomic_dec_and_test(&lo->plh_refcount)) {
+ pnfs_detach_layout_hdr(lo);
+ spin_unlock(&ino->i_lock);
+ pnfs_free_layout_hdr(lo);
+ } else
+ spin_unlock(&ino->i_lock);
}
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
@@ -730,32 +798,34 @@ void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
spin_lock(&ino->i_lock);
lo = NFS_I(ino)->layout;
- if ((int)(barrier - lo->plh_barrier) > 0)
+ if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
lo->plh_barrier = barrier;
spin_unlock(&ino->i_lock);
}
-bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg;
+ u32 current_seqid;
bool found = false;
spin_lock(&ino->i_lock);
list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+ rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
found = true;
- break;
+ goto out;
}
- if (!found) {
- struct pnfs_layout_hdr *lo = nfsi->layout;
- u32 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
+ lo = nfsi->layout;
+ current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
- /* Since close does not return a layout stateid for use as
- * a barrier, we choose the worst-case barrier.
- */
- *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
- }
+ /* Since close does not return a layout stateid for use as
+ * a barrier, we choose the worst-case barrier.
+ */
+ *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
+out:
spin_unlock(&ino->i_lock);
return found;
}
@@ -786,14 +856,13 @@ cmp_layout(struct pnfs_layout_range *l1,
}
static void
-pnfs_insert_layout(struct pnfs_layout_hdr *lo,
+pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
struct pnfs_layout_segment *lseg)
{
struct pnfs_layout_segment *lp;
dprintk("%s:Begin\n", __func__);
- assert_spin_locked(&lo->plh_inode->i_lock);
list_for_each_entry(lp, &lo->plh_segs, pls_list) {
if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
continue;
@@ -813,7 +882,7 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
__func__, lseg, lseg->pls_range.iomode,
lseg->pls_range.offset, lseg->pls_range.length);
out:
- get_layout_hdr(lo);
+ pnfs_get_layout_hdr(lo);
dprintk("%s:Return\n", __func__);
}
@@ -847,21 +916,19 @@ pnfs_find_alloc_layout(struct inode *ino,
dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
- assert_spin_locked(&ino->i_lock);
- if (nfsi->layout) {
- if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
- return NULL;
- else
- return nfsi->layout;
- }
+ if (nfsi->layout != NULL)
+ goto out_existing;
spin_unlock(&ino->i_lock);
new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
spin_lock(&ino->i_lock);
- if (likely(nfsi->layout == NULL)) /* Won the race? */
+ if (likely(nfsi->layout == NULL)) { /* Won the race? */
nfsi->layout = new;
- else
- pnfs_free_layout_hdr(new);
+ return new;
+ }
+ pnfs_free_layout_hdr(new);
+out_existing:
+ pnfs_get_layout_hdr(nfsi->layout);
return nfsi->layout;
}
@@ -904,11 +971,10 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
dprintk("%s:Begin\n", __func__);
- assert_spin_locked(&lo->plh_inode->i_lock);
list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
is_matching_lseg(&lseg->pls_range, range)) {
- ret = get_lseg(lseg);
+ ret = pnfs_get_lseg(lseg);
break;
}
if (lseg->pls_range.offset > range->offset)
@@ -1013,7 +1079,6 @@ pnfs_update_layout(struct inode *ino,
.length = count,
};
unsigned pg_offset;
- struct nfs_inode *nfsi = NFS_I(ino);
struct nfs_server *server = NFS_SERVER(ino);
struct nfs_client *clp = server->nfs_client;
struct pnfs_layout_hdr *lo;
@@ -1021,16 +1086,16 @@ pnfs_update_layout(struct inode *ino,
bool first = false;
if (!pnfs_enabled_sb(NFS_SERVER(ino)))
- return NULL;
+ goto out;
if (pnfs_within_mdsthreshold(ctx, ino, iomode))
- return NULL;
+ goto out;
spin_lock(&ino->i_lock);
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
- dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
- goto out_unlock;
+ spin_unlock(&ino->i_lock);
+ goto out;
}
/* Do we even need to bother with this? */
@@ -1040,7 +1105,7 @@ pnfs_update_layout(struct inode *ino,
}
/* if LAYOUTGET already failed once we don't try again */
- if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
+ if (pnfs_layout_io_test_failed(lo, iomode))
goto out_unlock;
/* Check to see if the layout for the given range already exists */
@@ -1048,17 +1113,13 @@ pnfs_update_layout(struct inode *ino,
if (lseg)
goto out_unlock;
- if (pnfs_layoutgets_blocked(lo, NULL, 0))
+ if (pnfs_layoutgets_blocked(lo, 0))
goto out_unlock;
atomic_inc(&lo->plh_outstanding);
- get_layout_hdr(lo);
if (list_empty(&lo->plh_segs))
first = true;
- /* Enable LAYOUTRETURNs */
- pnfs_clear_layout_returned(lo);
-
spin_unlock(&ino->i_lock);
if (first) {
/* The lo must be on the clp list if there is any
@@ -1079,24 +1140,26 @@ pnfs_update_layout(struct inode *ino,
arg.length = PAGE_CACHE_ALIGN(arg.length);
lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
- if (!lseg && first) {
- spin_lock(&clp->cl_lock);
- list_del_init(&lo->plh_layouts);
- spin_unlock(&clp->cl_lock);
- }
atomic_dec(&lo->plh_outstanding);
- put_layout_hdr(lo);
+out_put_layout_hdr:
+ pnfs_put_layout_hdr(lo);
out:
- dprintk("%s end, state 0x%lx lseg %p\n", __func__,
- nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
+ dprintk("%s: inode %s/%llu pNFS layout segment %s for "
+ "(%s, offset: %llu, length: %llu)\n",
+ __func__, ino->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(ino),
+ lseg == NULL ? "not found" : "found",
+ iomode == IOMODE_RW ? "read/write" : "read-only",
+ (unsigned long long)pos,
+ (unsigned long long)count);
return lseg;
out_unlock:
spin_unlock(&ino->i_lock);
- goto out;
+ goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
-int
+struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
@@ -1123,25 +1186,29 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
goto out_forget_reply;
}
- if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
+ if (pnfs_layoutgets_blocked(lo, 1) ||
+ pnfs_layout_stateid_blocked(lo, &res->stateid)) {
dprintk("%s forget reply due to state\n", __func__);
goto out_forget_reply;
}
+
+ /* Done processing layoutget. Set the layout stateid */
+ pnfs_set_layout_stateid(lo, &res->stateid, false);
+
init_lseg(lo, lseg);
lseg->pls_range = res->range;
- *lgp->lsegpp = get_lseg(lseg);
- pnfs_insert_layout(lo, lseg);
+ pnfs_get_lseg(lseg);
+ pnfs_layout_insert_lseg(lo, lseg);
if (res->return_on_close) {
set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
}
- /* Done processing layoutget. Set the layout stateid */
- pnfs_set_layout_stateid(lo, &res->stateid, false);
spin_unlock(&ino->i_lock);
+ return lseg;
out:
- return status;
+ return ERR_PTR(status);
out_forget_reply:
spin_unlock(&ino->i_lock);
@@ -1153,16 +1220,24 @@ out_forget_reply:
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
+ u64 rd_size = req->wb_bytes;
+
BUG_ON(pgio->pg_lseg != NULL);
if (req->wb_offset != req->wb_pgbase) {
nfs_pageio_reset_read_mds(pgio);
return;
}
+
+ if (pgio->pg_dreq == NULL)
+ rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
+ else
+ rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
req_offset(req),
- req->wb_bytes,
+ rd_size,
IOMODE_READ,
GFP_KERNEL);
/* If no lseg, fall back to read through mds */
@@ -1173,7 +1248,8 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
void
-pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req, u64 wb_size)
{
BUG_ON(pgio->pg_lseg != NULL);
@@ -1181,10 +1257,11 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
nfs_pageio_reset_write_mds(pgio);
return;
}
+
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
req_offset(req),
- req->wb_bytes,
+ wb_size,
IOMODE_RW,
GFP_NOFS);
/* If no lseg, fall back to write through mds */
@@ -1362,12 +1439,12 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
if (trypnfs == PNFS_NOT_ATTEMPTED)
pnfs_write_through_mds(desc, data);
}
- put_lseg(lseg);
+ pnfs_put_lseg(lseg);
}
static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
- put_lseg(hdr->lseg);
+ pnfs_put_lseg(hdr->lseg);
nfs_writehdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
@@ -1382,17 +1459,17 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
whdr = nfs_writehdr_alloc();
if (!whdr) {
desc->pg_completion_ops->error_cleanup(&desc->pg_list);
- put_lseg(desc->pg_lseg);
+ pnfs_put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
return -ENOMEM;
}
hdr = &whdr->header;
nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
- hdr->lseg = get_lseg(desc->pg_lseg);
+ hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
atomic_inc(&hdr->refcnt);
ret = nfs_generic_flush(desc, hdr);
if (ret != 0) {
- put_lseg(desc->pg_lseg);
+ pnfs_put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
} else
pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
@@ -1517,12 +1594,12 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
if (trypnfs == PNFS_NOT_ATTEMPTED)
pnfs_read_through_mds(desc, data);
}
- put_lseg(lseg);
+ pnfs_put_lseg(lseg);
}
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
- put_lseg(hdr->lseg);
+ pnfs_put_lseg(hdr->lseg);
nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
@@ -1538,17 +1615,17 @@ pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
if (!rhdr) {
desc->pg_completion_ops->error_cleanup(&desc->pg_list);
ret = -ENOMEM;
- put_lseg(desc->pg_lseg);
+ pnfs_put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
return ret;
}
hdr = &rhdr->header;
nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
- hdr->lseg = get_lseg(desc->pg_lseg);
+ hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
atomic_inc(&hdr->refcnt);
ret = nfs_generic_pagein(desc, hdr);
if (ret != 0) {
- put_lseg(desc->pg_lseg);
+ pnfs_put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
} else
pnfs_do_multiple_reads(desc, &hdr->rpc_list);
@@ -1574,13 +1651,7 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
- if (lseg->pls_range.iomode == IOMODE_RW) {
- dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- } else {
- dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
- }
+ pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
@@ -1601,7 +1672,7 @@ pnfs_set_layoutcommit(struct nfs_write_data *wdata)
}
if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
/* references matched in nfs4_layoutcommit_release */
- get_lseg(hdr->lseg);
+ pnfs_get_lseg(hdr->lseg);
}
if (end_pos > nfsi->layout->plh_lwb)
nfsi->layout->plh_lwb = end_pos;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 745aa1b39e7c..dbf7bba52da0 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -62,9 +62,7 @@ enum {
NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
NFS_LAYOUT_ROC, /* some lseg had roc bit set */
- NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
- NFS_LAYOUT_INVALID, /* layout is being destroyed */
- NFS_LAYOUT_RETURNED, /* layout has already been returned */
+ NFS_LAYOUT_RETURN, /* Return this layout ASAP */
};
enum layoutdriver_policy_flags {
@@ -140,6 +138,7 @@ struct pnfs_layout_hdr {
atomic_t plh_outstanding; /* number of RPCs out */
unsigned long plh_block_lgets; /* block LAYOUTGET if >0 */
u32 plh_barrier; /* ignore lower seqids */
+ unsigned long plh_retry_timestamp;
unsigned long plh_flags;
loff_t plh_lwb; /* last write byte for layoutcommit */
struct rpc_cred *plh_lc_cred; /* layoutcommit cred */
@@ -172,12 +171,12 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
struct pnfs_devicelist *devlist);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev);
-extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
+extern struct pnfs_layout_segment *nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
/* pnfs.c */
-void get_layout_hdr(struct pnfs_layout_hdr *lo);
-void put_lseg(struct pnfs_layout_segment *lseg);
+void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo);
+void pnfs_put_lseg(struct pnfs_layout_segment *lseg);
void pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
const struct nfs_pgio_completion_ops *);
@@ -188,28 +187,29 @@ void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
void unset_pnfs_layoutdriver(struct nfs_server *);
void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *);
int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
-void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *, struct nfs_page *);
+void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req, u64 wb_size);
int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req);
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg);
-int pnfs_layout_process(struct nfs4_layoutget *lgp);
+struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp);
void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
void pnfs_destroy_all_layouts(struct nfs_client *);
-void put_layout_hdr(struct pnfs_layout_hdr *lo);
+void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo);
void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
const nfs4_stateid *new,
bool update_barrier);
int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
struct pnfs_layout_hdr *lo,
struct nfs4_state *open_state);
-int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
struct pnfs_layout_range *recall_range);
bool pnfs_roc(struct inode *ino);
void pnfs_roc_release(struct inode *ino);
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
-bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task);
void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
@@ -233,6 +233,7 @@ struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
/* nfs4_deviceid_flags */
enum {
NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */
+ NFS_DEVICEID_UNAVAILABLE, /* device temporarily unavailable */
};
/* pnfs_dev.c */
@@ -242,6 +243,7 @@ struct nfs4_deviceid_node {
const struct pnfs_layoutdriver_type *ld;
const struct nfs_client *nfs_client;
unsigned long flags;
+ unsigned long timestamp_unavailable;
struct nfs4_deviceid deviceid;
atomic_t ref;
};
@@ -254,34 +256,12 @@ void nfs4_init_deviceid_node(struct nfs4_deviceid_node *,
const struct nfs4_deviceid *);
struct nfs4_deviceid_node *nfs4_insert_deviceid_node(struct nfs4_deviceid_node *);
bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *);
+void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node);
+bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node);
void nfs4_deviceid_purge_client(const struct nfs_client *);
-static inline void
-pnfs_mark_layout_returned(struct pnfs_layout_hdr *lo)
-{
- set_bit(NFS_LAYOUT_RETURNED, &lo->plh_flags);
-}
-
-static inline void
-pnfs_clear_layout_returned(struct pnfs_layout_hdr *lo)
-{
- clear_bit(NFS_LAYOUT_RETURNED, &lo->plh_flags);
-}
-
-static inline bool
-pnfs_test_layout_returned(struct pnfs_layout_hdr *lo)
-{
- return test_bit(NFS_LAYOUT_RETURNED, &lo->plh_flags);
-}
-
-static inline int lo_fail_bit(u32 iomode)
-{
- return iomode == IOMODE_RW ?
- NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
-}
-
static inline struct pnfs_layout_segment *
-get_lseg(struct pnfs_layout_segment *lseg)
+pnfs_get_lseg(struct pnfs_layout_segment *lseg)
{
if (lseg) {
atomic_inc(&lseg->pls_refcount);
@@ -406,12 +386,12 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
}
static inline struct pnfs_layout_segment *
-get_lseg(struct pnfs_layout_segment *lseg)
+pnfs_get_lseg(struct pnfs_layout_segment *lseg)
{
return NULL;
}
-static inline void put_lseg(struct pnfs_layout_segment *lseg)
+static inline void pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
}
@@ -443,7 +423,7 @@ pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
}
static inline bool
-pnfs_roc_drain(struct inode *ino, u32 *barrier)
+pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
return false;
}
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index 73f701f1f4d3..d35b62e83ea6 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -40,6 +40,8 @@
#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
+#define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ)
+
static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
@@ -218,6 +220,30 @@ nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
+void
+nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
+{
+ node->timestamp_unavailable = jiffies;
+ set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
+}
+EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);
+
+bool
+nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
+{
+ if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
+ unsigned long start, end;
+
+ end = jiffies;
+ start = end - PNFS_DEVICE_RETRY_TIMEOUT;
+ if (time_in_range(node->timestamp_unavailable, start, end))
+ return true;
+ clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);
+
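
A layout driver would use this pair roughly as follows (hypothetical call site; dev and its embedded nfs4_deviceid_node are illustrative):

	/* on a fatal I/O error against this device */
	nfs4_mark_deviceid_unavailable(&dev->id_node);

	/* when selecting a device for new I/O */
	if (nfs4_test_deviceid_unavailable(&dev->id_node))
		return NULL;	/* fall back to I/O through the MDS */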
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
@@ -276,3 +302,4 @@ nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
}
rcu_read_unlock();
}
+
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index d2c7f5db0847..e831bce49766 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -88,6 +88,7 @@ enum {
Opt_sharecache, Opt_nosharecache,
Opt_resvport, Opt_noresvport,
Opt_fscache, Opt_nofscache,
+ Opt_migration, Opt_nomigration,
/* Mount options that take integer arguments */
Opt_port,
@@ -147,6 +148,8 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_noresvport, "noresvport" },
{ Opt_fscache, "fsc" },
{ Opt_nofscache, "nofsc" },
+ { Opt_migration, "migration" },
+ { Opt_nomigration, "nomigration" },
{ Opt_port, "port=%s" },
{ Opt_rsize, "rsize=%s" },
@@ -676,6 +679,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
if (nfss->options & NFS_OPTION_FSCACHE)
seq_printf(m, ",fsc");
+ if (nfss->options & NFS_OPTION_MIGRATION)
+ seq_printf(m, ",migration");
+
if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
seq_printf(m, ",lookupcache=none");
@@ -1106,7 +1112,7 @@ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
string = match_strdup(args);
if (string == NULL)
return -ENOMEM;
- rc = strict_strtoul(string, 10, option);
+ rc = kstrtoul(string, 10, option);
kfree(string);
return rc;
@@ -1243,6 +1249,12 @@ static int nfs_parse_mount_options(char *raw,
kfree(mnt->fscache_uniq);
mnt->fscache_uniq = NULL;
break;
+ case Opt_migration:
+ mnt->options |= NFS_OPTION_MIGRATION;
+ break;
+ case Opt_nomigration:
+ mnt->options &= ~NFS_OPTION_MIGRATION;
+ break;
/*
* options that take numeric values
@@ -1535,6 +1547,10 @@ static int nfs_parse_mount_options(char *raw,
if (mnt->minorversion && mnt->version != 4)
goto out_minorversion_mismatch;
+ if ((mnt->options & NFS_OPTION_MIGRATION) &&
+     (mnt->version != 4 || mnt->minorversion != 0))
+ goto out_migration_misuse;
+
/*
* verify that any proto=/mountproto= options match the address
* families in the addr=/mountaddr= options.
@@ -1572,6 +1588,10 @@ out_minorversion_mismatch:
printk(KERN_INFO "NFS: mount option vers=%u does not support "
"minorversion=%u\n", mnt->version, mnt->minorversion);
return 0;
+out_migration_misuse:
+ printk(KERN_INFO
+ "NFS: 'migration' not supported for this NFS version\n");
+ return 0;
out_nomem:
printk(KERN_INFO "NFS: not enough memory to parse option\n");
return 0;
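
Two details in the option handling above are easy to get wrong: clearing a flag needs the bitwise complement, and "migration" is only defined for NFSv4 minor version 0, so anything else is a misuse. In sketch form:

	mnt->options |= NFS_OPTION_MIGRATION;	/* "migration"   */
	mnt->options &= ~NFS_OPTION_MIGRATION;	/* "nomigration" */

	/* reject unless vers=4 and minorversion=0 */
	if ((mnt->options & NFS_OPTION_MIGRATION) &&
	    (mnt->version != 4 || mnt->minorversion != 0))
		goto out_migration_misuse;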
@@ -2494,7 +2514,7 @@ EXPORT_SYMBOL_GPL(nfs_kill_super);
/*
* Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
*/
-struct dentry *
+static struct dentry *
nfs_xdev_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
@@ -2642,6 +2662,7 @@ unsigned int nfs_idmap_cache_timeout = 600;
bool nfs4_disable_idmapping = true;
unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
unsigned short send_implementation_id = 1;
+char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN] = "";
EXPORT_SYMBOL_GPL(nfs_callback_set_tcpport);
EXPORT_SYMBOL_GPL(nfs_callback_tcpport);
@@ -2649,6 +2670,7 @@ EXPORT_SYMBOL_GPL(nfs_idmap_cache_timeout);
EXPORT_SYMBOL_GPL(nfs4_disable_idmapping);
EXPORT_SYMBOL_GPL(max_session_slots);
EXPORT_SYMBOL_GPL(send_implementation_id);
+EXPORT_SYMBOL_GPL(nfs4_client_id_uniquifier);
#define NFS_CALLBACK_MAXPORTNR (65535U)
@@ -2659,7 +2681,7 @@ static int param_set_portnr(const char *val, const struct kernel_param *kp)
if (!val)
return -EINVAL;
- ret = strict_strtoul(val, 0, &num);
+ ret = kstrtoul(val, 0, &num);
if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
return -EINVAL;
*((unsigned int *)kp->arg) = num;
@@ -2674,6 +2696,8 @@ static struct kernel_param_ops param_ops_portnr = {
module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
module_param(nfs_idmap_cache_timeout, int, 0644);
module_param(nfs4_disable_idmapping, bool, 0644);
+module_param_string(nfs4_unique_id, nfs4_client_id_uniquifier,
+ NFS4_CLIENT_ID_UNIQ_LEN, 0600);
MODULE_PARM_DESC(nfs4_disable_idmapping,
"Turn off NFSv4 idmapping when using 'sec=sys'");
module_param(max_session_slots, ushort, 0644);
@@ -2682,6 +2706,7 @@ MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
module_param(send_implementation_id, ushort, 0644);
MODULE_PARM_DESC(send_implementation_id,
"Send implementation ID with NFSv4.1 exchange_id");
+MODULE_PARM_DESC(nfs4_unique_id, "nfs_client_id4 uniquifier string");
MODULE_ALIAS("nfs4");
#endif /* CONFIG_NFS_V4 */
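The new "migration"/"nomigration" pair toggles a single flag in mnt->options, so the parser leans on the usual bitmask set/clear idiom. A minimal sketch of that idiom, for illustration only:

	unsigned int options = 0;

	options |= NFS_OPTION_MIGRATION;	/* "migration": set the flag */
	options &= ~NFS_OPTION_MIGRATION;	/* "nomigration": clear it -- note the ~ */

Without the complement, &= would mask options down to the migration bit instead of clearing it, which is the bug corrected in the Opt_nomigration case above.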
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e3b55372726c..9347ab7c9574 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -846,6 +846,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
int nfs_flush_incompatible(struct file *file, struct page *page)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct nfs_lock_context *l_ctx;
struct nfs_page *req;
int do_flush, status;
/*
@@ -860,9 +861,12 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
req = nfs_page_find_request(page);
if (req == NULL)
return 0;
- do_flush = req->wb_page != page || req->wb_context != ctx ||
- req->wb_lock_context->lockowner != current->files ||
- req->wb_lock_context->pid != current->tgid;
+ l_ctx = req->wb_lock_context;
+ do_flush = req->wb_page != page || req->wb_context != ctx;
+ if (l_ctx) {
+ do_flush |= l_ctx->lockowner.l_owner != current->files
+ || l_ctx->lockowner.l_pid != current->tgid;
+ }
nfs_release_request(req);
if (!do_flush)
return 0;
@@ -1576,6 +1580,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
/* We have a mismatch. Write the page again */
dprintk(" mismatch\n");
nfs_mark_request_dirty(req);
+ set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
next:
nfs_unlock_and_release_request(req);
}
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 6aa5590c3679..b314888825d5 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -218,8 +218,7 @@ static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
* There must be an encoding function for void results so svc_process
* will work properly.
*/
-int
-nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 9095f3c21df9..97d90d1c8608 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -247,7 +247,7 @@ nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp,
/* Now create the file and set attributes */
nfserr = do_nfsd_create(rqstp, dirfhp, argp->name, argp->len,
attr, newfhp,
- argp->createmode, argp->verf, NULL, NULL);
+ argp->createmode, (u32 *)argp->verf, NULL, NULL);
RETURN_STATUS(nfserr);
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 4c7bd35b1876..bdf29c96e4cd 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1028,7 +1028,6 @@ void nfsd4_cb_recall(struct nfs4_delegation *dp)
cb->cb_msg.rpc_cred = callback_cred;
cb->cb_ops = &nfsd4_cb_recall_ops;
- dp->dl_retries = 1;
INIT_LIST_HEAD(&cb->cb_per_client);
cb->cb_done = true;
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index fdc91a6fc9c4..a1f10c0a6255 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -478,7 +478,7 @@ nfsd_idmap_init(struct net *net)
goto destroy_idtoname_cache;
nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net);
if (IS_ERR(nn->nametoid_cache)) {
- rv = PTR_ERR(nn->idtoname_cache);
+ rv = PTR_ERR(nn->nametoid_cache);
goto unregister_idtoname_cache;
}
rv = cache_register_net(nn->nametoid_cache, net);
@@ -598,7 +598,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
/* Just to make sure it's null-terminated: */
memcpy(buf, name, namelen);
buf[namelen] = '\0';
- ret = kstrtouint(name, 10, id);
+ ret = kstrtouint(buf, 10, id);
return ret == 0;
}
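The one-character change above matters because the XDR-decoded name is length-counted rather than NUL-terminated, while kstrtouint() expects a C string; the parse must therefore run on the bounded, terminated copy. A minimal sketch of the pattern, assuming namelen has already been range-checked:

	unsigned int id;
	char buf[16];			/* enough for any u32 in decimal */

	memcpy(buf, name, namelen);	/* name carries no terminator */
	buf[namelen] = '\0';
	if (kstrtouint(buf, 10, &id))	/* parse the terminated copy */
		return false;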
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index c9c1c0a25417..6c9a4b291dba 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -370,7 +370,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
- status = nfs4_check_open_reclaim(&open->op_clientid);
+ status = nfs4_check_open_reclaim(&open->op_clientid, cstate->minorversion);
if (status)
goto out;
case NFS4_OPEN_CLAIM_FH:
@@ -1054,8 +1054,8 @@ struct nfsd4_operation {
char *op_name;
/* Try to get response size before operation */
nfsd4op_rsize op_rsize_bop;
- stateid_setter op_get_currentstateid;
- stateid_getter op_set_currentstateid;
+ stateid_getter op_get_currentstateid;
+ stateid_setter op_set_currentstateid;
};
static struct nfsd4_operation nfsd4_ops[];
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 48a1bad37334..d0237f872cc4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -758,7 +758,7 @@ static void nfsd4_put_drc_mem(int slotsize, int num)
spin_unlock(&nfsd_drc_lock);
}
-static struct nfsd4_session *alloc_session(int slotsize, int numslots)
+static struct nfsd4_session *__alloc_session(int slotsize, int numslots)
{
struct nfsd4_session *new;
int mem, i;
@@ -852,35 +852,28 @@ static int nfsd4_register_conn(struct nfsd4_conn *conn)
return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
-static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
+static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
- struct nfsd4_conn *conn;
int ret;
- conn = alloc_conn(rqstp, dir);
- if (!conn)
- return nfserr_jukebox;
nfsd4_hash_conn(conn, ses);
ret = nfsd4_register_conn(conn);
if (ret)
/* oops; xprt is already down: */
nfsd4_conn_lost(&conn->cn_xpt_user);
- if (ses->se_client->cl_cb_state == NFSD4_CB_DOWN &&
- dir & NFS4_CDFC4_BACK) {
+ if (conn->cn_flags & NFS4_CDFC4_BACK) {
/* callback channel may be back up */
nfsd4_probe_callback(ses->se_client);
}
- return nfs_ok;
}
-static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
+static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
u32 dir = NFS4_CDFC4_FORE;
- if (ses->se_flags & SESSION4_BACK_CHAN)
+ if (cses->flags & SESSION4_BACK_CHAN)
dir |= NFS4_CDFC4_BACK;
-
- return nfsd4_new_conn(rqstp, ses, dir);
+ return alloc_conn(rqstp, dir);
}
/* must be called under client_lock */
@@ -903,20 +896,21 @@ static void nfsd4_del_conns(struct nfsd4_session *s)
spin_unlock(&clp->cl_lock);
}
+static void __free_session(struct nfsd4_session *ses)
+{
+ nfsd4_put_drc_mem(slot_bytes(&ses->se_fchannel), ses->se_fchannel.maxreqs);
+ free_session_slots(ses);
+ kfree(ses);
+}
+
static void free_session(struct kref *kref)
{
struct nfsd4_session *ses;
- int mem;
lockdep_assert_held(&client_lock);
ses = container_of(kref, struct nfsd4_session, se_ref);
nfsd4_del_conns(ses);
- spin_lock(&nfsd_drc_lock);
- mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
- nfsd_drc_mem_used -= mem;
- spin_unlock(&nfsd_drc_lock);
- free_session_slots(ses);
- kfree(ses);
+ __free_session(ses);
}
void nfsd4_put_session(struct nfsd4_session *ses)
@@ -926,14 +920,10 @@ void nfsd4_put_session(struct nfsd4_session *ses)
spin_unlock(&client_lock);
}
-static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
+static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan)
{
struct nfsd4_session *new;
- struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
int numslots, slotsize;
- __be32 status;
- int idx;
-
/*
* Note decreasing slot size below client's request may
* make it difficult for client to function correctly, whereas
@@ -946,12 +936,18 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n
if (numslots < 1)
return NULL;
- new = alloc_session(slotsize, numslots);
+ new = __alloc_session(slotsize, numslots);
if (!new) {
nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
return NULL;
}
init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
+ return new;
+}
+
+static struct nfsd4_session *init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
+{
+ int idx;
new->se_client = clp;
gen_sessionid(new);
@@ -970,14 +966,6 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n
spin_unlock(&clp->cl_lock);
spin_unlock(&client_lock);
- status = nfsd4_new_conn_from_crses(rqstp, new);
- /* whoops: benny points out, status is ignored! (err, or bogus) */
- if (status) {
- spin_lock(&client_lock);
- free_session(&new->se_ref);
- spin_unlock(&client_lock);
- return NULL;
- }
if (cses->flags & SESSION4_BACK_CHAN) {
struct sockaddr *sa = svc_addr(rqstp);
/*
@@ -990,7 +978,6 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n
rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
}
- nfsd4_probe_callback(clp);
return new;
}
@@ -1131,7 +1118,7 @@ unhash_client_locked(struct nfs4_client *clp)
}
static void
-expire_client(struct nfs4_client *clp)
+destroy_client(struct nfs4_client *clp)
{
struct nfs4_openowner *oo;
struct nfs4_delegation *dp;
@@ -1165,6 +1152,12 @@ expire_client(struct nfs4_client *clp)
spin_unlock(&client_lock);
}
+static void expire_client(struct nfs4_client *clp)
+{
+ nfsd4_client_record_remove(clp);
+ destroy_client(clp);
+}
+
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
memcpy(target->cl_verifier.data, source->data,
@@ -1223,10 +1216,26 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
return true;
}
+/*
+ * RFC 3530 language requires clid_inuse be returned when the
+ * "principal" associated with a request differs from that previously
+ * used. We use the uid, gids, and gss principal string as our best
+ * approximation. We also don't want to allow non-gss use of a client
+ * established using gss: in theory cr_principal should catch that
+ * change, but in practice cr_principal can be null even in the gss case
+ * since gssd doesn't always pass down a principal string.
+ */
+static bool is_gss_cred(struct svc_cred *cr)
+{
+ /* Is cr_flavor one of the gss "pseudoflavors"?: */
+ return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
+}
+
+
static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
- if ((cr1->cr_flavor != cr2->cr_flavor)
+ if ((is_gss_cred(cr1) != is_gss_cred(cr2))
|| (cr1->cr_uid != cr2->cr_uid)
|| (cr1->cr_gid != cr2->cr_gid)
|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
@@ -1340,13 +1349,15 @@ move_to_confirmed(struct nfs4_client *clp)
}
static struct nfs4_client *
-find_confirmed_client(clientid_t *clid)
+find_confirmed_client(clientid_t *clid, bool sessions)
{
struct nfs4_client *clp;
unsigned int idhashval = clientid_hashval(clid->cl_id);
list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
if (same_clid(&clp->cl_clientid, clid)) {
+ if ((bool)clp->cl_minorversion != sessions)
+ return NULL;
renew_client(clp);
return clp;
}
@@ -1355,14 +1366,17 @@ find_confirmed_client(clientid_t *clid)
}
static struct nfs4_client *
-find_unconfirmed_client(clientid_t *clid)
+find_unconfirmed_client(clientid_t *clid, bool sessions)
{
struct nfs4_client *clp;
unsigned int idhashval = clientid_hashval(clid->cl_id);
list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
- if (same_clid(&clp->cl_clientid, clid))
+ if (same_clid(&clp->cl_clientid, clid)) {
+ if ((bool)clp->cl_minorversion != sessions)
+ return NULL;
return clp;
+ }
}
return NULL;
}
@@ -1651,6 +1665,7 @@ out_new:
status = nfserr_jukebox;
goto out;
}
+ new->cl_minorversion = 1;
gen_clid(new);
add_to_unconfirmed(new, strhashval);
@@ -1743,67 +1758,71 @@ nfsd4_create_session(struct svc_rqst *rqstp,
struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
struct nfsd4_session *new;
+ struct nfsd4_conn *conn;
struct nfsd4_clid_slot *cs_slot = NULL;
- bool confirm_me = false;
__be32 status = 0;
if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
return nfserr_inval;
+ if (check_forechannel_attrs(cr_ses->fore_channel))
+ return nfserr_toosmall;
+ new = alloc_session(&cr_ses->fore_channel);
+ if (!new)
+ return nfserr_jukebox;
+ status = nfserr_jukebox;
+ conn = alloc_conn_from_crses(rqstp, cr_ses);
+ if (!conn)
+ goto out_free_session;
nfs4_lock_state();
- unconf = find_unconfirmed_client(&cr_ses->clientid);
- conf = find_confirmed_client(&cr_ses->clientid);
+ unconf = find_unconfirmed_client(&cr_ses->clientid, true);
+ conf = find_confirmed_client(&cr_ses->clientid, true);
if (conf) {
cs_slot = &conf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
if (status == nfserr_replay_cache) {
status = nfsd4_replay_create_session(cr_ses, cs_slot);
- goto out;
+ goto out_free_conn;
} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
status = nfserr_seq_misordered;
- goto out;
+ goto out_free_conn;
}
} else if (unconf) {
+ unsigned int hash;
+ struct nfs4_client *old;
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
!rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
status = nfserr_clid_inuse;
- goto out;
+ goto out_free_conn;
}
cs_slot = &unconf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
if (status) {
/* an unconfirmed replay returns misordered */
status = nfserr_seq_misordered;
- goto out;
+ goto out_free_conn;
}
- confirm_me = true;
+ hash = clientstr_hashval(unconf->cl_recdir);
+ old = find_confirmed_client_by_str(unconf->cl_recdir, hash);
+ if (old)
+ expire_client(old);
+ move_to_confirmed(unconf);
conf = unconf;
} else {
status = nfserr_stale_clientid;
- goto out;
+ goto out_free_conn;
}
-
- /*
- * XXX: we should probably set this at creation time, and check
- * for consistent minorversion use throughout:
- */
- conf->cl_minorversion = 1;
+ status = nfs_ok;
/*
* We do not support RDMA or persistent sessions
*/
cr_ses->flags &= ~SESSION4_PERSIST;
cr_ses->flags &= ~SESSION4_RDMA;
- status = nfserr_toosmall;
- if (check_forechannel_attrs(cr_ses->fore_channel))
- goto out;
+ init_session(rqstp, new, conf, cr_ses);
+ nfsd4_init_conn(rqstp, conn, new);
- status = nfserr_jukebox;
- new = alloc_init_session(rqstp, conf, cr_ses);
- if (!new)
- goto out;
- status = nfs_ok;
memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
NFS4_MAX_SESSIONID_LEN);
memcpy(&cr_ses->fore_channel, &new->se_fchannel,
@@ -1813,18 +1832,15 @@ nfsd4_create_session(struct svc_rqst *rqstp,
/* cache solo and embedded create sessions under the state lock */
nfsd4_cache_create_session(cr_ses, cs_slot, status);
- if (confirm_me) {
- unsigned int hash = clientstr_hashval(unconf->cl_recdir);
- struct nfs4_client *old =
- find_confirmed_client_by_str(conf->cl_recdir, hash);
- if (old)
- expire_client(old);
- move_to_confirmed(conf);
- }
out:
nfs4_unlock_state();
dprintk("%s returns %d\n", __func__, ntohl(status));
return status;
+out_free_conn:
+ free_conn(conn);
+out_free_session:
+ __free_session(new);
+ goto out;
}
static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
@@ -1854,6 +1870,7 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
struct nfsd4_bind_conn_to_session *bcts)
{
__be32 status;
+ struct nfsd4_conn *conn;
if (!nfsd4_last_compound_op(rqstp))
return nfserr_not_only_op;
@@ -1870,9 +1887,13 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
return nfserr_badsession;
status = nfsd4_map_bcts_dir(&bcts->dir);
- if (!status)
- nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
- return status;
+ if (status)
+ return status;
+ conn = alloc_conn(rqstp, bcts->dir);
+ if (!conn)
+ return nfserr_jukebox;
+ nfsd4_init_conn(rqstp, conn, cstate->session);
+ return nfs_ok;
}
static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
@@ -2085,8 +2106,8 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
__be32 status = 0;
nfs4_lock_state();
- unconf = find_unconfirmed_client(&dc->clientid);
- conf = find_confirmed_client(&dc->clientid);
+ unconf = find_unconfirmed_client(&dc->clientid, true);
+ conf = find_confirmed_client(&dc->clientid, true);
if (conf) {
clp = conf;
@@ -2200,10 +2221,6 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
copy_clid(new, conf);
else /* case 4 (new client) or cases 2, 3 (client reboot): */
gen_clid(new);
- /*
- * XXX: we should probably set this at creation time, and check
- * for consistent minorversion use throughout:
- */
new->cl_minorversion = 0;
gen_callback(new, setclid, rqstp);
add_to_unconfirmed(new, strhashval);
@@ -2232,8 +2249,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
return nfserr_stale_clientid;
nfs4_lock_state();
- conf = find_confirmed_client(clid);
- unconf = find_unconfirmed_client(clid);
+ conf = find_confirmed_client(clid, false);
+ unconf = find_unconfirmed_client(clid, false);
/*
* We try hard to give out unique clientid's, so if we get an
* attempt to confirm the same clientid with a different cred,
@@ -2262,10 +2279,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
unsigned int hash = clientstr_hashval(unconf->cl_recdir);
conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
- if (conf) {
- nfsd4_client_record_remove(conf);
+ if (conf)
expire_client(conf);
- }
move_to_confirmed(unconf);
nfsd4_probe_callback(unconf);
}
@@ -2447,16 +2462,20 @@ same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
}
static struct nfs4_openowner *
-find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
+find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, bool sessions)
{
struct nfs4_stateowner *so;
struct nfs4_openowner *oo;
+ struct nfs4_client *clp;
list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
if (!so->so_is_open_owner)
continue;
if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
oo = openowner(so);
+ clp = oo->oo_owner.so_client;
+ if ((bool)clp->cl_minorversion != sessions)
+ return NULL;
renew_client(oo->oo_owner.so_client);
return oo;
}
@@ -2600,10 +2619,10 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
return nfserr_jukebox;
strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
- oo = find_openstateowner_str(strhashval, open);
+ oo = find_openstateowner_str(strhashval, open, cstate->minorversion);
open->op_openowner = oo;
if (!oo) {
- clp = find_confirmed_client(clientid);
+ clp = find_confirmed_client(clientid, cstate->minorversion);
if (clp == NULL)
return nfserr_expired;
goto new_owner;
@@ -2705,11 +2724,6 @@ nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_st
return nfs_ok;
}
-static void nfs4_free_stateid(struct nfs4_ol_stateid *s)
-{
- kmem_cache_free(stateid_slab, s);
-}
-
static inline int nfs4_access_to_access(u32 nfs4_access)
{
int flags = 0;
@@ -3087,7 +3101,7 @@ void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
if (open->op_file)
nfsd4_free_file(open->op_file);
if (open->op_stp)
- nfs4_free_stateid(open->op_stp);
+ free_generic_stateid(open->op_stp);
}
__be32
@@ -3104,7 +3118,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_stale_clientid;
if (STALE_CLIENTID(clid, nn))
goto out;
- clp = find_confirmed_client(clid);
+ clp = find_confirmed_client(clid, cstate->minorversion);
status = nfserr_expired;
if (clp == NULL) {
/* We assume the client took too long to RENEW. */
@@ -3180,7 +3194,6 @@ nfs4_laundromat(void)
clp = list_entry(pos, struct nfs4_client, cl_lru);
dprintk("NFSD: purging unused client (clientid %08x)\n",
clp->cl_clientid.cl_id);
- nfsd4_client_record_remove(clp);
expire_client(clp);
}
spin_lock(&recall_lock);
@@ -3372,7 +3385,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
return nfs_ok;
}
-static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s)
+static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s, bool sessions)
{
struct nfs4_client *cl;
struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
@@ -3381,7 +3394,7 @@ static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, s
return nfserr_bad_stateid;
if (STALE_STATEID(stateid, nn))
return nfserr_stale_stateid;
- cl = find_confirmed_client(&stateid->si_opaque.so_clid);
+ cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions);
if (!cl)
return nfserr_expired;
*s = find_stateid_by_type(cl, stateid, typemask);
@@ -3414,7 +3427,7 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return check_special_stateids(net, current_fh, stateid, flags);
- status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s);
+ status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s, cstate->minorversion);
if (status)
return status;
status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
@@ -3564,7 +3577,7 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
seqid, STATEID_VAL(stateid));
*stpp = NULL;
- status = nfsd4_lookup_stateid(stateid, typemask, &s);
+ status = nfsd4_lookup_stateid(stateid, typemask, &s, cstate->minorversion);
if (status)
return status;
*stpp = openlockstateid(s);
@@ -3765,6 +3778,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
nfsd4_close_open_stateid(stp);
+ release_last_closed_stateid(oo);
oo->oo_last_closed_stid = stp;
if (list_empty(&oo->oo_owner.so_stateids)) {
@@ -3801,7 +3815,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
inode = cstate->current_fh.fh_dentry->d_inode;
nfs4_lock_state();
- status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s);
+ status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s, cstate->minorversion);
if (status)
goto out;
dp = delegstateid(s);
@@ -4045,8 +4059,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_lockowner *lock_sop = NULL;
struct nfs4_ol_stateid *lock_stp;
struct file *filp = NULL;
- struct file_lock file_lock;
- struct file_lock conflock;
+ struct file_lock *file_lock = NULL;
+ struct file_lock *conflock = NULL;
__be32 status = 0;
bool new_state = false;
int lkflg;
@@ -4116,21 +4130,28 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (!locks_in_grace(SVC_NET(rqstp)) && lock->lk_reclaim)
goto out;
- locks_init_lock(&file_lock);
+ file_lock = locks_alloc_lock();
+ if (!file_lock) {
+ dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
+ status = nfserr_jukebox;
+ goto out;
+ }
+
+ locks_init_lock(file_lock);
switch (lock->lk_type) {
case NFS4_READ_LT:
case NFS4_READW_LT:
filp = find_readable_file(lock_stp->st_file);
if (filp)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
- file_lock.fl_type = F_RDLCK;
+ file_lock->fl_type = F_RDLCK;
break;
case NFS4_WRITE_LT:
case NFS4_WRITEW_LT:
filp = find_writeable_file(lock_stp->st_file);
if (filp)
get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
- file_lock.fl_type = F_WRLCK;
+ file_lock->fl_type = F_WRLCK;
break;
default:
status = nfserr_inval;
@@ -4140,22 +4161,23 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_openmode;
goto out;
}
- file_lock.fl_owner = (fl_owner_t)lock_sop;
- file_lock.fl_pid = current->tgid;
- file_lock.fl_file = filp;
- file_lock.fl_flags = FL_POSIX;
- file_lock.fl_lmops = &nfsd_posix_mng_ops;
-
- file_lock.fl_start = lock->lk_offset;
- file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
- nfs4_transform_lock_offset(&file_lock);
-
- /*
- * Try to lock the file in the VFS.
- * Note: locks.c uses the BKL to protect the inode's lock list.
- */
+ file_lock->fl_owner = (fl_owner_t)lock_sop;
+ file_lock->fl_pid = current->tgid;
+ file_lock->fl_file = filp;
+ file_lock->fl_flags = FL_POSIX;
+ file_lock->fl_lmops = &nfsd_posix_mng_ops;
+ file_lock->fl_start = lock->lk_offset;
+ file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
+ nfs4_transform_lock_offset(file_lock);
+
+ conflock = locks_alloc_lock();
+ if (!conflock) {
+ dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
+ status = nfserr_jukebox;
+ goto out;
+ }
- err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
+ err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
switch (-err) {
case 0: /* success! */
update_stateid(&lock_stp->st_stid.sc_stateid);
@@ -4166,7 +4188,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
case (EAGAIN): /* conflock holds conflicting lock */
status = nfserr_denied;
dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
- nfs4_set_lock_denied(&conflock, &lock->lk_denied);
+ nfs4_set_lock_denied(conflock, &lock->lk_denied);
break;
case (EDEADLK):
status = nfserr_deadlock;
@@ -4181,6 +4203,10 @@ out:
release_lockowner(lock_sop);
if (!cstate->replay_owner)
nfs4_unlock_state();
+ if (file_lock)
+ locks_free_lock(file_lock);
+ if (conflock)
+ locks_free_lock(conflock);
return status;
}
@@ -4209,7 +4235,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_lockt *lockt)
{
struct inode *inode;
- struct file_lock file_lock;
+ struct file_lock *file_lock = NULL;
struct nfs4_lockowner *lo;
__be32 status;
struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
@@ -4230,15 +4256,21 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
inode = cstate->current_fh.fh_dentry->d_inode;
- locks_init_lock(&file_lock);
+ file_lock = locks_alloc_lock();
+ if (!file_lock) {
+ dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
+ status = nfserr_jukebox;
+ goto out;
+ }
+ locks_init_lock(file_lock);
switch (lockt->lt_type) {
case NFS4_READ_LT:
case NFS4_READW_LT:
- file_lock.fl_type = F_RDLCK;
+ file_lock->fl_type = F_RDLCK;
break;
case NFS4_WRITE_LT:
case NFS4_WRITEW_LT:
- file_lock.fl_type = F_WRLCK;
+ file_lock->fl_type = F_WRLCK;
break;
default:
dprintk("NFSD: nfs4_lockt: bad lock type!\n");
@@ -4248,25 +4280,27 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
if (lo)
- file_lock.fl_owner = (fl_owner_t)lo;
- file_lock.fl_pid = current->tgid;
- file_lock.fl_flags = FL_POSIX;
+ file_lock->fl_owner = (fl_owner_t)lo;
+ file_lock->fl_pid = current->tgid;
+ file_lock->fl_flags = FL_POSIX;
- file_lock.fl_start = lockt->lt_offset;
- file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
+ file_lock->fl_start = lockt->lt_offset;
+ file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
- nfs4_transform_lock_offset(&file_lock);
+ nfs4_transform_lock_offset(file_lock);
- status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
+ status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
if (status)
goto out;
- if (file_lock.fl_type != F_UNLCK) {
+ if (file_lock->fl_type != F_UNLCK) {
status = nfserr_denied;
- nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
+ nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
}
out:
nfs4_unlock_state();
+ if (file_lock)
+ locks_free_lock(file_lock);
return status;
}
@@ -4276,7 +4310,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfs4_ol_stateid *stp;
struct file *filp = NULL;
- struct file_lock file_lock;
+ struct file_lock *file_lock = NULL;
__be32 status;
int err;
@@ -4298,23 +4332,29 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_lock_range;
goto out;
}
- BUG_ON(!filp);
- locks_init_lock(&file_lock);
- file_lock.fl_type = F_UNLCK;
- file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
- file_lock.fl_pid = current->tgid;
- file_lock.fl_file = filp;
- file_lock.fl_flags = FL_POSIX;
- file_lock.fl_lmops = &nfsd_posix_mng_ops;
- file_lock.fl_start = locku->lu_offset;
-
- file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
- nfs4_transform_lock_offset(&file_lock);
+ file_lock = locks_alloc_lock();
+ if (!file_lock) {
+ dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
+ status = nfserr_jukebox;
+ goto out;
+ }
+ locks_init_lock(file_lock);
+ file_lock->fl_type = F_UNLCK;
+ file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
+ file_lock->fl_pid = current->tgid;
+ file_lock->fl_file = filp;
+ file_lock->fl_flags = FL_POSIX;
+ file_lock->fl_lmops = &nfsd_posix_mng_ops;
+ file_lock->fl_start = locku->lu_offset;
+
+ file_lock->fl_end = last_byte_offset(locku->lu_offset,
+ locku->lu_length);
+ nfs4_transform_lock_offset(file_lock);
/*
* Try to unlock the file in the VFS.
*/
- err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
+ err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
if (err) {
dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
goto out_nfserr;
@@ -4328,6 +4368,8 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
out:
if (!cstate->replay_owner)
nfs4_unlock_state();
+ if (file_lock)
+ locks_free_lock(file_lock);
return status;
out_nfserr:
@@ -4501,12 +4543,12 @@ nfsd4_find_reclaim_client(struct nfs4_client *clp)
* Called from OPEN. Look for clientid in reclaim list.
*/
__be32
-nfs4_check_open_reclaim(clientid_t *clid)
+nfs4_check_open_reclaim(clientid_t *clid, bool sessions)
{
struct nfs4_client *clp;
/* find clientid in conf_id_hashtbl */
- clp = find_confirmed_client(clid);
+ clp = find_confirmed_client(clid, sessions);
if (clp == NULL)
return nfserr_reclaim_bad;
@@ -4522,7 +4564,6 @@ void nfsd_forget_clients(u64 num)
nfs4_lock_state();
list_for_each_entry_safe(clp, next, &client_lru, cl_lru) {
- nfsd4_client_record_remove(clp);
expire_client(clp);
if (++count == num)
break;
@@ -4582,7 +4623,7 @@ void nfsd_forget_openowners(u64 num)
printk(KERN_INFO "NFSD: Forgot %d open owners", count);
}
-int nfsd_process_n_delegations(u64 num, struct list_head *list)
+static int nfsd_process_n_delegations(u64 num, struct list_head *list)
{
int i, count = 0;
struct nfs4_file *fp, *fnext;
@@ -4747,11 +4788,11 @@ __nfs4_state_shutdown(void)
for (i = 0; i < CLIENT_HASH_SIZE; i++) {
while (!list_empty(&conf_id_hashtbl[i])) {
clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
- expire_client(clp);
+ destroy_client(clp);
}
while (!list_empty(&unconf_str_hashtbl[i])) {
clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
- expire_client(clp);
+ destroy_client(clp);
}
}
INIT_LIST_HEAD(&reaplist);
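Several hunks above convert on-stack struct file_lock instances to heap allocations via locks_alloc_lock(). A minimal sketch of the resulting alloc/init/free pattern, with the error value chosen as elsewhere in this file (error paths elided):

	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return nfserr_jukebox;	/* report OOM as a retryable error */
	locks_init_lock(fl);
	fl->fl_type = F_RDLCK;
	/* ... fill in owner, range, flags, then hand fl to vfs_lock_file() ... */
	locks_free_lock(fl);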
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 6322df36031f..fd548d155088 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2659,7 +2659,7 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp,
RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 8);
WRITEMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
WRITE32(bcts->dir);
- /* XXX: ? */
+ /* Sorry, we do not yet support RDMA over 4.1: */
WRITE32(0);
ADJUST_ARGS();
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index fa49cff5ee65..dab350dfc376 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -406,7 +406,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return rv;
if (newthreads < 0)
return -EINVAL;
- rv = nfsd_svc(NFS_PORT, newthreads);
+ rv = nfsd_svc(newthreads);
if (rv < 0)
return rv;
} else
@@ -683,25 +683,6 @@ static ssize_t __write_ports_addfd(char *buf)
}
/*
- * A '-' followed by the 'name' of a socket means we close the socket.
- */
-static ssize_t __write_ports_delfd(char *buf)
-{
- char *toclose;
- int len = 0;
-
- toclose = kstrdup(buf + 1, GFP_KERNEL);
- if (toclose == NULL)
- return -ENOMEM;
-
- if (nfsd_serv != NULL)
- len = svc_sock_names(nfsd_serv, buf,
- SIMPLE_TRANSACTION_LIMIT, toclose);
- kfree(toclose);
- return len;
-}
-
-/*
 * A transport listener is added by writing its transport name and
* a port number.
*/
@@ -712,7 +693,7 @@ static ssize_t __write_ports_addxprt(char *buf)
int port, err;
struct net *net = &init_net;
- if (sscanf(buf, "%15s %4u", transport, &port) != 2)
+ if (sscanf(buf, "%15s %5u", transport, &port) != 2)
return -EINVAL;
if (port < 1 || port > USHRT_MAX)
@@ -746,31 +727,6 @@ out_err:
return err;
}
-/*
- * A transport listener is removed by writing a "-", it's transport
- * name, and it's port number.
- */
-static ssize_t __write_ports_delxprt(char *buf)
-{
- struct svc_xprt *xprt;
- char transport[16];
- int port;
-
- if (sscanf(&buf[1], "%15s %4u", transport, &port) != 2)
- return -EINVAL;
-
- if (port < 1 || port > USHRT_MAX || nfsd_serv == NULL)
- return -EINVAL;
-
- xprt = svc_find_xprt(nfsd_serv, transport, &init_net, AF_UNSPEC, port);
- if (xprt == NULL)
- return -ENOTCONN;
-
- svc_close_xprt(xprt);
- svc_xprt_put(xprt);
- return 0;
-}
-
static ssize_t __write_ports(struct file *file, char *buf, size_t size)
{
if (size == 0)
@@ -779,15 +735,9 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size)
if (isdigit(buf[0]))
return __write_ports_addfd(buf);
- if (buf[0] == '-' && isdigit(buf[1]))
- return __write_ports_delfd(buf);
-
if (isalpha(buf[0]))
return __write_ports_addxprt(buf);
- if (buf[0] == '-' && isalpha(buf[1]))
- return __write_ports_delxprt(buf);
-
return -EINVAL;
}
@@ -825,21 +775,6 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size)
* OR
*
* Input:
- * buf: C string containing a "-" followed
- * by an integer value representing a
- * previously passed in socket file
- * descriptor
- * size: non-zero length of C string in @buf
- * Output:
- * On success: NFS service no longer listens on that socket;
- * passed-in buffer filled with a '\n'-terminated C
- * string containing a unique name of the listener;
- * return code is the size in bytes of the string
- * On error: return code is a negative errno value
- *
- * OR
- *
- * Input:
* buf: C string containing a transport
* name and an unsigned integer value
* representing the port to listen on,
@@ -848,19 +783,6 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size)
* Output:
* On success: returns zero; NFS service is started
* On error: return code is a negative errno value
- *
- * OR
- *
- * Input:
- * buf: C string containing a "-" followed
- * by a transport name and an unsigned
- * integer value representing the port
- * to listen on, separated by whitespace
- * size: non-zero length of C string in @buf
- * Output:
- * On success: returns zero; NFS service no longer listens
- * on that transport
- * On error: return code is a negative errno value
*/
static ssize_t write_ports(struct file *file, char *buf, size_t size)
{
@@ -1008,8 +930,6 @@ static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
return nfsd4_write_time(file, buf, size, &nfsd4_grace);
}
-extern char *nfs4_recoverydir(void);
-
static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size)
{
char *mesg = buf;
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 2244222368ab..80d5ce40aadb 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -65,7 +65,7 @@ extern const struct seq_operations nfs_exports_op;
/*
* Function prototypes.
*/
-int nfsd_svc(unsigned short port, int nrservs);
+int nfsd_svc(int nrservs);
int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp);
int nfsd_nrthreads(void);
@@ -124,6 +124,7 @@ int nfs4_state_start(void);
void nfs4_state_shutdown(void);
void nfs4_reset_lease(time_t leasetime);
int nfs4_reset_recoverydir(char *recdir);
+char * nfs4_recoverydir(void);
#else
static inline void nfs4_state_init(void) { }
static inline int nfsd4_init_slabs(void) { return 0; }
@@ -132,6 +133,7 @@ static inline int nfs4_state_start(void) { return 0; }
static inline void nfs4_state_shutdown(void) { }
static inline void nfs4_reset_lease(time_t leasetime) { }
static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
+static inline char * nfs4_recoverydir(void) {return NULL; }
#endif
/*
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 240473cb708f..2013aa001dab 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -183,18 +183,18 @@ int nfsd_nrthreads(void)
return rv;
}
-static int nfsd_init_socks(int port)
+static int nfsd_init_socks(void)
{
int error;
if (!list_empty(&nfsd_serv->sv_permsocks))
return 0;
- error = svc_create_xprt(nfsd_serv, "udp", &init_net, PF_INET, port,
+ error = svc_create_xprt(nfsd_serv, "udp", &init_net, PF_INET, NFS_PORT,
SVC_SOCK_DEFAULTS);
if (error < 0)
return error;
- error = svc_create_xprt(nfsd_serv, "tcp", &init_net, PF_INET, port,
+ error = svc_create_xprt(nfsd_serv, "tcp", &init_net, PF_INET, NFS_PORT,
SVC_SOCK_DEFAULTS);
if (error < 0)
return error;
@@ -204,7 +204,7 @@ static int nfsd_init_socks(int port)
static bool nfsd_up = false;
-static int nfsd_startup(unsigned short port, int nrservs)
+static int nfsd_startup(int nrservs)
{
int ret;
@@ -218,7 +218,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
ret = nfsd_racache_init(2*nrservs);
if (ret)
return ret;
- ret = nfsd_init_socks(port);
+ ret = nfsd_init_socks();
if (ret)
goto out_racache;
ret = lockd_up(&init_net);
@@ -436,7 +436,7 @@ int nfsd_set_nrthreads(int n, int *nthreads)
* this is the first time nrservs is nonzero.
*/
int
-nfsd_svc(unsigned short port, int nrservs)
+nfsd_svc(int nrservs)
{
int error;
bool nfsd_up_before;
@@ -458,7 +458,7 @@ nfsd_svc(unsigned short port, int nrservs)
nfsd_up_before = nfsd_up;
- error = nfsd_startup(port, nrservs);
+ error = nfsd_startup(nrservs);
if (error)
goto out_destroy;
error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
@@ -487,7 +487,7 @@ static int
nfsd(void *vrqstp)
{
struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
- int err, preverr = 0;
+ int err;
/* Lock module and set up kernel thread */
mutex_lock(&nfsd_mutex);
@@ -534,16 +534,6 @@ nfsd(void *vrqstp)
;
if (err == -EINTR)
break;
- else if (err < 0) {
- if (err != preverr) {
- printk(KERN_WARNING "%s: unexpected error "
- "from svc_recv (%d)\n", __func__, -err);
- preverr = err;
- }
- schedule_timeout_uninterruptible(HZ);
- continue;
- }
-
validate_process_creds();
svc_process(rqstp);
validate_process_creds();
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 22bd0a66c356..e036894bce57 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -373,11 +373,7 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
return container_of(so, struct nfs4_lockowner, lo_owner);
}
-/*
-* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
-* o fi_perfile list is used to search for conflicting
-* share_acces, share_deny on the file.
-*/
+/* nfs4_file: a file opened by some number of (open) nfs4_stateowners. */
struct nfs4_file {
atomic_t fi_ref;
struct list_head fi_hash; /* hash by "struct inode *" */
@@ -459,7 +455,7 @@ extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void);
extern void nfs4_release_reclaim(void);
extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(struct nfs4_client *crp);
-extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
+extern __be32 nfs4_check_open_reclaim(clientid_t *clid, bool sessions);
extern void nfs4_free_openowner(struct nfs4_openowner *);
extern void nfs4_free_lockowner(struct nfs4_lockowner *);
extern int set_callback_cred(void);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 3f67b8e12251..c120b48ec305 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1581,7 +1581,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
*/
oldfs = get_fs(); set_fs(KERNEL_DS);
- host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
+ host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
set_fs(oldfs);
if (host_err < 0)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index a4d56ac02e6c..16f35f7423c5 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -116,6 +116,7 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (unlikely(ret))
goto out;
+ file_update_time(vma->vm_file);
ret = __block_page_mkwrite(vma, vmf, nilfs_get_block);
if (ret) {
nilfs_transaction_abort(inode->i_sb);
@@ -134,13 +135,13 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
static const struct vm_operations_struct nilfs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = nilfs_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &nilfs_file_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index d150372fd81d..47a87dda54ce 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -173,6 +173,7 @@ out:
static const struct vm_operations_struct ocfs2_file_vm_ops = {
.fault = ocfs2_fault,
.page_mkwrite = ocfs2_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
@@ -188,7 +189,6 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
ocfs2_inode_unlock(file->f_dentry->d_inode, lock_level);
out:
vma->vm_ops = &ocfs2_file_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 2c6d95257a4d..77e3cb2962b4 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -146,8 +146,7 @@ static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
be64_to_cpu(entry->e_blocks);
if (omfs_allocate_block(inode->i_sb, new_block)) {
- entry->e_blocks =
- cpu_to_be64(be64_to_cpu(entry->e_blocks) + 1);
+ be64_add_cpu(&entry->e_blocks, 1);
terminator->e_blocks = ~(cpu_to_be64(
be64_to_cpu(~terminator->e_blocks) + 1));
goto out;
@@ -177,7 +176,7 @@ static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
be64_to_cpu(~terminator->e_blocks) + (u64) new_count));
/* write in new entry */
- oe->e_extent_count = cpu_to_be32(1 + be32_to_cpu(oe->e_extent_count));
+ be32_add_cpu(&oe->e_extent_count, 1);
out:
*ret_block = new_block;
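be64_add_cpu() (and its be32 sibling used just above) folds the convert/add/convert round trip into one helper; a minimal equivalence sketch:

	__be64 v = cpu_to_be64(41);

	v = cpu_to_be64(be64_to_cpu(v) + 1);	/* open-coded form being replaced */
	be64_add_cpu(&v, 1);			/* equivalent helper */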
diff --git a/fs/open.c b/fs/open.c
index 44da0feeca2c..59071f55bf7f 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -478,7 +478,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
file = fget(fd);
if (file) {
- audit_inode(NULL, file->f_path.dentry);
+ audit_inode(NULL, file->f_path.dentry, 0);
err = chmod_common(&file->f_path, mode);
fput(file);
}
@@ -588,7 +588,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
error = mnt_want_write_file(f.file);
if (error)
goto out_fput;
- audit_inode(NULL, f.file->f_path.dentry);
+ audit_inode(NULL, f.file->f_path.dentry, 0);
error = chown_common(&f.file->f_path, user, group);
mnt_drop_write_file(f.file);
out_fput:
@@ -859,6 +859,24 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
}
/**
+ * file_open_name - open file and return file pointer
+ *
+ * @name: struct filename containing path to open
+ * @flags: open flags as per the open(2) second argument
+ * @mode: mode for the new file if O_CREAT is set, else ignored
+ *
+ * This is the helper to open a file from kernelspace if you really
+ * have to. But in general you should not do this, so please move
+ * along, nothing to see here...
+ */
+struct file *file_open_name(struct filename *name, int flags, umode_t mode)
+{
+ struct open_flags op;
+ int lookup = build_open_flags(flags, mode, &op);
+ return do_filp_open(AT_FDCWD, name, &op, lookup);
+}
+
+/**
* filp_open - open file and return file pointer
*
* @filename: path to open
@@ -871,9 +889,8 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
*/
struct file *filp_open(const char *filename, int flags, umode_t mode)
{
- struct open_flags op;
- int lookup = build_open_flags(flags, mode, &op);
- return do_filp_open(AT_FDCWD, filename, &op, lookup);
+ struct filename name = {.name = filename};
+ return file_open_name(&name, flags, mode);
}
EXPORT_SYMBOL(filp_open);
@@ -895,7 +912,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_flags op;
int lookup = build_open_flags(flags, mode, &op);
- char *tmp = getname(filename);
+ struct filename *tmp = getname(filename);
int fd = PTR_ERR(tmp);
if (!IS_ERR(tmp)) {
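With this change filp_open() becomes a thin wrapper that builds a struct filename on the stack and defers to the new file_open_name(). A minimal caller-side sketch (the path is hypothetical):

	struct file *f = filp_open("/etc/fstab", O_RDONLY, 0);

	if (IS_ERR(f))
		return PTR_ERR(f);
	/* ... read or write through f ... */
	filp_close(f, NULL);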
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d295af993677..144a96732dd7 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -873,111 +873,6 @@ static const struct file_operations proc_environ_operations = {
.release = mem_release,
};
-static ssize_t oom_adjust_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
- char buffer[PROC_NUMBUF];
- size_t len;
- int oom_adjust = OOM_DISABLE;
- unsigned long flags;
-
- if (!task)
- return -ESRCH;
-
- if (lock_task_sighand(task, &flags)) {
- oom_adjust = task->signal->oom_adj;
- unlock_task_sighand(task, &flags);
- }
-
- put_task_struct(task);
-
- len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust);
-
- return simple_read_from_buffer(buf, count, ppos, buffer, len);
-}
-
-static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct task_struct *task;
- char buffer[PROC_NUMBUF];
- int oom_adjust;
- unsigned long flags;
- int err;
-
- memset(buffer, 0, sizeof(buffer));
- if (count > sizeof(buffer) - 1)
- count = sizeof(buffer) - 1;
- if (copy_from_user(buffer, buf, count)) {
- err = -EFAULT;
- goto out;
- }
-
- err = kstrtoint(strstrip(buffer), 0, &oom_adjust);
- if (err)
- goto out;
- if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
- oom_adjust != OOM_DISABLE) {
- err = -EINVAL;
- goto out;
- }
-
- task = get_proc_task(file->f_path.dentry->d_inode);
- if (!task) {
- err = -ESRCH;
- goto out;
- }
-
- task_lock(task);
- if (!task->mm) {
- err = -EINVAL;
- goto err_task_lock;
- }
-
- if (!lock_task_sighand(task, &flags)) {
- err = -ESRCH;
- goto err_task_lock;
- }
-
- if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
- err = -EACCES;
- goto err_sighand;
- }
-
- /*
- * Warn that /proc/pid/oom_adj is deprecated, see
- * Documentation/feature-removal-schedule.txt.
- */
- printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
- current->comm, task_pid_nr(current), task_pid_nr(task),
- task_pid_nr(task));
- task->signal->oom_adj = oom_adjust;
- /*
- * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
- * value is always attainable.
- */
- if (task->signal->oom_adj == OOM_ADJUST_MAX)
- task->signal->oom_score_adj = OOM_SCORE_ADJ_MAX;
- else
- task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
- -OOM_DISABLE;
- trace_oom_score_adj_update(task);
-err_sighand:
- unlock_task_sighand(task, &flags);
-err_task_lock:
- task_unlock(task);
- put_task_struct(task);
-out:
- return err < 0 ? err : count;
-}
-
-static const struct file_operations proc_oom_adjust_operations = {
- .read = oom_adjust_read,
- .write = oom_adjust_write,
- .llseek = generic_file_llseek,
-};
-
static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -1051,15 +946,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
task->signal->oom_score_adj_min = oom_score_adj;
trace_oom_score_adj_update(task);
- /*
- * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
- * always attainable.
- */
- if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
- task->signal->oom_adj = OOM_DISABLE;
- else
- task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) /
- OOM_SCORE_ADJ_MAX;
+
err_sighand:
unlock_task_sighand(task, &flags);
err_task_lock:
@@ -2371,7 +2258,8 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
pid_t tgid = task_tgid_nr_ns(current, ns);
char *name = ERR_PTR(-ENOENT);
if (tgid) {
- name = __getname();
+			/* 11 chars for the longest signed int in decimal, + 1 for the NUL */
+ name = kmalloc(12, GFP_KERNEL);
if (!name)
name = ERR_PTR(-ENOMEM);
else
@@ -2386,7 +2274,7 @@ static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
{
char *s = nd_get_link(nd);
if (!IS_ERR(s))
- __putname(s);
+ kfree(s);
}
static const struct inode_operations proc_self_inode_operations = {
@@ -2710,7 +2598,6 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
- REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
@@ -3077,7 +2964,6 @@ static const struct pid_entry tid_base_stuff[] = {
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
- REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index b3647fe6a608..0d80cef4cfb9 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -427,7 +427,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
pde_get(de);
spin_unlock(&proc_subdir_lock);
- error = -EINVAL;
+ error = -ENOMEM;
inode = proc_get_inode(dir->i_sb, de);
goto out_unlock;
}
@@ -605,7 +605,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
unsigned int len;
/* make sure name is valid */
- if (!name || !strlen(name)) goto out;
+ if (!name || !strlen(name))
+ goto out;
if (xlate_proc_name(name, parent, &fn) != 0)
goto out;
@@ -616,20 +617,18 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
len = strlen(fn);
- ent = kmalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
- if (!ent) goto out;
+ ent = kzalloc(sizeof(struct proc_dir_entry) + len + 1, GFP_KERNEL);
+ if (!ent)
+ goto out;
- memset(ent, 0, sizeof(struct proc_dir_entry));
memcpy(ent->name, fn, len + 1);
ent->namelen = len;
ent->mode = mode;
ent->nlink = nlink;
atomic_set(&ent->count, 1);
- ent->pde_users = 0;
spin_lock_init(&ent->pde_unload_lock);
- ent->pde_unload_completion = NULL;
INIT_LIST_HEAD(&ent->pde_openers);
- out:
+out:
return ent;
}
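kzalloc() is kmalloc() plus zeroing of the whole allocation, which is why the explicit memset() and the redundant zero assignments to pde_users and pde_unload_completion can be dropped. The equivalence, sketched with error checking elided:

	/* before: allocate, then zero (only the struct head was cleared) */
	ent = kmalloc(sizeof(*ent) + len + 1, GFP_KERNEL);
	memset(ent, 0, sizeof(*ent));

	/* after: one call, entire allocation zeroed */
	ent = kzalloc(sizeof(*ent) + len + 1, GFP_KERNEL);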
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 7ac817b64a71..3b22bbdee9ec 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -450,7 +450,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
return NULL;
if (inode->i_state & I_NEW) {
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
- PROC_I(inode)->fd = 0;
PROC_I(inode)->pde = de;
if (de->mode) {
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 67925a7bd8cb..43973b084abf 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/proc_fs.h>
struct ctl_table_header;
+struct mempolicy;
extern struct proc_dir_entry proc_root;
#ifdef CONFIG_PROC_SYSCTL
@@ -74,6 +75,9 @@ struct proc_maps_private {
#ifdef CONFIG_MMU
struct vm_area_struct *tail_vma;
#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *task_mempolicy;
+#endif
};
void proc_init_inodecache(void);
@@ -103,7 +107,7 @@ static inline int task_dumpable(struct task_struct *task)
if (mm)
dumpable = get_dumpable(mm);
task_unlock(task);
- if(dumpable == 1)
+ if (dumpable == SUID_DUMPABLE_ENABLED)
return 1;
return 0;
}
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 7fcd0d60a968..b8730d9ebaee 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -115,7 +115,13 @@ u64 stable_page_flags(struct page *page)
u |= 1 << KPF_COMPOUND_TAIL;
if (PageHuge(page))
u |= 1 << KPF_HUGE;
- else if (PageTransCompound(page))
+ /*
+ * PageTransCompound can be true for non-huge compound pages (slab
+ * pages or pages allocated by drivers with __GFP_COMP) because it
+ * just checks PG_head/PG_tail, so we need to check PageLRU to make
+ * sure a given page is a thp, not a non-huge compound page.
+ */
+ else if (PageTransCompound(page) && PageLRU(compound_trans_head(page)))
u |= 1 << KPF_THP;
/*
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index eb7cc91b7258..a781bdf06694 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -142,6 +142,7 @@ static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
}
rb_link_node(node, parent, p);
+ rb_insert_color(node, &head->parent->root);
return 0;
}
@@ -168,10 +169,8 @@ static void init_header(struct ctl_table_header *head,
head->node = node;
if (node) {
struct ctl_table *entry;
- for (entry = table; entry->procname; entry++, node++) {
- rb_init_node(&node->node);
+ for (entry = table; entry->procname; entry++, node++)
node->header = head;
- }
}
}
@@ -266,8 +265,7 @@ void sysctl_head_put(struct ctl_table_header *head)
static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head)
{
- if (!head)
- BUG();
+ BUG_ON(!head);
spin_lock(&sysctl_lock);
if (!use_table(head))
head = ERR_PTR(-ENOENT);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 9a2d9fd7cadd..9889a92d2e01 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -61,7 +61,7 @@ static int proc_parse_options(char *options, struct pid_namespace *pid)
if (!*p)
continue;
- args[0].to = args[0].from = 0;
+ args[0].to = args[0].from = NULL;
token = match_token(p, tokens, args);
switch (token) {
case Opt_gid:
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 64c3b3172367..e296572c73ed 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -45,10 +45,13 @@ static cputime64_t get_iowait_time(int cpu)
static u64 get_idle_time(int cpu)
{
- u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
+ u64 idle, idle_time = -1ULL;
+
+ if (cpu_online(cpu))
+ idle_time = get_cpu_idle_time_us(cpu, NULL);
if (idle_time == -1ULL)
- /* !NO_HZ so we can rely on cpustat.idle */
+ /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
else
idle = usecs_to_cputime64(idle_time);
@@ -58,10 +61,13 @@ static u64 get_idle_time(int cpu)
static u64 get_iowait_time(int cpu)
{
- u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
+ u64 iowait, iowait_time = -1ULL;
+
+ if (cpu_online(cpu))
+ iowait_time = get_cpu_iowait_time_us(cpu, NULL);
if (iowait_time == -1ULL)
- /* !NO_HZ so we can rely on cpustat.iowait */
+ /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
else
iowait = usecs_to_cputime64(iowait_time);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 4540b8f76f16..90c63f9392a5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -54,7 +54,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"VmPTE:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
- (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+ total_vm << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
mm->pinned_vm << (PAGE_SHIFT-10),
hiwater_rss << (PAGE_SHIFT-10),
@@ -90,10 +90,55 @@ static void pad_len_spaces(struct seq_file *m, int len)
seq_printf(m, "%*c", len, ' ');
}
+#ifdef CONFIG_NUMA
+/*
+ * These functions are for numa_maps but called in generic **maps seq_file
+ * ->start(), ->stop() ops.
+ *
+ * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
+ * Each mempolicy object is controlled by reference counting. The problem here
+ * is how to avoid accessing dead mempolicy object.
+ * is how to avoid accessing a dead mempolicy object.
+ * Because we're holding mmap_sem while reading seq_file, it's safe to access
+ * each vma's mempolicy; no vma object will drop its ref to a mempolicy.
+ *
+ * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
+ * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
+ * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
+ * guarantee the task never exits under us. But taking task_lock() around
+ * get_vma_policy() causes a lock-order problem.
+ *
+ * To access task->mempolicy without the lock, we take a reference on the
+ * object it points to and remember it. This guarantees that task->mempolicy
+ * points to a live object (or is NULL) for the duration of numa_maps accesses.
+ */
+static void hold_task_mempolicy(struct proc_maps_private *priv)
+{
+ struct task_struct *task = priv->task;
+
+ task_lock(task);
+ priv->task_mempolicy = task->mempolicy;
+ mpol_get(priv->task_mempolicy);
+ task_unlock(task);
+}
+static void release_task_mempolicy(struct proc_maps_private *priv)
+{
+ mpol_put(priv->task_mempolicy);
+}
+#else
+static void hold_task_mempolicy(struct proc_maps_private *priv)
+{
+}
+static void release_task_mempolicy(struct proc_maps_private *priv)
+{
+}
+#endif
+
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
if (vma && vma != priv->tail_vma) {
struct mm_struct *mm = vma->vm_mm;
+ release_task_mempolicy(priv);
up_read(&mm->mmap_sem);
mmput(mm);
}
@@ -132,7 +177,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
tail_vma = get_gate_vma(priv->task->mm);
priv->tail_vma = tail_vma;
-
+ hold_task_mempolicy(priv);
/* Start with last addr hint */
vma = find_vma(mm, last_addr);
if (last_addr && vma) {
@@ -159,6 +204,7 @@ out:
if (vma)
return vma;
+ release_task_mempolicy(priv);
/* End of vmas has been reached */
m->version = (tail_vma != NULL)? 0: -1UL;
up_read(&mm->mmap_sem);
@@ -1158,6 +1204,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
struct vm_area_struct *vma = v;
struct numa_maps *md = &numa_priv->md;
struct file *file = vma->vm_file;
+ struct task_struct *task = proc_priv->task;
struct mm_struct *mm = vma->vm_mm;
struct mm_walk walk = {};
struct mempolicy *pol;
@@ -1177,7 +1224,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
walk.private = md;
walk.mm = mm;
- pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
+ pol = get_vma_policy(task, vma, vma->vm_start);
mpol_to_str(buffer, sizeof(buffer), pol, 0);
mpol_cond_put(pol);
@@ -1189,7 +1236,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_printf(m, " heap");
} else {
- pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid);
+ pid_t tid = vm_is_stack(task, vma, is_pid);
if (tid != 0) {
/*
* Thread stack in /proc/PID/task/TID/maps or
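The hold/release pair above is an instance of a common pattern: a pointer published under one lock (task_lock()) can be used by a reader holding a different lock (mmap_sem) if the reader pins the object once up front and works with its own reference. A condensed sketch, not the exact code (mpol_get()/mpol_put() are NULL-tolerant):

#include <linux/sched.h>
#include <linux/mempolicy.h>

/* Pin task->mempolicy before a long walk so later accesses cannot
 * race with the task exiting and dropping its policy.
 */
static struct mempolicy *pin_task_mempolicy(struct task_struct *task)
{
        struct mempolicy *pol;

        task_lock(task);        /* task->mempolicy is cleared under this */
        pol = task->mempolicy;
        mpol_get(pol);          /* take our own reference (NULL-safe) */
        task_unlock(task);

        return pol;             /* caller drops it with mpol_put() */
}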
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index d39bb5cce883..ca71db69da07 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -23,6 +23,7 @@ config PSTORE_FTRACE
bool "Persistent function tracer"
depends on PSTORE
depends on FUNCTION_TRACER
+ depends on DEBUG_FS
help
With this option, the kernel traces function calls into a persistent
ram buffer that can be decoded and dumped after reboot through
diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
index a130d484b7d3..2d57e1ac0115 100644
--- a/fs/pstore/ftrace.c
+++ b/fs/pstore/ftrace.c
@@ -17,19 +17,113 @@
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/cache.h>
#include <asm/barrier.h>
#include "internal.h"
-void notrace pstore_ftrace_call(unsigned long ip, unsigned long parent_ip)
+static void notrace pstore_ftrace_call(unsigned long ip,
+ unsigned long parent_ip)
{
+ unsigned long flags;
struct pstore_ftrace_record rec = {};
if (unlikely(oops_in_progress))
return;
+ local_irq_save(flags);
+
rec.ip = ip;
rec.parent_ip = parent_ip;
pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec,
sizeof(rec), psinfo);
+
+ local_irq_restore(flags);
+}
+
+static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
+ .func = pstore_ftrace_call,
+};
+
+static DEFINE_MUTEX(pstore_ftrace_lock);
+static bool pstore_ftrace_enabled;
+
+static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 on;
+ ssize_t ret;
+
+ ret = kstrtou8_from_user(buf, count, 2, &on);
+ if (ret)
+ return ret;
+
+ mutex_lock(&pstore_ftrace_lock);
+
+ if (!on ^ pstore_ftrace_enabled)
+ goto out;
+
+ if (on)
+ ret = register_ftrace_function(&pstore_ftrace_ops);
+ else
+ ret = unregister_ftrace_function(&pstore_ftrace_ops);
+ if (ret) {
+ pr_err("%s: unable to %sregister ftrace ops: %zd\n",
+ __func__, on ? "" : "un", ret);
+ goto err;
+ }
+
+ pstore_ftrace_enabled = on;
+out:
+ ret = count;
+err:
+ mutex_unlock(&pstore_ftrace_lock);
+
+ return ret;
+}
+
+static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char val[] = { '0' + pstore_ftrace_enabled, '\n' };
+
+ return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
+}
+
+static const struct file_operations pstore_knob_fops = {
+ .open = simple_open,
+ .read = pstore_ftrace_knob_read,
+ .write = pstore_ftrace_knob_write,
+};
+
+void pstore_register_ftrace(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+
+ if (!psinfo->write_buf)
+ return;
+
+ dir = debugfs_create_dir("pstore", NULL);
+ if (!dir) {
+ pr_err("%s: unable to create pstore directory\n", __func__);
+ return;
+ }
+
+ file = debugfs_create_file("record_ftrace", 0600, dir, NULL,
+ &pstore_knob_fops);
+ if (!file) {
+ pr_err("%s: unable to create record_ftrace file\n", __func__);
+ goto err_file;
+ }
+
+ return;
+err_file:
+ debugfs_remove(dir);
}
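One subtlety in the knob's write handler: the early-out condition `!on ^ pstore_ftrace_enabled` is true exactly when the requested boolean state (!!on) already equals the current one. A more transparent spelling of the same check would be (sketch):

        /* Treat the written value as a boolean and bail out early if
         * tracing is already in the requested state; equivalent to
         * the "!on ^ pstore_ftrace_enabled" test above.
         */
        if (!!on == pstore_ftrace_enabled)
                goto out;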
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 0d0d3b7d5f12..4847f588b7d5 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -39,6 +39,12 @@ pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
#endif
}
+#ifdef CONFIG_PSTORE_FTRACE
+extern void pstore_register_ftrace(void);
+#else
+static inline void pstore_register_ftrace(void) {}
+#endif
+
extern struct pstore_info *psinfo;
extern void pstore_set_kmsg_bytes(int);
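The header follows the standard kernel stub idiom: declare the real function when the feature is configured in, and provide an empty static inline otherwise, so call sites need no #ifdef of their own. The shape of the idiom (CONFIG_MYFEATURE and myfeature_init() are placeholders):

#ifdef CONFIG_MYFEATURE
extern void myfeature_init(void);               /* real implementation */
#else
static inline void myfeature_init(void) {}      /* compiles away */
#endif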
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 29996e8793a7..a40da07e93d6 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -164,7 +164,13 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c)
if (c > psinfo->bufsize)
c = psinfo->bufsize;
- spin_lock_irqsave(&psinfo->buf_lock, flags);
+
+ if (oops_in_progress) {
+ if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
+ break;
+ } else {
+ spin_lock_irqsave(&psinfo->buf_lock, flags);
+ }
memcpy(psinfo->buf, s, c);
psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo);
spin_unlock_irqrestore(&psinfo->buf_lock, flags);
@@ -236,6 +242,7 @@ int pstore_register(struct pstore_info *psi)
kmsg_dump_register(&pstore_dumper);
pstore_register_console();
+ pstore_register_ftrace();
if (pstore_update_ms >= 0) {
pstore_timer.expires = jiffies +
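The trylock variant added to pstore_console_write() is the usual defence for console-path code: during an oops this CPU may already hold buf_lock, and an unconditional spin_lock_irqsave() would deadlock, so the oops path drops the record rather than blocking. The pattern in isolation (sketch; buf_lock stands in for psinfo->buf_lock):

#include <linux/spinlock.h>
#include <linux/kernel.h>       /* oops_in_progress */

static DEFINE_SPINLOCK(buf_lock);

static void emit_record(const char *s, size_t c)
{
        unsigned long flags;

        if (oops_in_progress) {
                /* May already hold the lock on this CPU; don't deadlock. */
                if (!spin_trylock_irqsave(&buf_lock, flags))
                        return;         /* drop the record instead */
        } else {
                spin_lock_irqsave(&buf_lock, flags);
        }

        /* ... copy into the shared buffer and write it out ... */

        spin_unlock_irqrestore(&buf_lock, flags);
}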
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 0b311bc18916..1a4f6da58eab 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -32,6 +32,7 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/compiler.h>
#include <linux/pstore_ram.h>
#define RAMOOPS_KERNMSG_HDR "===="
@@ -181,12 +182,11 @@ static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz)
return len;
}
-
-static int ramoops_pstore_write_buf(enum pstore_type_id type,
- enum kmsg_dump_reason reason,
- u64 *id, unsigned int part,
- const char *buf, size_t size,
- struct pstore_info *psi)
+static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
+ const char *buf, size_t size,
+ struct pstore_info *psi)
{
struct ramoops_context *cxt = psi->data;
struct persistent_ram_zone *prz = cxt->przs[cxt->dump_write_cnt];
@@ -406,7 +406,7 @@ static int __devinit ramoops_probe(struct platform_device *pdev)
goto fail_init_fprz;
if (!cxt->przs && !cxt->cprz && !cxt->fprz) {
- pr_err("memory size too small, minimum is %lu\n",
+ pr_err("memory size too small, minimum is %zu\n",
cxt->console_size + cxt->record_size +
cxt->ftrace_size);
goto fail_cnt;
@@ -414,13 +414,14 @@ static int __devinit ramoops_probe(struct platform_device *pdev)
cxt->pstore.data = cxt;
/*
- * Console can handle any buffer size, so prefer dumps buffer
- * size since usually it is smaller.
+ * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
+ * have to handle dumps, we must have at least a record_size buffer. And
+ * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
+ * ZERO_SIZE_PTR).
*/
- if (cxt->przs)
- cxt->pstore.bufsize = cxt->przs[0]->buffer_size;
- else
- cxt->pstore.bufsize = cxt->cprz->buffer_size;
+ if (cxt->console_size)
+ cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
+ cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
spin_lock_init(&cxt->pstore.buf_lock);
if (!cxt->pstore.buf) {
@@ -537,6 +538,7 @@ postcore_initcall(ramoops_init);
static void __exit ramoops_exit(void)
{
platform_driver_unregister(&ramoops_driver);
+ platform_device_unregister(dummy);
kfree(dummy_data);
}
module_exit(ramoops_exit);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 557a9c20a215..05ae3c97f7a5 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1160,6 +1160,8 @@ static int need_print_warning(struct dquot_warn *warn)
return uid_eq(current_fsuid(), warn->w_dq_id.uid);
case GRPQUOTA:
return in_group_p(warn->w_dq_id.gid);
+ case PRJQUOTA: /* Never taken... Just make gcc happy */
+ return 0;
}
return 0;
}
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index ff0135d6bc51..af1661f7a54f 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -331,11 +331,11 @@ static struct super_block *quotactl_block(const char __user *special, int cmd)
#ifdef CONFIG_BLOCK
struct block_device *bdev;
struct super_block *sb;
- char *tmp = getname(special);
+ struct filename *tmp = getname(special);
if (IS_ERR(tmp))
return ERR_CAST(tmp);
- bdev = lookup_bdev(tmp);
+ bdev = lookup_bdev(tmp->name);
putname(tmp);
if (IS_ERR(bdev))
return ERR_CAST(bdev);
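This hunk tracks an API change: getname() now returns a struct filename instead of a bare char *, so callers reach the user-copied string through ->name and still release it with putname(). The adjusted calling convention, in brief (do_something() and user_ptr are placeholders):

        struct filename *name = getname(user_ptr);

        if (IS_ERR(name))
                return PTR_ERR(name);
        do_something(name->name);       /* the C string lives in ->name */
        putname(name);                  /* still paired with getname() */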
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 46485557cdc6..f27f01a98aa2 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1573,8 +1573,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
reiserfs_warning(sb, "reiserfs-13077",
"nfsd/reiserfs, fhtype=%d, len=%d - odd",
fh_type, fh_len);
- fh_type = 5;
+ fh_type = fh_len;
}
+ if (fh_len < 2)
+ return NULL;
return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
(fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
@@ -1583,6 +1585,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
+ if (fh_type > fh_len)
+ fh_type = fh_len;
if (fh_type < 4)
return NULL;
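Both reiserfs hunks enforce the same exportfs rule: the (fh_len, fh_type) pair comes from the client-supplied file handle, and the two need not be consistent, so fid->raw[] slots beyond fh_len must never be dereferenced. The guard in its general form (sketch; the bound of 2 matches the raw[0]/raw[1] accesses here):

        /* Clamp the claimed type to what the handle actually carries,
         * then refuse handles too short for the fields read below.
         */
        if (fh_type > fh_len)
                fh_type = fh_len;
        if (fh_len < 2)
                return NULL;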
diff --git a/fs/super.c b/fs/super.c
index 5fdf7ff32c4e..12f123712161 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -186,15 +186,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
spin_lock_init(&s->s_inode_lru_lock);
INIT_LIST_HEAD(&s->s_mounts);
init_rwsem(&s->s_umount);
- mutex_init(&s->s_lock);
lockdep_set_class(&s->s_umount, &type->s_umount_key);
/*
- * The locking rules for s_lock are up to the
- * filesystem. For example ext3fs has different
- * lock ordering than usbfs:
- */
- lockdep_set_class(&s->s_lock, &type->s_lock_key);
- /*
* sget() can have s_umount recursion.
*
* When it cannot find a suitable sb, it allocates a new
@@ -394,22 +387,6 @@ bool grab_super_passive(struct super_block *sb)
return false;
}
-/*
- * Superblock locking. We really ought to get rid of these two.
- */
-void lock_super(struct super_block * sb)
-{
- mutex_lock(&sb->s_lock);
-}
-
-void unlock_super(struct super_block * sb)
-{
- mutex_unlock(&sb->s_lock);
-}
-
-EXPORT_SYMBOL(lock_super);
-EXPORT_SYMBOL(unlock_super);
-
/**
* generic_shutdown_super - common helper for ->kill_sb()
* @sb: superblock to kill
@@ -865,7 +842,7 @@ int get_anon_bdev(dev_t *p)
else if (error)
return -EAGAIN;
- if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
+ if ((dev & MAX_IDR_MASK) == (1 << MINORBITS)) {
spin_lock(&unnamed_dev_lock);
ida_remove(&unnamed_dev_ida, dev);
if (unnamed_dev_start > dev)
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 6b0bb00d4d2b..2fbdff6be25c 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -485,20 +485,18 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
/**
* sysfs_pathname - return full path to sysfs dirent
* @sd: sysfs_dirent whose path we want
- * @path: caller allocated buffer
+ * @path: caller allocated buffer of size PATH_MAX
*
* Gives the name "/" to the sysfs_root entry; any path returned
* is relative to wherever sysfs is mounted.
- *
- * XXX: does no error checking on @path size
*/
static char *sysfs_pathname(struct sysfs_dirent *sd, char *path)
{
if (sd->s_parent) {
sysfs_pathname(sd->s_parent, path);
- strcat(path, "/");
+ strlcat(path, "/", PATH_MAX);
}
- strcat(path, sd->s_name);
+ strlcat(path, sd->s_name, PATH_MAX);
return path;
}
@@ -531,9 +529,11 @@ int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
char *path = kzalloc(PATH_MAX, GFP_KERNEL);
WARN(1, KERN_WARNING
"sysfs: cannot create duplicate filename '%s'\n",
- (path == NULL) ? sd->s_name :
- strcat(strcat(sysfs_pathname(acxt->parent_sd, path), "/"),
- sd->s_name));
+ (path == NULL) ? sd->s_name
+ : (sysfs_pathname(acxt->parent_sd, path),
+ strlcat(path, "/", PATH_MAX),
+ strlcat(path, sd->s_name, PATH_MAX),
+ path));
kfree(path);
}
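Unlike strcat(), strlcat() takes the total size of the destination buffer, always NUL-terminates, and truncates rather than overruns; its return value is the length it tried to create, so truncation is detectable. A small illustration (sketch; name is hypothetical):

#include <linux/string.h>
#include <linux/limits.h>       /* PATH_MAX */

        char path[PATH_MAX] = "";

        /* Bounded by the full buffer size on every call: deep recursion
         * can at worst truncate the path, never overflow the buffer.
         */
        strlcat(path, "/", PATH_MAX);
        if (strlcat(path, name, PATH_MAX) >= PATH_MAX)
                pr_warn("path truncated\n");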
diff --git a/fs/sysv/balloc.c b/fs/sysv/balloc.c
index 9a6ad96acf27..921c053fc052 100644
--- a/fs/sysv/balloc.c
+++ b/fs/sysv/balloc.c
@@ -60,12 +60,12 @@ void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
return;
}
- lock_super(sb);
+ mutex_lock(&sbi->s_lock);
count = fs16_to_cpu(sbi, *sbi->s_bcache_count);
if (count > sbi->s_flc_size) {
printk("sysv_free_block: flc_count > flc_size\n");
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return;
}
/* If the free list head in super-block is full, it is copied
@@ -77,7 +77,7 @@ void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
bh = sb_getblk(sb, block);
if (!bh) {
printk("sysv_free_block: getblk() failed\n");
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return;
}
memset(bh->b_data, 0, sb->s_blocksize);
@@ -93,7 +93,7 @@ void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
*sbi->s_bcache_count = cpu_to_fs16(sbi, count);
fs32_add(sbi, sbi->s_free_blocks, 1);
dirty_sb(sb);
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
}
sysv_zone_t sysv_new_block(struct super_block * sb)
@@ -104,7 +104,7 @@ sysv_zone_t sysv_new_block(struct super_block * sb)
struct buffer_head * bh;
unsigned count;
- lock_super(sb);
+ mutex_lock(&sbi->s_lock);
count = fs16_to_cpu(sbi, *sbi->s_bcache_count);
if (count == 0) /* Applies only to Coherent FS */
@@ -147,11 +147,11 @@ sysv_zone_t sysv_new_block(struct super_block * sb)
/* Now the free list head in the superblock is valid again. */
fs32_add(sbi, sbi->s_free_blocks, -1);
dirty_sb(sb);
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return nr;
Enospc:
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return 0;
}
@@ -173,7 +173,7 @@ unsigned long sysv_count_free_blocks(struct super_block * sb)
if (sbi->s_type == FSTYPE_AFS)
return 0;
- lock_super(sb);
+ mutex_lock(&sbi->s_lock);
sb_count = fs32_to_cpu(sbi, *sbi->s_free_blocks);
if (0)
@@ -211,7 +211,7 @@ unsigned long sysv_count_free_blocks(struct super_block * sb)
if (count != sb_count)
goto Ecount;
done:
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return count;
Einval:
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index 8233b02eccae..f9db4eb31db4 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -118,7 +118,7 @@ void sysv_free_inode(struct inode * inode)
"%s\n", inode->i_sb->s_id);
return;
}
- lock_super(sb);
+ mutex_lock(&sbi->s_lock);
count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count);
if (count < sbi->s_fic_size) {
*sv_sb_fic_inode(sb,count++) = cpu_to_fs16(sbi, ino);
@@ -128,7 +128,7 @@ void sysv_free_inode(struct inode * inode)
dirty_sb(sb);
memset(raw_inode, 0, sizeof(struct sysv_inode));
mark_buffer_dirty(bh);
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
brelse(bh);
}
@@ -147,13 +147,13 @@ struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
if (!inode)
return ERR_PTR(-ENOMEM);
- lock_super(sb);
+ mutex_lock(&sbi->s_lock);
count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count);
if (count == 0 || (*sv_sb_fic_inode(sb,count-1) == 0)) {
count = refill_free_cache(sb);
if (count == 0) {
iput(inode);
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return ERR_PTR(-ENOSPC);
}
}
@@ -174,7 +174,7 @@ struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */
mark_inode_dirty(inode); /* cleared by sysv_write_inode() */
/* That's it. */
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return inode;
}
@@ -185,7 +185,7 @@ unsigned long sysv_count_free_inodes(struct super_block * sb)
struct sysv_inode * raw_inode;
int ino, count, sb_count;
- lock_super(sb);
+ mutex_lock(&sbi->s_lock);
sb_count = fs16_to_cpu(sbi, *sbi->s_sb_total_free_inodes);
@@ -213,7 +213,7 @@ unsigned long sysv_count_free_inodes(struct super_block * sb)
if (count != sb_count)
goto Einval;
out:
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return count;
Einval:
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index d33e506c1eac..c327d4ee1235 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -36,7 +36,7 @@ static int sysv_sync_fs(struct super_block *sb, int wait)
struct sysv_sb_info *sbi = SYSV_SB(sb);
unsigned long time = get_seconds(), old_time;
- lock_super(sb);
+ mutex_lock(&sbi->s_lock);
/*
* If we are going to write out the super block,
@@ -51,7 +51,7 @@ static int sysv_sync_fs(struct super_block *sb, int wait)
mark_buffer_dirty(sbi->s_bh2);
}
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
return 0;
}
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 7491c33b6468..a38e87bdd78d 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -368,6 +368,7 @@ static int sysv_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_sb = sb;
sbi->s_block_base = 0;
+ mutex_init(&sbi->s_lock);
sb->s_fs_info = sbi;
sb_set_blocksize(sb, BLOCK_SIZE);
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 0bc35fdc58e2..69d488986cce 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -58,6 +58,7 @@ struct sysv_sb_info {
u32 s_nzones; /* same as s_sbd->s_fsize */
u16 s_namelen; /* max length of dir entry */
int s_forced_ro;
+ struct mutex s_lock;
};
/*
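The sysv changes above (and the ufs changes that follow) apply one recipe for retiring the old lock_super()/unlock_super() pair: give the per-filesystem sb_info its own mutex, initialize it in fill_super() before first use, and convert every lock/unlock site. In outline (sketch; MYFS_SB() and the surrounding names are placeholders):

#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct myfs_sb_info {
        /* ... existing fields ... */
        struct mutex s_lock;            /* replaces the generic sb->s_lock */
};

static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct myfs_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);

        if (!sbi)
                return -ENOMEM;
        mutex_init(&sbi->s_lock);       /* once, before any lock site runs */
        sb->s_fs_info = sbi;
        /* ... */
        return 0;
}

/* at every former lock_super(sb) ... unlock_super(sb) pair: */
        mutex_lock(&MYFS_SB(sb)->s_lock);
        /* ... critical section over on-disk superblock state ... */
        mutex_unlock(&MYFS_SB(sb)->s_lock);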
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index ff48c5a85309..5bc77817f382 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1536,6 +1536,7 @@ out_unlock:
static const struct vm_operations_struct ubifs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = ubifs_vm_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 1b3e410bf334..a7ea492ae660 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -54,7 +54,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
if (ufs_fragnum(fragment) + count > uspi->s_fpg)
ufs_error (sb, "ufs_free_fragments", "internal error");
- lock_super(sb);
+ mutex_lock(&UFS_SB(sb)->s_lock);
cgno = ufs_dtog(uspi, fragment);
bit = ufs_dtogd(uspi, fragment);
@@ -118,12 +118,12 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT\n");
return;
failed:
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT (FAILED)\n");
return;
}
@@ -155,7 +155,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
goto failed;
}
- lock_super(sb);
+ mutex_lock(&UFS_SB(sb)->s_lock);
do_more:
overflow = 0;
@@ -215,12 +215,12 @@ do_more:
}
ufs_mark_sb_dirty(sb);
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT\n");
return;
failed_unlock:
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
UFSD("EXIT (FAILED)\n");
return;
@@ -361,7 +361,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
usb1 = ubh_get_usb_first(uspi);
*err = -ENOSPC;
- lock_super (sb);
+ mutex_lock(&UFS_SB(sb)->s_lock);
tmp = ufs_data_ptr_to_cpu(sb, p);
if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
@@ -382,19 +382,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
"fragment %llu, tmp %llu\n",
(unsigned long long)fragment,
(unsigned long long)tmp);
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
return INVBLOCK;
}
if (fragment < UFS_I(inode)->i_lastfrag) {
UFSD("EXIT (ALREADY ALLOCATED)\n");
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
return 0;
}
}
else {
if (tmp) {
UFSD("EXIT (ALREADY ALLOCATED)\n");
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
return 0;
}
}
@@ -403,7 +403,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
* There is not enough space for user on the device
*/
if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT (FAILED)\n");
return 0;
}
@@ -428,7 +428,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
ufs_clear_frags(inode, result + oldcount,
newcount - oldcount, locked_page != NULL);
}
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT, result %llu\n", (unsigned long long)result);
return result;
}
@@ -443,7 +443,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
fragment + count);
ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
locked_page != NULL);
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT, result %llu\n", (unsigned long long)result);
return result;
}
@@ -481,7 +481,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
*err = 0;
UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
fragment + count);
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
if (newcount < request)
ufs_free_fragments (inode, result + newcount, request - newcount);
ufs_free_fragments (inode, tmp, oldcount);
@@ -489,7 +489,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
return result;
}
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT (FAILED)\n");
return 0;
}
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index e84cbe21b986..d0426d74817b 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -71,11 +71,11 @@ void ufs_free_inode (struct inode * inode)
ino = inode->i_ino;
- lock_super (sb);
+ mutex_lock(&UFS_SB(sb)->s_lock);
if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
return;
}
@@ -83,7 +83,7 @@ void ufs_free_inode (struct inode * inode)
bit = ufs_inotocgoff (ino);
ucpi = ufs_load_cylinder (sb, cg);
if (!ucpi) {
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
return;
}
ucg = ubh_get_ucg(UCPI_UBH(ucpi));
@@ -117,7 +117,7 @@ void ufs_free_inode (struct inode * inode)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
- unlock_super (sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
UFSD("EXIT\n");
}
@@ -197,7 +197,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
uspi = sbi->s_uspi;
usb1 = ubh_get_usb_first(uspi);
- lock_super (sb);
+ mutex_lock(&sbi->s_lock);
/*
* Try to place the inode in its parent directory
@@ -333,20 +333,20 @@ cg_found:
brelse(bh);
}
- unlock_super (sb);
+ mutex_unlock(&sbi->s_lock);
UFSD("allocating inode %lu\n", inode->i_ino);
UFSD("EXIT\n");
return inode;
fail_remove_inode:
- unlock_super(sb);
+ mutex_unlock(&sbi->s_lock);
clear_nlink(inode);
iput(inode);
UFSD("EXIT (FAILED): err %d\n", err);
return ERR_PTR(err);
failed:
- unlock_super (sb);
+ mutex_unlock(&sbi->s_lock);
make_bad_inode(inode);
iput (inode);
UFSD("EXIT (FAILED): err %d\n", err);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index f7cfecfe1cab..dc8e3a861d0f 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -699,7 +699,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
unsigned flags;
lock_ufs(sb);
- lock_super(sb);
+ mutex_lock(&UFS_SB(sb)->s_lock);
UFSD("ENTER\n");
@@ -717,7 +717,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
ufs_put_cstotal(sb);
UFSD("EXIT\n");
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
unlock_ufs(sb);
return 0;
@@ -805,6 +805,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
}
#endif
mutex_init(&sbi->mutex);
+ mutex_init(&sbi->s_lock);
spin_lock_init(&sbi->work_lock);
INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
/*
@@ -1280,7 +1281,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
unsigned flags;
lock_ufs(sb);
- lock_super(sb);
+ mutex_lock(&UFS_SB(sb)->s_lock);
uspi = UFS_SB(sb)->s_uspi;
flags = UFS_SB(sb)->s_flags;
usb1 = ubh_get_usb_first(uspi);
@@ -1294,7 +1295,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
new_mount_opt = 0;
ufs_set_opt (new_mount_opt, ONERROR_LOCK);
if (!ufs_parse_options (data, &new_mount_opt)) {
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
unlock_ufs(sb);
return -EINVAL;
}
@@ -1302,14 +1303,14 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
new_mount_opt |= ufstype;
} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
printk("ufstype can't be changed during remount\n");
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
unlock_ufs(sb);
return -EINVAL;
}
if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
UFS_SB(sb)->s_mount_opt = new_mount_opt;
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
unlock_ufs(sb);
return 0;
}
@@ -1334,7 +1335,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
#ifndef CONFIG_UFS_FS_WRITE
printk("ufs was compiled with read-only support, "
"can't be mounted as read-write\n");
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
unlock_ufs(sb);
return -EINVAL;
#else
@@ -1344,13 +1345,13 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
printk("this ufstype is read-only supported\n");
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
unlock_ufs(sb);
return -EINVAL;
}
if (!ufs_read_cylinder_structures(sb)) {
printk("failed during remounting\n");
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
unlock_ufs(sb);
return -EPERM;
}
@@ -1358,7 +1359,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
#endif
}
UFS_SB(sb)->s_mount_opt = new_mount_opt;
- unlock_super(sb);
+ mutex_unlock(&UFS_SB(sb)->s_lock);
unlock_ufs(sb);
return 0;
}
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 343e6fc571e5..ff2c15ab81aa 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -24,6 +24,7 @@ struct ufs_sb_info {
int work_queued; /* non-zero if the delayed work is queued */
struct delayed_work sync_work; /* FS sync delayed work */
spinlock_t work_lock; /* protects sync_work and work_queued */
+ struct mutex s_lock;
};
struct ufs_inode_info {
diff --git a/fs/xattr.c b/fs/xattr.c
index 1780f062dbaf..e21c119f4f99 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -412,7 +412,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
if (!f.file)
return error;
dentry = f.file->f_path.dentry;
- audit_inode(NULL, dentry);
+ audit_inode(NULL, dentry, 0);
error = mnt_want_write_file(f.file);
if (!error) {
error = setxattr(dentry, name, value, size, flags);
@@ -507,7 +507,7 @@ SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
if (!f.file)
return error;
- audit_inode(NULL, f.file->f_path.dentry);
+ audit_inode(NULL, f.file->f_path.dentry, 0);
error = getxattr(f.file->f_path.dentry, name, value, size);
fdput(f);
return error;
@@ -586,7 +586,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
if (!f.file)
return error;
- audit_inode(NULL, f.file->f_path.dentry);
+ audit_inode(NULL, f.file->f_path.dentry, 0);
error = listxattr(f.file->f_path.dentry, list, size);
fdput(f);
return error;
@@ -655,7 +655,7 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
if (!f.file)
return error;
dentry = f.file->f_path.dentry;
- audit_inode(NULL, dentry);
+ audit_inode(NULL, dentry, 0);
error = mnt_want_write_file(f.file);
if (!error) {
error = removexattr(dentry, name);
@@ -846,7 +846,7 @@ static int __simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
const void *value, size_t size, int flags)
{
struct simple_xattr *xattr;
- struct simple_xattr *uninitialized_var(new_xattr);
+ struct simple_xattr *new_xattr = NULL;
int err = 0;
/* value == NULL means remove */
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
index 11efd830b5f5..9fbea87fdb6e 100644
--- a/fs/xattr_acl.c
+++ b/fs/xattr_acl.c
@@ -45,7 +45,7 @@ static void posix_acl_fix_xattr_userns(
break;
case ACL_GROUP:
gid = make_kgid(from, le32_to_cpu(entry->e_id));
- entry->e_id = cpu_to_le32(from_kuid(to, uid));
+ entry->e_id = cpu_to_le32(from_kgid(to, gid));
break;
default:
break;
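The one-line xattr_acl.c change fixes a copy-and-paste slip: in the ACL_GROUP arm, the freshly computed kgid was discarded and the uid from the ACL_USER arm was converted instead. The intended symmetry between the two arms looks like this (abridged from the surrounding switch):

        switch (le16_to_cpu(entry->e_tag)) {
        case ACL_USER:
                uid = make_kuid(from, le32_to_cpu(entry->e_id));
                entry->e_id = cpu_to_le32(from_kuid(to, uid));
                break;
        case ACL_GROUP:
                gid = make_kgid(from, le32_to_cpu(entry->e_id));
                entry->e_id = cpu_to_le32(from_kgid(to, gid));  /* was: from_kuid(to, uid) */
                break;
        default:
                break;
        }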
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 42679223a0fd..8c6d1d70278c 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -189,6 +189,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
struct inode *inode = NULL;
+ if (fh_len < xfs_fileid_length(fileid_type))
+ return NULL;
+
switch (fileid_type) {
case FILEID_INO32_GEN_PARENT:
inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1eaeb8be3aae..aa473fa640a2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -940,7 +940,6 @@ xfs_file_mmap(
struct vm_area_struct *vma)
{
vma->vm_ops = &xfs_file_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
file_accessed(filp);
return 0;
@@ -1443,4 +1442,5 @@ const struct file_operations xfs_dir_file_operations = {
static const struct vm_operations_struct xfs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = xfs_vm_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
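The ubifs and xfs hunks reflect the same VM interface change: nonlinear-mapping support moved from the per-vma VM_CAN_NONLINEAR flag to a ->remap_pages method in vm_operations_struct, with generic_file_remap_pages as the stock implementation for page-cache-backed files. The minimal conversion for a filesystem looks like this (sketch; the myfs names are placeholders):

static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = myfs_vm_page_mkwrite,         /* fs-specific */
        .remap_pages    = generic_file_remap_pages,     /* was VM_CAN_NONLINEAR */
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &myfs_file_vm_ops;
        /* no longer needed: vma->vm_flags |= VM_CAN_NONLINEAR; */
        return 0;
}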