author     Jens Axboe <jens.axboe@oracle.com>  2010-04-29 09:36:24 +0200
committer  Jens Axboe <jens.axboe@oracle.com>  2010-04-29 09:36:24 +0200
commit     7407cf355fdf5500430be966dbbde84a27293bad (patch)
tree       922861288ff38558ed721a79653f52b17b13bb95 /fs
parent     6a47dc1418682c83d603b491df1d048f73aa973e (diff)
parent     79dba2eaa771c3173957eccfd288e0e0d12e4d3f (diff)
Merge branch 'master' into for-2.6.35
Conflicts:
	fs/block_dev.c

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/v9fs.c | 10
-rw-r--r--  fs/9p/v9fs.h | 2
-rw-r--r--  fs/9p/vfs_super.c | 1
-rw-r--r--  fs/afs/internal.h | 2
-rw-r--r--  fs/afs/mntpt.c | 24
-rw-r--r--  fs/afs/super.c | 1
-rw-r--r--  fs/afs/volume.c | 7
-rw-r--r--  fs/binfmt_elf_fdpic.c | 7
-rw-r--r--  fs/binfmt_flat.c | 2
-rw-r--r--  fs/block_dev.c | 20
-rw-r--r--  fs/btrfs/disk-io.c | 12
-rw-r--r--  fs/ceph/addr.c | 62
-rw-r--r--  fs/ceph/caps.c | 42
-rw-r--r--  fs/ceph/dir.c | 7
-rw-r--r--  fs/ceph/inode.c | 10
-rw-r--r--  fs/ceph/messenger.c | 9
-rw-r--r--  fs/ceph/osdmap.c | 180
-rw-r--r--  fs/ceph/osdmap.h | 1
-rw-r--r--  fs/ceph/rados.h | 6
-rw-r--r--  fs/ceph/snap.c | 26
-rw-r--r--  fs/ceph/super.h | 3
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 3
-rw-r--r--  fs/cifs/cifsfs.c | 10
-rw-r--r--  fs/coda/inode.c | 8
-rw-r--r--  fs/ecryptfs/crypto.c | 37
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 15
-rw-r--r--  fs/ecryptfs/inode.c | 129
-rw-r--r--  fs/ecryptfs/main.c | 10
-rw-r--r--  fs/ecryptfs/mmap.c | 38
-rw-r--r--  fs/ecryptfs/super.c | 2
-rw-r--r--  fs/exofs/exofs.h | 2
-rw-r--r--  fs/exofs/super.c | 8
-rw-r--r--  fs/ext4/extents.c | 1
-rw-r--r--  fs/ext4/inode.c | 3
-rw-r--r--  fs/ext4/mballoc.c | 21
-rw-r--r--  fs/ioctl.c | 92
-rw-r--r--  fs/jfs/inode.c | 2
-rw-r--r--  fs/jfs/jfs_dmap.c | 16
-rw-r--r--  fs/jfs/jfs_dmap.h | 6
-rw-r--r--  fs/jfs/jfs_inode.h | 1
-rw-r--r--  fs/jfs/namei.c | 4
-rw-r--r--  fs/jfs/resize.c | 6
-rw-r--r--  fs/jfs/symlink.c | 14
-rw-r--r--  fs/logfs/gc.c | 8
-rw-r--r--  fs/logfs/journal.c | 29
-rw-r--r--  fs/logfs/logfs.h | 15
-rw-r--r--  fs/logfs/readwrite.c | 75
-rw-r--r--  fs/logfs/segment.c | 8
-rw-r--r--  fs/logfs/super.c | 11
-rw-r--r--  fs/ncpfs/inode.c | 8
-rw-r--r--  fs/nfs/client.c | 3
-rw-r--r--  fs/nfs/dir.c | 4
-rw-r--r--  fs/nfs/inode.c | 8
-rw-r--r--  fs/nfs/nfs4proc.c | 4
-rw-r--r--  fs/nfs/write.c | 44
-rw-r--r--  fs/nfsd/nfs4xdr.c | 8
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/quota/Kconfig | 8
-rw-r--r--  fs/quota/dquot.c | 16
-rw-r--r--  fs/reiserfs/dir.c | 2
-rw-r--r--  fs/reiserfs/xattr.c | 19
-rw-r--r--  fs/smbfs/inode.c | 8
-rw-r--r--  fs/squashfs/block.c | 5
-rw-r--r--  fs/squashfs/super.c | 4
-rw-r--r--  fs/squashfs/zlib_wrapper.c | 3
-rw-r--r--  fs/super.c | 8
-rw-r--r--  fs/sync.c | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 4
-rw-r--r--  fs/xfs/xfs_dfrag.c | 22
-rw-r--r--  fs/xfs/xfs_log.c | 38
70 files changed, 753 insertions, 476 deletions
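
Several of the per-filesystem hunks below (9p, afs, btrfs, cifs, coda, ecryptfs, exofs) apply the same recurring change: embed a private backing_dev_info in the per-mount/per-session structure, register it with bdi_setup_and_register() during setup, point sb->s_bdi at it in fill_super, and tear it down with bdi_destroy() on the error path and at unmount. The following is a minimal sketch of that pattern only; "myfs", myfs_sb_info, and the label names are hypothetical and not taken from any one of the patches below.

```c
/* Hedged sketch of the bdi_setup_and_register() pattern used in the diffs below. */
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct myfs_sb_info {
	struct backing_dev_info bdi;	/* per-mount BDI, embedded as in cifs/ecryptfs */
	/* ... other per-superblock state ... */
};

static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct myfs_sb_info *sbi;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* Registers a "myfs-N" BDI with the writeback infrastructure. */
	err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
	if (err)
		goto out_free;

	sb->s_fs_info = sbi;
	sb->s_bdi = &sbi->bdi;	/* let the VFS reach the BDI via the superblock */

	/*
	 * ... read the root inode, set s_op, etc.  Any failure after this
	 * point must call bdi_destroy(&sbi->bdi) before freeing sbi.
	 */
	return 0;

out_free:
	kfree(sbi);
	return err;
}

static void myfs_put_super(struct super_block *sb)
{
	struct myfs_sb_info *sbi = sb->s_fs_info;

	bdi_destroy(&sbi->bdi);	/* mirrors bdi_setup_and_register() */
	kfree(sbi);
	sb->s_fs_info = NULL;
}
```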
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 5c5bc8480070..f8b86e92cd66 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -238,6 +238,13 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
return ERR_PTR(-ENOMEM);
}
+ rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY);
+ if (rc) {
+ __putname(v9ses->aname);
+ __putname(v9ses->uname);
+ return ERR_PTR(rc);
+ }
+
spin_lock(&v9fs_sessionlist_lock);
list_add(&v9ses->slist, &v9fs_sessionlist);
spin_unlock(&v9fs_sessionlist_lock);
@@ -301,6 +308,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
return fid;
error:
+ bdi_destroy(&v9ses->bdi);
return ERR_PTR(retval);
}
@@ -326,6 +334,8 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
__putname(v9ses->uname);
__putname(v9ses->aname);
+ bdi_destroy(&v9ses->bdi);
+
spin_lock(&v9fs_sessionlist_lock);
list_del(&v9ses->slist);
spin_unlock(&v9fs_sessionlist_lock);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index a0a8d3dd1361..bec4d0bcb458 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -20,6 +20,7 @@
* Boston, MA 02111-1301 USA
*
*/
+#include <linux/backing-dev.h>
/**
* enum p9_session_flags - option flags for each 9P session
@@ -102,6 +103,7 @@ struct v9fs_session_info {
u32 uid; /* if ACCESS_SINGLE, the uid that has access */
struct p9_client *clnt; /* 9p client */
struct list_head slist; /* list of sessions registered with v9fs */
+ struct backing_dev_info bdi;
};
struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 491108bd6e0d..806da5d3b3a0 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -77,6 +77,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_blocksize = 1 << sb->s_blocksize_bits;
sb->s_magic = V9FS_MAGIC;
sb->s_op = &v9fs_super_ops;
+ sb->s_bdi = &v9ses->bdi;
sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
MS_NOATIME;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index c54dad4e6063..a10f2582844f 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/fscache.h>
+#include <linux/backing-dev.h>
#include "afs.h"
#include "afs_vl.h"
@@ -313,6 +314,7 @@ struct afs_volume {
unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */
struct afs_server *servers[8]; /* servers on which volume resides (ordered) */
struct rw_semaphore server_sem; /* lock for accessing current server */
+ struct backing_dev_info bdi;
};
/*
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 5e813a816ce4..b3feddc4f7d6 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -138,9 +138,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
{
struct afs_super_info *super;
struct vfsmount *mnt;
- struct page *page = NULL;
+ struct page *page;
size_t size;
- char *buf, *devname = NULL, *options = NULL;
+ char *buf, *devname, *options;
int ret;
_enter("{%s}", mntpt->d_name.name);
@@ -150,22 +150,22 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
ret = -EINVAL;
size = mntpt->d_inode->i_size;
if (size > PAGE_SIZE - 1)
- goto error;
+ goto error_no_devname;
ret = -ENOMEM;
devname = (char *) get_zeroed_page(GFP_KERNEL);
if (!devname)
- goto error;
+ goto error_no_devname;
options = (char *) get_zeroed_page(GFP_KERNEL);
if (!options)
- goto error;
+ goto error_no_options;
/* read the contents of the AFS special symlink */
page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- goto error;
+ goto error_no_page;
}
ret = -EIO;
@@ -196,12 +196,12 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
return mnt;
error:
- if (page)
- page_cache_release(page);
- if (devname)
- free_page((unsigned long) devname);
- if (options)
- free_page((unsigned long) options);
+ page_cache_release(page);
+error_no_page:
+ free_page((unsigned long) options);
+error_no_options:
+ free_page((unsigned long) devname);
+error_no_devname:
_leave(" = %d", ret);
return ERR_PTR(ret);
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 14f6431598ad..e932e5a3a0c1 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -311,6 +311,7 @@ static int afs_fill_super(struct super_block *sb, void *data)
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
sb->s_fs_info = as;
+ sb->s_bdi = &as->volume->bdi;
/* allocate the root inode and dentry */
fid.vid = as->volume->vid;
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index a353e69e2391..401eeb21869f 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -106,6 +106,10 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
volume->cell = params->cell;
volume->vid = vlocation->vldb.vid[params->type];
+ ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY);
+ if (ret)
+ goto error_bdi;
+
init_rwsem(&volume->server_sem);
/* look up all the applicable server records */
@@ -151,6 +155,8 @@ error:
return ERR_PTR(ret);
error_discard:
+ bdi_destroy(&volume->bdi);
+error_bdi:
up_write(&params->cell->vl_sem);
for (loop = volume->nservers - 1; loop >= 0; loop--)
@@ -200,6 +206,7 @@ void afs_put_volume(struct afs_volume *volume)
for (loop = volume->nservers - 1; loop >= 0; loop--)
afs_put_server(volume->servers[loop]);
+ bdi_destroy(&volume->bdi);
kfree(volume);
_leave(" [destroyed]");
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 7ab23e006e4c..2c5f9a0e5d72 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1005,15 +1005,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
}
} else if (!mm->start_data) {
mm->start_data = seg->addr;
-#ifndef CONFIG_MMU
mm->end_data = seg->addr + phdr->p_memsz;
-#endif
}
-
-#ifdef CONFIG_MMU
- if (seg->addr + phdr->p_memsz > mm->end_data)
- mm->end_data = seg->addr + phdr->p_memsz;
-#endif
}
seg++;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index e0e769bdca59..49566c1687d8 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -355,7 +355,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
- (int) r,(int)(start_brk-start_code),(int)text_len);
+ (int) r,(int)(start_brk-start_data+text_len),(int)text_len);
goto failed;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index dd769304382e..55dcb7884f4d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -406,17 +406,23 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
- struct block_device *bdev = I_BDEV(filp->f_mapping->host);
+ struct inode *bd_inode = filp->f_mapping->host;
+ struct block_device *bdev = I_BDEV(bd_inode);
int error;
- error = sync_blockdev(bdev);
- if (error)
- return error;
-
- error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL,
- (BLKDEV_IFL_WAIT));
+ /*
+ * There is no need to serialise calls to blkdev_issue_flush with
+ * i_mutex and doing so causes performance issues with concurrent
+ * O_SYNC writers to a block device.
+ */
+ mutex_unlock(&bd_inode->i_mutex);
+
+ error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
if (error == -EOPNOTSUPP)
error = 0;
+
+ mutex_lock(&bd_inode->i_mutex);
+
return error;
}
EXPORT_SYMBOL(blkdev_fsync);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e7b8f2c89ccb..feca04197d02 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -44,8 +44,6 @@ static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
-static atomic_t btrfs_bdi_num = ATOMIC_INIT(0);
-
/*
* end_io_wq structs are used to do processing in task context when an IO is
* complete. This is used during reads to verify checksums, and it is used
@@ -1375,19 +1373,11 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
int err;
- bdi->name = "btrfs";
bdi->capabilities = BDI_CAP_MAP_COPY;
- err = bdi_init(bdi);
+ err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
if (err)
return err;
- err = bdi_register(bdi, NULL, "btrfs-%d",
- atomic_inc_return(&btrfs_bdi_num));
- if (err) {
- bdi_destroy(bdi);
- return err;
- }
-
bdi->ra_pages = default_backing_dev_info.ra_pages;
bdi->unplug_io_fn = btrfs_unplug_io_fn;
bdi->unplug_io_data = info;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index aa3cd7cc3e40..412593703d1e 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -337,16 +337,15 @@ out:
/*
* Get ref for the oldest snapc for an inode with dirty data... that is, the
* only snap context we are allowed to write back.
- *
- * Caller holds i_lock.
*/
-static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
- u64 *snap_size)
+static struct ceph_snap_context *get_oldest_context(struct inode *inode,
+ u64 *snap_size)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc = NULL;
struct ceph_cap_snap *capsnap = NULL;
+ spin_lock(&inode->i_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
capsnap->context, capsnap->dirty_pages);
@@ -357,21 +356,11 @@ static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
break;
}
}
- if (!snapc && ci->i_snap_realm) {
- snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
+ if (!snapc && ci->i_head_snapc) {
+ snapc = ceph_get_snap_context(ci->i_head_snapc);
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
}
- return snapc;
-}
-
-static struct ceph_snap_context *get_oldest_context(struct inode *inode,
- u64 *snap_size)
-{
- struct ceph_snap_context *snapc = NULL;
-
- spin_lock(&inode->i_lock);
- snapc = __get_oldest_context(inode, snap_size);
spin_unlock(&inode->i_lock);
return snapc;
}
@@ -392,7 +381,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
int len = PAGE_CACHE_SIZE;
loff_t i_size;
int err = 0;
- struct ceph_snap_context *snapc;
+ struct ceph_snap_context *snapc, *oldest;
u64 snap_size = 0;
long writeback_stat;
@@ -413,13 +402,16 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
dout("writepage %p page %p not dirty?\n", inode, page);
goto out;
}
- if (snapc != get_oldest_context(inode, &snap_size)) {
+ oldest = get_oldest_context(inode, &snap_size);
+ if (snapc->seq > oldest->seq) {
dout("writepage %p page %p snapc %p not writeable - noop\n",
inode, page, (void *)page->private);
/* we should only noop if called by kswapd */
WARN_ON((current->flags & PF_MEMALLOC) == 0);
+ ceph_put_snap_context(oldest);
goto out;
}
+ ceph_put_snap_context(oldest);
/* is this a partial page at end of file? */
if (snap_size)
@@ -458,7 +450,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
ClearPagePrivate(page);
end_page_writeback(page);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
- ceph_put_snap_context(snapc);
+ ceph_put_snap_context(snapc); /* page's reference */
out:
return err;
}
@@ -558,9 +550,9 @@ static void writepages_finish(struct ceph_osd_request *req,
dout("inode %p skipping page %p\n", inode, page);
wbc->pages_skipped++;
}
+ ceph_put_snap_context((void *)page->private);
page->private = 0;
ClearPagePrivate(page);
- ceph_put_snap_context(snapc);
dout("unlocking %d %p\n", i, page);
end_page_writeback(page);
@@ -618,7 +610,7 @@ static int ceph_writepages_start(struct address_space *mapping,
int range_whole = 0;
int should_loop = 1;
pgoff_t max_pages = 0, max_pages_ever = 0;
- struct ceph_snap_context *snapc = NULL, *last_snapc = NULL;
+ struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
struct pagevec pvec;
int done = 0;
int rc = 0;
@@ -770,9 +762,10 @@ get_more_pages:
}
/* only if matching snap context */
- if (snapc != (void *)page->private) {
- dout("page snapc %p != oldest %p\n",
- (void *)page->private, snapc);
+ pgsnapc = (void *)page->private;
+ if (pgsnapc->seq > snapc->seq) {
+ dout("page snapc %p %lld > oldest %p %lld\n",
+ pgsnapc, pgsnapc->seq, snapc, snapc->seq);
unlock_page(page);
if (!locked_pages)
continue; /* keep looking for snap */
@@ -914,7 +907,10 @@ static int context_is_writeable_or_written(struct inode *inode,
struct ceph_snap_context *snapc)
{
struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
- return !oldest || snapc->seq <= oldest->seq;
+ int ret = !oldest || snapc->seq <= oldest->seq;
+
+ ceph_put_snap_context(oldest);
+ return ret;
}
/*
@@ -936,8 +932,8 @@ static int ceph_update_writeable_page(struct file *file,
int pos_in_page = pos & ~PAGE_CACHE_MASK;
int end_in_page = pos_in_page + len;
loff_t i_size;
- struct ceph_snap_context *snapc;
int r;
+ struct ceph_snap_context *snapc, *oldest;
retry_locked:
/* writepages currently holds page lock, but if we change that later, */
@@ -947,23 +943,24 @@ retry_locked:
BUG_ON(!ci->i_snap_realm);
down_read(&mdsc->snap_rwsem);
BUG_ON(!ci->i_snap_realm->cached_context);
- if (page->private &&
- (void *)page->private != ci->i_snap_realm->cached_context) {
+ snapc = (void *)page->private;
+ if (snapc && snapc != ci->i_head_snapc) {
/*
* this page is already dirty in another (older) snap
* context! is it writeable now?
*/
- snapc = get_oldest_context(inode, NULL);
+ oldest = get_oldest_context(inode, NULL);
up_read(&mdsc->snap_rwsem);
- if (snapc != (void *)page->private) {
+ if (snapc->seq > oldest->seq) {
+ ceph_put_snap_context(oldest);
dout(" page %p snapc %p not current or oldest\n",
- page, (void *)page->private);
+ page, snapc);
/*
* queue for writeback, and wait for snapc to
* be writeable or written
*/
- snapc = ceph_get_snap_context((void *)page->private);
+ snapc = ceph_get_snap_context(snapc);
unlock_page(page);
ceph_queue_writeback(inode);
r = wait_event_interruptible(ci->i_cap_wq,
@@ -973,6 +970,7 @@ retry_locked:
return r;
return -EAGAIN;
}
+ ceph_put_snap_context(oldest);
/* yay, writeable, do it now (without dropping page lock) */
dout(" page %p snapc %p not current, but oldest\n",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 3710e077a857..aa2239fa9a3b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1205,6 +1205,12 @@ retry:
if (capsnap->dirty_pages || capsnap->writing)
continue;
+ /*
+ * if cap writeback already occurred, we should have dropped
+ * the capsnap in ceph_put_wrbuffer_cap_refs.
+ */
+ BUG_ON(capsnap->dirty == 0);
+
/* pick mds, take s_mutex */
mds = __ceph_get_cap_mds(ci, &mseq);
if (session && session->s_mds != mds) {
@@ -2118,8 +2124,8 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
}
spin_unlock(&inode->i_lock);
- dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
- last ? "last" : "");
+ dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
+ last ? " last" : "", put ? " put" : "");
if (last && !flushsnaps)
ceph_check_caps(ci, 0, NULL);
@@ -2143,7 +2149,8 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
{
struct inode *inode = &ci->vfs_inode;
int last = 0;
- int last_snap = 0;
+ int complete_capsnap = 0;
+ int drop_capsnap = 0;
int found = 0;
struct ceph_cap_snap *capsnap = NULL;
@@ -2166,19 +2173,32 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->context == snapc) {
found = 1;
- capsnap->dirty_pages -= nr;
- last_snap = !capsnap->dirty_pages;
break;
}
}
BUG_ON(!found);
+ capsnap->dirty_pages -= nr;
+ if (capsnap->dirty_pages == 0) {
+ complete_capsnap = 1;
+ if (capsnap->dirty == 0)
+ /* cap writeback completed before we created
+ * the cap_snap; no FLUSHSNAP is needed */
+ drop_capsnap = 1;
+ }
dout("put_wrbuffer_cap_refs on %p cap_snap %p "
- " snap %lld %d/%d -> %d/%d %s%s\n",
+ " snap %lld %d/%d -> %d/%d %s%s%s\n",
inode, capsnap, capsnap->context->seq,
ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
ci->i_wrbuffer_ref, capsnap->dirty_pages,
last ? " (wrbuffer last)" : "",
- last_snap ? " (capsnap last)" : "");
+ complete_capsnap ? " (complete capsnap)" : "",
+ drop_capsnap ? " (drop capsnap)" : "");
+ if (drop_capsnap) {
+ ceph_put_snap_context(capsnap->context);
+ list_del(&capsnap->ci_item);
+ list_del(&capsnap->flushing_item);
+ ceph_put_cap_snap(capsnap);
+ }
}
spin_unlock(&inode->i_lock);
@@ -2186,10 +2206,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
if (last) {
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
iput(inode);
- } else if (last_snap) {
+ } else if (complete_capsnap) {
ceph_flush_snaps(ci);
wake_up(&ci->i_cap_wq);
}
+ if (drop_capsnap)
+ iput(inode);
}
/*
@@ -2465,8 +2487,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
break;
}
WARN_ON(capsnap->dirty_pages || capsnap->writing);
- dout(" removing cap_snap %p follows %lld\n",
- capsnap, follows);
+ dout(" removing %p cap_snap %p follows %lld\n",
+ inode, capsnap, follows);
ceph_put_snap_context(capsnap->context);
list_del(&capsnap->ci_item);
list_del(&capsnap->flushing_item);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 7261dc6c2ead..ea8ee2e526aa 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -171,11 +171,11 @@ more:
spin_lock(&inode->i_lock);
spin_lock(&dcache_lock);
+ last = dentry;
+
if (err < 0)
goto out_unlock;
- last = dentry;
-
p = p->prev;
filp->f_pos++;
@@ -312,7 +312,7 @@ more:
req->r_readdir_offset = fi->next_offset;
req->r_args.readdir.frag = cpu_to_le32(frag);
req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
- req->r_num_caps = max_entries;
+ req->r_num_caps = max_entries + 1;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err < 0) {
ceph_mdsc_put_request(req);
@@ -489,6 +489,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct inode *inode = ceph_get_snapdir(parent);
dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
dentry, dentry->d_name.len, dentry->d_name.name, inode);
+ BUG_ON(!d_unhashed(dentry));
d_add(dentry, inode);
err = 0;
}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index aca82d55cc53..26f883c275e8 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -886,6 +886,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
struct inode *in = NULL;
struct ceph_mds_reply_inode *ininfo;
struct ceph_vino vino;
+ struct ceph_client *client = ceph_sb_to_client(sb);
int i = 0;
int err = 0;
@@ -949,7 +950,14 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
return err;
}
- if (rinfo->head->is_dentry && !req->r_aborted) {
+ /*
+ * ignore null lease/binding on snapdir ENOENT, or else we
+ * will have trouble splicing in the virtual snapdir later
+ */
+ if (rinfo->head->is_dentry && !req->r_aborted &&
+ (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
+ client->mount_args->snapdir_name,
+ req->r_dentry->d_name.len))) {
/*
* lookup link rename : null -> possibly existing inode
* mknod symlink mkdir : null -> new inode
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 8f1715ffbe4b..cdaaa131add3 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -30,6 +30,10 @@ static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
+#ifdef CONFIG_LOCKDEP
+static struct lock_class_key socket_class;
+#endif
+
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
@@ -228,6 +232,10 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
con->sock = sock;
sock->sk->sk_allocation = GFP_NOFS;
+#ifdef CONFIG_LOCKDEP
+ lockdep_set_class(&sock->sk->sk_lock, &socket_class);
+#endif
+
set_sock_callbacks(sock, con);
dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
@@ -333,6 +341,7 @@ static void reset_connection(struct ceph_connection *con)
con->out_msg = NULL;
}
con->in_seq = 0;
+ con->in_seq_acked = 0;
}
/*
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index 21c6623c4b07..2e2c15eed82a 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -314,71 +314,6 @@ bad:
return ERR_PTR(err);
}
-
-/*
- * osd map
- */
-void ceph_osdmap_destroy(struct ceph_osdmap *map)
-{
- dout("osdmap_destroy %p\n", map);
- if (map->crush)
- crush_destroy(map->crush);
- while (!RB_EMPTY_ROOT(&map->pg_temp)) {
- struct ceph_pg_mapping *pg =
- rb_entry(rb_first(&map->pg_temp),
- struct ceph_pg_mapping, node);
- rb_erase(&pg->node, &map->pg_temp);
- kfree(pg);
- }
- while (!RB_EMPTY_ROOT(&map->pg_pools)) {
- struct ceph_pg_pool_info *pi =
- rb_entry(rb_first(&map->pg_pools),
- struct ceph_pg_pool_info, node);
- rb_erase(&pi->node, &map->pg_pools);
- kfree(pi);
- }
- kfree(map->osd_state);
- kfree(map->osd_weight);
- kfree(map->osd_addr);
- kfree(map);
-}
-
-/*
- * adjust max osd value. reallocate arrays.
- */
-static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
-{
- u8 *state;
- struct ceph_entity_addr *addr;
- u32 *weight;
-
- state = kcalloc(max, sizeof(*state), GFP_NOFS);
- addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
- weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
- if (state == NULL || addr == NULL || weight == NULL) {
- kfree(state);
- kfree(addr);
- kfree(weight);
- return -ENOMEM;
- }
-
- /* copy old? */
- if (map->osd_state) {
- memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
- memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
- memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
- kfree(map->osd_state);
- kfree(map->osd_addr);
- kfree(map->osd_weight);
- }
-
- map->osd_state = state;
- map->osd_weight = weight;
- map->osd_addr = addr;
- map->max_osd = max;
- return 0;
-}
-
/*
* rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
* to a set of osds)
@@ -482,6 +417,13 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
return NULL;
}
+static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
+{
+ rb_erase(&pi->node, root);
+ kfree(pi->name);
+ kfree(pi);
+}
+
void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
{
ceph_decode_copy(p, &pi->v, sizeof(pi->v));
@@ -490,6 +432,98 @@ void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
}
+static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
+{
+ struct ceph_pg_pool_info *pi;
+ u32 num, len, pool;
+
+ ceph_decode_32_safe(p, end, num, bad);
+ dout(" %d pool names\n", num);
+ while (num--) {
+ ceph_decode_32_safe(p, end, pool, bad);
+ ceph_decode_32_safe(p, end, len, bad);
+ dout(" pool %d len %d\n", pool, len);
+ pi = __lookup_pg_pool(&map->pg_pools, pool);
+ if (pi) {
+ kfree(pi->name);
+ pi->name = kmalloc(len + 1, GFP_NOFS);
+ if (pi->name) {
+ memcpy(pi->name, *p, len);
+ pi->name[len] = '\0';
+ dout(" name is %s\n", pi->name);
+ }
+ }
+ *p += len;
+ }
+ return 0;
+
+bad:
+ return -EINVAL;
+}
+
+/*
+ * osd map
+ */
+void ceph_osdmap_destroy(struct ceph_osdmap *map)
+{
+ dout("osdmap_destroy %p\n", map);
+ if (map->crush)
+ crush_destroy(map->crush);
+ while (!RB_EMPTY_ROOT(&map->pg_temp)) {
+ struct ceph_pg_mapping *pg =
+ rb_entry(rb_first(&map->pg_temp),
+ struct ceph_pg_mapping, node);
+ rb_erase(&pg->node, &map->pg_temp);
+ kfree(pg);
+ }
+ while (!RB_EMPTY_ROOT(&map->pg_pools)) {
+ struct ceph_pg_pool_info *pi =
+ rb_entry(rb_first(&map->pg_pools),
+ struct ceph_pg_pool_info, node);
+ __remove_pg_pool(&map->pg_pools, pi);
+ }
+ kfree(map->osd_state);
+ kfree(map->osd_weight);
+ kfree(map->osd_addr);
+ kfree(map);
+}
+
+/*
+ * adjust max osd value. reallocate arrays.
+ */
+static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
+{
+ u8 *state;
+ struct ceph_entity_addr *addr;
+ u32 *weight;
+
+ state = kcalloc(max, sizeof(*state), GFP_NOFS);
+ addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
+ weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
+ if (state == NULL || addr == NULL || weight == NULL) {
+ kfree(state);
+ kfree(addr);
+ kfree(weight);
+ return -ENOMEM;
+ }
+
+ /* copy old? */
+ if (map->osd_state) {
+ memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
+ memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
+ memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
+ kfree(map->osd_state);
+ kfree(map->osd_addr);
+ kfree(map->osd_weight);
+ }
+
+ map->osd_state = state;
+ map->osd_weight = weight;
+ map->osd_addr = addr;
+ map->max_osd = max;
+ return 0;
+}
+
/*
* decode a full map.
*/
@@ -526,7 +560,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ceph_decode_32_safe(p, end, max, bad);
while (max--) {
ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
- pi = kmalloc(sizeof(*pi), GFP_NOFS);
+ pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi)
goto bad;
pi->id = ceph_decode_32(p);
@@ -539,6 +573,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
__decode_pool(p, pi);
__insert_pg_pool(&map->pg_pools, pi);
}
+
+ if (version >= 5 && __decode_pool_names(p, end, map) < 0)
+ goto bad;
+
ceph_decode_32_safe(p, end, map->pool_max, bad);
ceph_decode_32_safe(p, end, map->flags, bad);
@@ -712,7 +750,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
}
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (!pi) {
- pi = kmalloc(sizeof(*pi), GFP_NOFS);
+ pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi) {
err = -ENOMEM;
goto bad;
@@ -722,6 +760,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
}
__decode_pool(p, pi);
}
+ if (version >= 5 && __decode_pool_names(p, end, map) < 0)
+ goto bad;
/* old_pool */
ceph_decode_32_safe(p, end, len, bad);
@@ -730,10 +770,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
ceph_decode_32_safe(p, end, pool, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
- if (pi) {
- rb_erase(&pi->node, &map->pg_pools);
- kfree(pi);
- }
+ if (pi)
+ __remove_pg_pool(&map->pg_pools, pi);
}
/* new_up */
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h
index 1fb55afb2642..8bc9f1e4f562 100644
--- a/fs/ceph/osdmap.h
+++ b/fs/ceph/osdmap.h
@@ -23,6 +23,7 @@ struct ceph_pg_pool_info {
int id;
struct ceph_pg_pool v;
int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
+ char *name;
};
struct ceph_pg_mapping {
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
index 26ac8b89a676..a1fc1d017b58 100644
--- a/fs/ceph/rados.h
+++ b/fs/ceph/rados.h
@@ -11,8 +11,10 @@
/*
* osdmap encoding versions
*/
-#define CEPH_OSDMAP_INC_VERSION 4
-#define CEPH_OSDMAP_VERSION 4
+#define CEPH_OSDMAP_INC_VERSION 5
+#define CEPH_OSDMAP_INC_VERSION_EXT 5
+#define CEPH_OSDMAP_VERSION 5
+#define CEPH_OSDMAP_VERSION_EXT 5
/*
* fs id
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e6f9bc57d472..2b881262ef67 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -431,8 +431,7 @@ static int dup_array(u64 **dst, __le64 *src, int num)
* Caller must hold snap_rwsem for read (i.e., the realm topology won't
* change).
*/
-void ceph_queue_cap_snap(struct ceph_inode_info *ci,
- struct ceph_snap_context *snapc)
+void ceph_queue_cap_snap(struct ceph_inode_info *ci)
{
struct inode *inode = &ci->vfs_inode;
struct ceph_cap_snap *capsnap;
@@ -451,10 +450,11 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
as no new writes are allowed to start when pending, so any
writes in progress now were started before the previous
cap_snap. lucky us. */
- dout("queue_cap_snap %p snapc %p seq %llu used %d"
- " already pending\n", inode, snapc, snapc->seq, used);
+ dout("queue_cap_snap %p already pending\n", inode);
kfree(capsnap);
} else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+ struct ceph_snap_context *snapc = ci->i_head_snapc;
+
igrab(inode);
atomic_set(&capsnap->nref, 1);
@@ -463,7 +463,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
INIT_LIST_HEAD(&capsnap->flushing_item);
capsnap->follows = snapc->seq - 1;
- capsnap->context = ceph_get_snap_context(snapc);
capsnap->issued = __ceph_caps_issued(ci, NULL);
capsnap->dirty = __ceph_caps_dirty(ci);
@@ -480,7 +479,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci,
snapshot. */
capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
ci->i_wrbuffer_ref_head = 0;
- ceph_put_snap_context(ci->i_head_snapc);
+ capsnap->context = snapc;
ci->i_head_snapc = NULL;
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
@@ -522,15 +521,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->ctime = inode->i_ctime;
capsnap->time_warp_seq = ci->i_time_warp_seq;
if (capsnap->dirty_pages) {
- dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu "
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
"still has %d dirty pages\n", inode, capsnap,
capsnap->context, capsnap->context->seq,
- capsnap->size, capsnap->dirty_pages);
+ ceph_cap_string(capsnap->dirty), capsnap->size,
+ capsnap->dirty_pages);
return 0;
}
- dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n",
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
inode, capsnap, capsnap->context,
- capsnap->context->seq, capsnap->size);
+ capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+ capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
@@ -602,7 +603,7 @@ more:
if (lastinode)
iput(lastinode);
lastinode = inode;
- ceph_queue_cap_snap(ci, realm->cached_context);
+ ceph_queue_cap_snap(ci);
spin_lock(&realm->inodes_with_caps_lock);
}
spin_unlock(&realm->inodes_with_caps_lock);
@@ -824,8 +825,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
spin_unlock(&realm->inodes_with_caps_lock);
spin_unlock(&inode->i_lock);
- ceph_queue_cap_snap(ci,
- ci->i_snap_realm->cached_context);
+ ceph_queue_cap_snap(ci);
iput(inode);
continue;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index ca702c67bc66..e30dfbb056c3 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -715,8 +715,7 @@ extern int ceph_update_snap_trace(struct ceph_mds_client *m,
extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg);
-extern void ceph_queue_cap_snap(struct ceph_inode_info *ci,
- struct ceph_snap_context *snapc);
+extern void ceph_queue_cap_snap(struct ceph_inode_info *ci);
extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap);
extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 4797787c6a44..246a167cb913 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -18,6 +18,8 @@
#ifndef _CIFS_FS_SB_H
#define _CIFS_FS_SB_H
+#include <linux/backing-dev.h>
+
#define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */
#define CIFS_MOUNT_SET_UID 2 /* set current's euid in create etc. */
#define CIFS_MOUNT_SERVER_INUM 4 /* inode numbers from uniqueid from server */
@@ -50,5 +52,6 @@ struct cifs_sb_info {
#ifdef CONFIG_CIFS_DFS_UPCALL
char *mountdata; /* mount options received at mount time */
#endif
+ struct backing_dev_info bdi;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index ded66be6597c..ad235d604a0b 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -103,6 +103,12 @@ cifs_read_super(struct super_block *sb, void *data,
if (cifs_sb == NULL)
return -ENOMEM;
+ rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
+ if (rc) {
+ kfree(cifs_sb);
+ return rc;
+ }
+
#ifdef CONFIG_CIFS_DFS_UPCALL
/* copy mount params to sb for use in submounts */
/* BB: should we move this after the mount so we
@@ -115,6 +121,7 @@ cifs_read_super(struct super_block *sb, void *data,
int len = strlen(data);
cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
if (cifs_sb->mountdata == NULL) {
+ bdi_destroy(&cifs_sb->bdi);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
return -ENOMEM;
@@ -135,6 +142,7 @@ cifs_read_super(struct super_block *sb, void *data,
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
+ sb->s_bdi = &cifs_sb->bdi;
/* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
sb->s_blocksize =
cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
@@ -183,6 +191,7 @@ out_mount_failed:
}
#endif
unload_nls(cifs_sb->local_nls);
+ bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb);
}
return rc;
@@ -214,6 +223,7 @@ cifs_put_super(struct super_block *sb)
#endif
unload_nls(cifs_sb->local_nls);
+ bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb);
unlock_kernel();
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index a1695dcadd99..d97f9935a028 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -167,6 +167,10 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
return -EBUSY;
}
+ error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY);
+ if (error)
+ goto bdi_err;
+
vc->vc_sb = sb;
sb->s_fs_info = vc;
@@ -175,6 +179,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
sb->s_blocksize_bits = 12;
sb->s_magic = CODA_SUPER_MAGIC;
sb->s_op = &coda_super_operations;
+ sb->s_bdi = &vc->bdi;
/* get root fid from Venus: this needs the root inode */
error = venus_rootfid(sb, &fid);
@@ -200,6 +205,8 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
return 0;
error:
+ bdi_destroy(&vc->bdi);
+ bdi_err:
if (root)
iput(root);
if (vc)
@@ -210,6 +217,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
static void coda_put_super(struct super_block *sb)
{
+ bdi_destroy(&coda_vcp(sb)->bdi);
coda_vcp(sb)->vc_sb = NULL;
sb->s_fs_info = NULL;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index efb2b9400391..1cc087635a5e 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -382,8 +382,8 @@ out:
static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
struct ecryptfs_crypt_stat *crypt_stat)
{
- (*offset) = (crypt_stat->num_header_bytes_at_front
- + (crypt_stat->extent_size * extent_num));
+ (*offset) = ecryptfs_lower_header_size(crypt_stat)
+ + (crypt_stat->extent_size * extent_num);
}
/**
@@ -835,13 +835,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
set_extent_mask_and_shift(crypt_stat);
crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- crypt_stat->num_header_bytes_at_front = 0;
+ crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else {
if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
- crypt_stat->num_header_bytes_at_front =
+ crypt_stat->metadata_size =
ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else
- crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE;
+ crypt_stat->metadata_size = PAGE_CACHE_SIZE;
}
}
@@ -1108,9 +1108,9 @@ static void write_ecryptfs_marker(char *page_virt, size_t *written)
(*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
}
-static void
-write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat,
- size_t *written)
+void ecryptfs_write_crypt_stat_flags(char *page_virt,
+ struct ecryptfs_crypt_stat *crypt_stat,
+ size_t *written)
{
u32 flags = 0;
int i;
@@ -1238,8 +1238,7 @@ ecryptfs_write_header_metadata(char *virt,
header_extent_size = (u32)crypt_stat->extent_size;
num_header_extents_at_front =
- (u16)(crypt_stat->num_header_bytes_at_front
- / crypt_stat->extent_size);
+ (u16)(crypt_stat->metadata_size / crypt_stat->extent_size);
put_unaligned_be32(header_extent_size, virt);
virt += 4;
put_unaligned_be16(num_header_extents_at_front, virt);
@@ -1292,7 +1291,8 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
offset = ECRYPTFS_FILE_SIZE_BYTES;
write_ecryptfs_marker((page_virt + offset), &written);
offset += written;
- write_ecryptfs_flags((page_virt + offset), crypt_stat, &written);
+ ecryptfs_write_crypt_stat_flags((page_virt + offset), crypt_stat,
+ &written);
offset += written;
ecryptfs_write_header_metadata((page_virt + offset), crypt_stat,
&written);
@@ -1382,7 +1382,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
rc = -EINVAL;
goto out;
}
- virt_len = crypt_stat->num_header_bytes_at_front;
+ virt_len = crypt_stat->metadata_size;
order = get_order(virt_len);
/* Released in this function */
virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order);
@@ -1428,16 +1428,15 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
header_extent_size = get_unaligned_be32(virt);
virt += sizeof(__be32);
num_header_extents_at_front = get_unaligned_be16(virt);
- crypt_stat->num_header_bytes_at_front =
- (((size_t)num_header_extents_at_front
- * (size_t)header_extent_size));
+ crypt_stat->metadata_size = (((size_t)num_header_extents_at_front
+ * (size_t)header_extent_size));
(*bytes_read) = (sizeof(__be32) + sizeof(__be16));
if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
- && (crypt_stat->num_header_bytes_at_front
+ && (crypt_stat->metadata_size
< ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
rc = -EINVAL;
printk(KERN_WARNING "Invalid header size: [%zd]\n",
- crypt_stat->num_header_bytes_at_front);
+ crypt_stat->metadata_size);
}
return rc;
}
@@ -1452,8 +1451,7 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
*/
static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
{
- crypt_stat->num_header_bytes_at_front =
- ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
+ crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
}
/**
@@ -1607,6 +1605,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
ecryptfs_dentry,
ECRYPTFS_VALIDATE_HEADER_SIZE);
if (rc) {
+ memset(page_virt, 0, PAGE_CACHE_SIZE);
rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
if (rc) {
printk(KERN_DEBUG "Valid eCryptfs headers not found in "
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 542f625312f3..bfc2e0f78f00 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -35,6 +35,7 @@
#include <linux/scatterlist.h>
#include <linux/hash.h>
#include <linux/nsproxy.h>
+#include <linux/backing-dev.h>
/* Version verification for shared data structures w/ userspace */
#define ECRYPTFS_VERSION_MAJOR 0x00
@@ -273,7 +274,7 @@ struct ecryptfs_crypt_stat {
u32 flags;
unsigned int file_version;
size_t iv_bytes;
- size_t num_header_bytes_at_front;
+ size_t metadata_size;
size_t extent_size; /* Data extent size; default is 4096 */
size_t key_size;
size_t extent_shift;
@@ -393,6 +394,7 @@ struct ecryptfs_mount_crypt_stat {
struct ecryptfs_sb_info {
struct super_block *wsi_sb;
struct ecryptfs_mount_crypt_stat mount_crypt_stat;
+ struct backing_dev_info bdi;
};
/* file private data. */
@@ -464,6 +466,14 @@ struct ecryptfs_daemon {
extern struct mutex ecryptfs_daemon_hash_mux;
+static inline size_t
+ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat)
+{
+ if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
+ return 0;
+ return crypt_stat->metadata_size;
+}
+
static inline struct ecryptfs_file_info *
ecryptfs_file_to_private(struct file *file)
{
@@ -651,6 +661,9 @@ int ecryptfs_decrypt_page(struct page *page);
int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry);
int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
+void ecryptfs_write_crypt_stat_flags(char *page_virt,
+ struct ecryptfs_crypt_stat *crypt_stat,
+ size_t *written);
int ecryptfs_read_and_validate_header_region(char *data,
struct inode *ecryptfs_inode);
int ecryptfs_read_and_validate_xattr_region(char *page_virt,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index d3362faf3852..e2d4418affac 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -324,6 +324,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
rc = ecryptfs_read_and_validate_header_region(page_virt,
ecryptfs_dentry->d_inode);
if (rc) {
+ memset(page_virt, 0, PAGE_CACHE_SIZE);
rc = ecryptfs_read_and_validate_xattr_region(page_virt,
ecryptfs_dentry);
if (rc) {
@@ -336,7 +337,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
ecryptfs_dentry->d_sb)->mount_crypt_stat;
if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- file_size = (crypt_stat->num_header_bytes_at_front
+ file_size = (crypt_stat->metadata_size
+ i_size_read(lower_dentry->d_inode));
else
file_size = i_size_read(lower_dentry->d_inode);
@@ -388,9 +389,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
- printk(KERN_ERR "%s: lookup_one_len() returned [%d] on "
- "lower_dentry = [%s]\n", __func__, rc,
- ecryptfs_dentry->d_name.name);
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
+ "[%d] on lower_dentry = [%s]\n", __func__, rc,
+ encrypted_and_encoded_name);
goto out_d_drop;
}
if (lower_dentry->d_inode)
@@ -417,9 +418,9 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
- printk(KERN_ERR "%s: lookup_one_len() returned [%d] on "
- "lower_dentry = [%s]\n", __func__, rc,
- encrypted_and_encoded_name);
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
+ "[%d] on lower_dentry = [%s]\n", __func__, rc,
+ encrypted_and_encoded_name);
goto out_d_drop;
}
lookup_and_interpose:
@@ -456,8 +457,8 @@ static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0);
if (rc)
goto out_lock;
- fsstack_copy_attr_times(dir, lower_new_dentry->d_inode);
- fsstack_copy_inode_size(dir, lower_new_dentry->d_inode);
+ fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
+ fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
old_dentry->d_inode->i_nlink =
ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink;
i_size_write(new_dentry->d_inode, file_size_save);
@@ -648,38 +649,17 @@ out_lock:
return rc;
}
-static int
-ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
+ size_t *bufsiz)
{
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
char *lower_buf;
- size_t lower_bufsiz;
- struct dentry *lower_dentry;
- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
- char *plaintext_name;
- size_t plaintext_name_size;
+ size_t lower_bufsiz = PATH_MAX;
mm_segment_t old_fs;
int rc;
- lower_dentry = ecryptfs_dentry_to_lower(dentry);
- if (!lower_dentry->d_inode->i_op->readlink) {
- rc = -EINVAL;
- goto out;
- }
- mount_crypt_stat = &ecryptfs_superblock_to_private(
- dentry->d_sb)->mount_crypt_stat;
- /*
- * If the lower filename is encrypted, it will result in a significantly
- * longer name. If needed, truncate the name after decode and decrypt.
- */
- if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
- lower_bufsiz = PATH_MAX;
- else
- lower_bufsiz = bufsiz;
- /* Released in this function */
lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
- if (lower_buf == NULL) {
- printk(KERN_ERR "%s: Out of memory whilst attempting to "
- "kmalloc [%zd] bytes\n", __func__, lower_bufsiz);
+ if (!lower_buf) {
rc = -ENOMEM;
goto out;
}
@@ -689,29 +669,31 @@ ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
(char __user *)lower_buf,
lower_bufsiz);
set_fs(old_fs);
- if (rc >= 0) {
- rc = ecryptfs_decode_and_decrypt_filename(&plaintext_name,
- &plaintext_name_size,
- dentry, lower_buf,
- rc);
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to decode and "
- "decrypt filename; rc = [%d]\n", __func__,
- rc);
- goto out_free_lower_buf;
- }
- /* Check for bufsiz <= 0 done in sys_readlinkat() */
- rc = copy_to_user(buf, plaintext_name,
- min((size_t) bufsiz, plaintext_name_size));
- if (rc)
- rc = -EFAULT;
- else
- rc = plaintext_name_size;
- kfree(plaintext_name);
- fsstack_copy_attr_atime(dentry->d_inode, lower_dentry->d_inode);
- }
-out_free_lower_buf:
+ if (rc < 0)
+ goto out;
+ lower_bufsiz = rc;
+ rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
+ lower_buf, lower_bufsiz);
+out:
kfree(lower_buf);
+ return rc;
+}
+
+static int
+ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+ char *kbuf;
+ size_t kbufsiz, copied;
+ int rc;
+
+ rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
+ if (rc)
+ goto out;
+ copied = min_t(size_t, bufsiz, kbufsiz);
+ rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
+ kfree(kbuf);
+ fsstack_copy_attr_atime(dentry->d_inode,
+ ecryptfs_dentry_to_lower(dentry)->d_inode);
out:
return rc;
}
@@ -769,7 +751,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
{
loff_t lower_size;
- lower_size = crypt_stat->num_header_bytes_at_front;
+ lower_size = ecryptfs_lower_header_size(crypt_stat);
if (upper_size != 0) {
loff_t num_extents;
@@ -1016,6 +998,28 @@ out:
return rc;
}
+int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ int rc = 0;
+
+ mount_crypt_stat = &ecryptfs_superblock_to_private(
+ dentry->d_sb)->mount_crypt_stat;
+ generic_fillattr(dentry->d_inode, stat);
+ if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
+ char *target;
+ size_t targetsiz;
+
+ rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
+ if (!rc) {
+ kfree(target);
+ stat->size = targetsiz;
+ }
+ }
+ return rc;
+}
+
int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
@@ -1040,7 +1044,7 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->setxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1058,7 +1062,7 @@ ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
int rc = 0;
if (!lower_dentry->d_inode->i_op->getxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1085,7 +1089,7 @@ ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->listxattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1102,7 +1106,7 @@ static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->removexattr) {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
@@ -1133,6 +1137,7 @@ const struct inode_operations ecryptfs_symlink_iops = {
.put_link = ecryptfs_put_link,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
+ .getattr = ecryptfs_getattr_link,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index af1a8f01ebac..760983d0f25e 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -497,17 +497,25 @@ struct kmem_cache *ecryptfs_sb_info_cache;
static int
ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
{
+ struct ecryptfs_sb_info *esi;
int rc = 0;
/* Released in ecryptfs_put_super() */
ecryptfs_set_superblock_private(sb,
kmem_cache_zalloc(ecryptfs_sb_info_cache,
GFP_KERNEL));
- if (!ecryptfs_superblock_to_private(sb)) {
+ esi = ecryptfs_superblock_to_private(sb);
+ if (!esi) {
ecryptfs_printk(KERN_WARNING, "Out of memory\n");
rc = -ENOMEM;
goto out;
}
+
+ rc = bdi_setup_and_register(&esi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
+ if (rc)
+ goto out;
+
+ sb->s_bdi = &esi->bdi;
sb->s_op = &ecryptfs_sops;
/* Released through deactivate_super(sb) from get_sb_nodev */
sb->s_root = d_alloc(NULL, &(const struct qstr) {
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index d491237c98e7..2ee9a3a7b68c 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -83,6 +83,19 @@ out:
return rc;
}
+static void strip_xattr_flag(char *page_virt,
+ struct ecryptfs_crypt_stat *crypt_stat)
+{
+ if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
+ size_t written;
+
+ crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
+ ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
+ &written);
+ crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
+ }
+}
+
/**
* Header Extent:
* Octets 0-7: Unencrypted file size (big-endian)
@@ -98,19 +111,6 @@ out:
* (big-endian)
* Octet 26: Begin RFC 2440 authentication token packet set
*/
-static void set_header_info(char *page_virt,
- struct ecryptfs_crypt_stat *crypt_stat)
-{
- size_t written;
- size_t save_num_header_bytes_at_front =
- crypt_stat->num_header_bytes_at_front;
-
- crypt_stat->num_header_bytes_at_front =
- ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
- ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written);
- crypt_stat->num_header_bytes_at_front =
- save_num_header_bytes_at_front;
-}
/**
* ecryptfs_copy_up_encrypted_with_header
@@ -136,8 +136,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
* num_extents_per_page)
+ extent_num_in_page);
size_t num_header_extents_at_front =
- (crypt_stat->num_header_bytes_at_front
- / crypt_stat->extent_size);
+ (crypt_stat->metadata_size / crypt_stat->extent_size);
if (view_extent_num < num_header_extents_at_front) {
/* This is a header extent */
@@ -147,9 +146,14 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
memset(page_virt, 0, PAGE_CACHE_SIZE);
/* TODO: Support more than one header extent */
if (view_extent_num == 0) {
+ size_t written;
+
rc = ecryptfs_read_xattr_region(
page_virt, page->mapping->host);
- set_header_info(page_virt, crypt_stat);
+ strip_xattr_flag(page_virt + 16, crypt_stat);
+ ecryptfs_write_header_metadata(page_virt + 20,
+ crypt_stat,
+ &written);
}
kunmap_atomic(page_virt, KM_USER0);
flush_dcache_page(page);
@@ -162,7 +166,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
/* This is an encrypted data extent */
loff_t lower_offset =
((view_extent_num * crypt_stat->extent_size)
- - crypt_stat->num_header_bytes_at_front);
+ - crypt_stat->metadata_size);
rc = ecryptfs_read_lower_page_segment(
page, (lower_offset >> PAGE_CACHE_SHIFT),
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index fcef41c1d2cf..0c0ae491d231 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -86,7 +86,6 @@ static void ecryptfs_destroy_inode(struct inode *inode)
if (lower_dentry->d_inode) {
fput(inode_info->lower_file);
inode_info->lower_file = NULL;
- d_drop(lower_dentry);
}
}
ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
@@ -123,6 +122,7 @@ static void ecryptfs_put_super(struct super_block *sb)
lock_kernel();
ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat);
+ bdi_destroy(&sb_info->bdi);
kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
ecryptfs_set_superblock_private(sb, NULL);
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 8442e353309f..54373278a353 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -35,6 +35,7 @@
#include <linux/fs.h>
#include <linux/time.h>
+#include <linux/backing-dev.h>
#include "common.h"
/* FIXME: Remove once pnfs hits mainline
@@ -92,6 +93,7 @@ struct exofs_sb_info {
struct exofs_layout layout; /* Default files layout,
* contains the variable osd_dev
* array. Keep last */
+ struct backing_dev_info bdi;
struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */
};
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 18e57ea1e5b4..03149b9a5178 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -302,6 +302,7 @@ static void exofs_put_super(struct super_block *sb)
_exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0],
sbi->layout.s_pid);
+ bdi_destroy(&sbi->bdi);
exofs_free_sbi(sbi);
sb->s_fs_info = NULL;
}
@@ -546,6 +547,10 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi)
return -ENOMEM;
+ ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY);
+ if (ret)
+ goto free_bdi;
+
/* use mount options to fill superblock */
od = osduld_path_lookup(opts->dev_name);
if (IS_ERR(od)) {
@@ -612,6 +617,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
}
/* set up operation vectors */
+ sb->s_bdi = &sbi->bdi;
sb->s_fs_info = sbi;
sb->s_op = &exofs_sops;
sb->s_export_op = &exofs_export_ops;
@@ -643,6 +649,8 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
free_sbi:
+ bdi_destroy(&sbi->bdi);
+free_bdi:
EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n",
opts->dev_name, sbi->layout.s_pid, ret);
exofs_free_sbi(sbi);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 94c8ee81f5e1..236b834b4ca8 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3879,6 +3879,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
flags |= FIEMAP_EXTENT_DATA_INLINE;
+ brelse(iloc.bh);
} else { /* external block */
physical = EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5381802d6052..81d605412844 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5375,7 +5375,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
} else {
struct ext4_iloc iloc;
- err = ext4_get_inode_loc(inode, &iloc);
+ err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
if (wbc->sync_mode == WB_SYNC_ALL)
@@ -5386,6 +5386,7 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
(unsigned long long)iloc.bh->b_blocknr);
err = -EIO;
}
+ brelse(iloc.bh);
}
return err;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index bde9d0b170c2..b423a364dca3 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2535,6 +2535,17 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->count, entry->group, entry);
+ if (test_opt(sb, DISCARD)) {
+ ext4_fsblk_t discard_block;
+
+ discard_block = entry->start_blk +
+ ext4_group_first_block_no(sb, entry->group);
+ trace_ext4_discard_blocks(sb,
+ (unsigned long long)discard_block,
+ entry->count);
+ sb_issue_discard(sb, discard_block, entry->count);
+ }
+
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
@@ -2556,16 +2567,6 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
page_cache_release(e4b.bd_bitmap_page);
}
ext4_unlock_group(sb, entry->group);
- if (test_opt(sb, DISCARD)) {
- ext4_fsblk_t discard_block;
-
- discard_block = entry->start_blk +
- ext4_group_first_block_no(sb, entry->group);
- trace_ext4_discard_blocks(sb,
- (unsigned long long)discard_block,
- entry->count);
- sb_issue_discard(sb, discard_block, entry->count);
- }
kmem_cache_free(ext4_free_ext_cachep, entry);
ext4_mb_release_desc(&e4b);
}
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 6c751106c2e5..7faefb4da939 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -228,14 +228,23 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
#ifdef CONFIG_BLOCK
-#define blk_to_logical(inode, blk) (blk << (inode)->i_blkbits)
-#define logical_to_blk(inode, offset) (offset >> (inode)->i_blkbits);
+static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
+{
+ return (offset >> inode->i_blkbits);
+}
+
+static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
+{
+ return (blk << inode->i_blkbits);
+}
/**
* __generic_block_fiemap - FIEMAP for block based inodes (no locking)
- * @inode - the inode to map
- * @arg - the pointer to userspace where we copy everything to
- * @get_block - the fs's get_block function
+ * @inode: the inode to map
+ * @fieinfo: the fiemap info struct that will be passed back to userspace
+ * @start: where to start mapping in the inode
+ * @len: how much space to map
+ * @get_block: the fs's get_block function
*
* This does FIEMAP for block based inodes. Basically it will just loop
* through get_block until we hit the number of extents we want to map, or we
@@ -250,58 +259,63 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
*/
int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block)
+ struct fiemap_extent_info *fieinfo, loff_t start,
+ loff_t len, get_block_t *get_block)
{
- struct buffer_head tmp;
- unsigned long long start_blk;
- long long length = 0, map_len = 0;
+ struct buffer_head map_bh;
+ sector_t start_blk, last_blk;
+ loff_t isize = i_size_read(inode);
u64 logical = 0, phys = 0, size = 0;
u32 flags = FIEMAP_EXTENT_MERGED;
- int ret = 0, past_eof = 0, whole_file = 0;
+ bool past_eof = false, whole_file = false;
+ int ret = 0;
- if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC)))
+ ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ if (ret)
return ret;
- start_blk = logical_to_blk(inode, start);
-
- length = (long long)min_t(u64, len, i_size_read(inode));
- if (length < len)
- whole_file = 1;
+ /*
+ * Either the i_mutex or other appropriate locking needs to be held
+ * since we expect isize to not change at all through the duration of
+ * this call.
+ */
+ if (len >= isize) {
+ whole_file = true;
+ len = isize;
+ }
- map_len = length;
+ start_blk = logical_to_blk(inode, start);
+ last_blk = logical_to_blk(inode, start + len - 1);
do {
/*
* we set b_size to the total size we want so it will map as
* many contiguous blocks as possible at once
*/
- memset(&tmp, 0, sizeof(struct buffer_head));
- tmp.b_size = map_len;
+ memset(&map_bh, 0, sizeof(struct buffer_head));
+ map_bh.b_size = len;
- ret = get_block(inode, start_blk, &tmp, 0);
+ ret = get_block(inode, start_blk, &map_bh, 0);
if (ret)
break;
/* HOLE */
- if (!buffer_mapped(&tmp)) {
- length -= blk_to_logical(inode, 1);
+ if (!buffer_mapped(&map_bh)) {
start_blk++;
/*
- * we want to handle the case where there is an
+ * We want to handle the case where there is an
* allocated block at the front of the file, and then
* nothing but holes up to the end of the file properly,
* to make sure that extent at the front gets properly
* marked with FIEMAP_EXTENT_LAST
*/
if (!past_eof &&
- blk_to_logical(inode, start_blk) >=
- blk_to_logical(inode, 0)+i_size_read(inode))
+ blk_to_logical(inode, start_blk) >= isize)
past_eof = 1;
/*
- * first hole after going past the EOF, this is our
+ * First hole after going past the EOF, this is our
* last extent
*/
if (past_eof && size) {
@@ -309,15 +323,18 @@ int __generic_block_fiemap(struct inode *inode,
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size,
flags);
- break;
+ } else if (size) {
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
+ size = 0;
}
/* if we have holes up to/past EOF then we're done */
- if (length <= 0 || past_eof)
+ if (start_blk > last_blk || past_eof || ret)
break;
} else {
/*
- * we have gone over the length of what we wanted to
+ * We have gone over the length of what we wanted to
* map, and it wasn't the entire file, so add the extent
* we got last time and exit.
*
@@ -331,7 +348,7 @@ int __generic_block_fiemap(struct inode *inode,
* are good to go, just add the extent to the fieinfo
* and break
*/
- if (length <= 0 && !whole_file) {
+ if (start_blk > last_blk && !whole_file) {
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size,
flags);
@@ -351,11 +368,10 @@ int __generic_block_fiemap(struct inode *inode,
}
logical = blk_to_logical(inode, start_blk);
- phys = blk_to_logical(inode, tmp.b_blocknr);
- size = tmp.b_size;
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = map_bh.b_size;
flags = FIEMAP_EXTENT_MERGED;
- length -= tmp.b_size;
start_blk += logical_to_blk(inode, size);
/*
@@ -363,15 +379,13 @@ int __generic_block_fiemap(struct inode *inode,
* soon as we find a hole that the last extent we found
* is marked with FIEMAP_EXTENT_LAST
*/
- if (!past_eof &&
- logical+size >=
- blk_to_logical(inode, 0)+i_size_read(inode))
- past_eof = 1;
+ if (!past_eof && logical + size >= isize)
+ past_eof = true;
}
cond_resched();
} while (1);
- /* if ret is 1 then we just hit the end of the extent array */
+ /* If ret is 1 then we just hit the end of the extent array */
if (ret == 1)
ret = 0;
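
The rewritten __generic_block_fiemap() above is ultimately driven by the FS_IOC_FIEMAP ioctl. A small userspace probe (illustrative only, not part of the patch) that exercises it and prints whatever extents the filesystem reports:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

#define NUM_EXTENTS 32

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* room for the header plus NUM_EXTENTS returned extents */
	fm = calloc(1, sizeof(*fm) + NUM_EXTENTS * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = NUM_EXTENTS;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("extent %u: logical %llu phys %llu len %llu flags 0x%x\n",
		       i,
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length,
		       fm->fm_extents[i].fe_flags);
	free(fm);
	close(fd);
	return 0;
}
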
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 9dd126276c9f..ed9ba6fe04f5 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -61,7 +61,7 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &page_symlink_inode_operations;
inode->i_mapping->a_ops = &jfs_aops;
} else {
- inode->i_op = &jfs_symlink_inode_operations;
+ inode->i_op = &jfs_fast_symlink_inode_operations;
/*
* The inline data should be null-terminated, but
* don't let on-disk corruption crash the kernel
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 6c4dfcbf3f55..9e2f6a721668 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -196,7 +196,7 @@ int dbMount(struct inode *ipbmap)
bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
- bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth);
+ bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
@@ -288,7 +288,7 @@ int dbSync(struct inode *ipbmap)
dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
- dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth);
+ dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
@@ -1441,7 +1441,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
* tree index of this allocation group within the control page.
*/
agperlev =
- (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth;
+ (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
/* dmap control page trees fan-out by 4 and a single allocation
@@ -1460,7 +1460,7 @@ dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
* the subtree to find the leftmost leaf that describes this
* free space.
*/
- for (k = bmp->db_agheigth; k > 0; k--) {
+ for (k = bmp->db_agheight; k > 0; k--) {
for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
if (l2nb <= dcp->stree[m + n]) {
ti = m + n;
@@ -3607,7 +3607,7 @@ void dbFinalizeBmap(struct inode *ipbmap)
}
/*
- * compute db_aglevel, db_agheigth, db_width, db_agstart:
+ * compute db_aglevel, db_agheight, db_width, db_agstart:
* an ag is covered in aglevel dmapctl summary tree,
* at agheight level height (from leaf) with agwidth number of nodes
* each, which starts at agstart index node of the smmary tree node
	 * each, which starts at agstart index node of the summary tree node
@@ -3616,9 +3616,9 @@ void dbFinalizeBmap(struct inode *ipbmap)
bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
l2nl =
bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
- bmp->db_agheigth = l2nl >> 1;
- bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1));
- for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0;
+ bmp->db_agheight = l2nl >> 1;
+ bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
+ for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
i--) {
bmp->db_agstart += n;
n <<= 2;
diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
index 1a6eb41569bc..6dcb906c55d8 100644
--- a/fs/jfs/jfs_dmap.h
+++ b/fs/jfs/jfs_dmap.h
@@ -210,7 +210,7 @@ struct dbmap_disk {
__le32 dn_maxag; /* 4: max active alloc group number */
__le32 dn_agpref; /* 4: preferred alloc group (hint) */
__le32 dn_aglevel; /* 4: dmapctl level holding the AG */
- __le32 dn_agheigth; /* 4: height in dmapctl of the AG */
+ __le32 dn_agheight; /* 4: height in dmapctl of the AG */
__le32 dn_agwidth; /* 4: width in dmapctl of the AG */
__le32 dn_agstart; /* 4: start tree index at AG height */
__le32 dn_agl2size; /* 4: l2 num of blks per alloc group */
@@ -229,7 +229,7 @@ struct dbmap {
int dn_maxag; /* max active alloc group number */
int dn_agpref; /* preferred alloc group (hint) */
int dn_aglevel; /* dmapctl level holding the AG */
- int dn_agheigth; /* height in dmapctl of the AG */
+ int dn_agheight; /* height in dmapctl of the AG */
int dn_agwidth; /* width in dmapctl of the AG */
int dn_agstart; /* start tree index at AG height */
int dn_agl2size; /* l2 num of blks per alloc group */
@@ -255,7 +255,7 @@ struct bmap {
#define db_agsize db_bmap.dn_agsize
#define db_agl2size db_bmap.dn_agl2size
#define db_agwidth db_bmap.dn_agwidth
-#define db_agheigth db_bmap.dn_agheigth
+#define db_agheight db_bmap.dn_agheight
#define db_agstart db_bmap.dn_agstart
#define db_numag db_bmap.dn_numag
#define db_maxlevel db_bmap.dn_maxlevel
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 79e2c79661df..9e6bda30a6e8 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -48,5 +48,6 @@ extern const struct file_operations jfs_dir_operations;
extern const struct inode_operations jfs_file_inode_operations;
extern const struct file_operations jfs_file_operations;
extern const struct inode_operations jfs_symlink_inode_operations;
+extern const struct inode_operations jfs_fast_symlink_inode_operations;
extern const struct dentry_operations jfs_ci_dentry_operations;
#endif /* _H_JFS_INODE */
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 4a3e9f39c21d..a9cf8e8675be 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -956,7 +956,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
*/
if (ssize <= IDATASIZE) {
- ip->i_op = &jfs_symlink_inode_operations;
+ ip->i_op = &jfs_fast_symlink_inode_operations;
i_fastsymlink = JFS_IP(ip)->i_inline;
memcpy(i_fastsymlink, name, ssize);
@@ -978,7 +978,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
else {
jfs_info("jfs_symlink: allocate extent ip:0x%p", ip);
- ip->i_op = &page_symlink_inode_operations;
+ ip->i_op = &jfs_symlink_inode_operations;
ip->i_mapping->a_ops = &jfs_aops;
/*
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 7f24a0bb08ca..1aba0039f1c9 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
struct inode *iplist[1];
struct jfs_superblock *j_sb, *j_sb2;
uint old_agsize;
+ int agsizechanged = 0;
struct buffer_head *bh, *bh2;
/* If the volume hasn't grown, get out now */
@@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
*/
if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
goto error_out;
+
+ agsizechanged |= (bmp->db_agsize != old_agsize);
+
/*
* the map now has extended to cover additional nblocks:
* dn_mapsize = oldMapsize + nblocks;
@@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
* will correctly identify the new ag);
*/
/* if new AG size the same as old AG size, done! */
- if (bmp->db_agsize != old_agsize) {
+ if (agsizechanged) {
if ((rc = diExtendFS(ipimap, ipbmap)))
goto error_out;
diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c
index 4af1a05aad0a..205b946d8e0d 100644
--- a/fs/jfs/symlink.c
+++ b/fs/jfs/symlink.c
@@ -29,9 +29,21 @@ static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
return NULL;
}
-const struct inode_operations jfs_symlink_inode_operations = {
+const struct inode_operations jfs_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = jfs_follow_link,
+ .setattr = jfs_setattr,
+ .setxattr = jfs_setxattr,
+ .getxattr = jfs_getxattr,
+ .listxattr = jfs_listxattr,
+ .removexattr = jfs_removexattr,
+};
+
+const struct inode_operations jfs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+ .put_link = page_put_link,
+ .setattr = jfs_setattr,
.setxattr = jfs_setxattr,
.getxattr = jfs_getxattr,
.listxattr = jfs_listxattr,
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
index 84e36f52fe95..76c242fbe1b0 100644
--- a/fs/logfs/gc.c
+++ b/fs/logfs/gc.c
@@ -459,6 +459,14 @@ static void __logfs_gc_pass(struct super_block *sb, int target)
struct logfs_block *block;
int round, progress, last_progress = 0;
+ /*
+ * Doing too many changes to the segfile at once would result
+ * in a large number of aliases. Write the journal before
+ * things get out of hand.
+ */
+ if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
+ logfs_write_anchor(sb);
+
if (no_free_segments(sb) >= target &&
super->s_no_object_aliases < MAX_OBJ_ALIASES)
return;
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 33bd260b8309..fb0a613f885b 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -389,7 +389,10 @@ static void journal_get_erase_count(struct logfs_area *area)
static int journal_erase_segment(struct logfs_area *area)
{
struct super_block *sb = area->a_sb;
- struct logfs_segment_header sh;
+ union {
+ struct logfs_segment_header sh;
+ unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)];
+ } u;
u64 ofs;
int err;
@@ -397,20 +400,21 @@ static int journal_erase_segment(struct logfs_area *area)
if (err)
return err;
- sh.pad = 0;
- sh.type = SEG_JOURNAL;
- sh.level = 0;
- sh.segno = cpu_to_be32(area->a_segno);
- sh.ec = cpu_to_be32(area->a_erase_count);
- sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
- sh.crc = logfs_crc32(&sh, sizeof(sh), 4);
+ memset(&u, 0, sizeof(u));
+ u.sh.pad = 0;
+ u.sh.type = SEG_JOURNAL;
+ u.sh.level = 0;
+ u.sh.segno = cpu_to_be32(area->a_segno);
+ u.sh.ec = cpu_to_be32(area->a_erase_count);
+ u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
+ u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4);
/* This causes a bug in segment.c. Not yet. */
//logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0);
ofs = dev_ofs(sb, area->a_segno, 0);
- area->a_used_bytes = ALIGN(sizeof(sh), 16);
- logfs_buf_write(area, ofs, &sh, sizeof(sh));
+ area->a_used_bytes = sizeof(u);
+ logfs_buf_write(area, ofs, &u, sizeof(u));
return 0;
}
@@ -494,6 +498,8 @@ static void account_shadows(struct super_block *sb)
btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
+ btree_grim_visitor32(&tree->segment_map, 0, NULL);
+ tree->no_shadowed_segments = 0;
if (li->li_block) {
/*
@@ -607,9 +613,9 @@ static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type,
if (len == 0)
return logfs_write_header(super, header, 0, type);
+ BUG_ON(len > sb->s_blocksize);
compr_len = logfs_compress(buf, data, len, sb->s_blocksize);
if (compr_len < 0 || type == JE_ANCHOR) {
- BUG_ON(len > sb->s_blocksize);
memcpy(data, buf, len);
compr_len = len;
compr = COMPR_NONE;
@@ -661,6 +667,7 @@ static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type,
if (ofs < 0)
return ofs;
logfs_buf_write(area, ofs, super->s_compressed_je, len);
+ BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
return 0;
}
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index b84b0eec6024..0a3df1a0c936 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -257,10 +257,14 @@ struct logfs_shadow {
* struct shadow_tree
* @new: shadows where old_ofs==0, indexed by new_ofs
* @old: shadows where old_ofs!=0, indexed by old_ofs
+ * @segment_map: bitfield of segments containing shadows
+ * @no_shadowed_segments: number of segments containing shadows
*/
struct shadow_tree {
struct btree_head64 new;
struct btree_head64 old;
+ struct btree_head32 segment_map;
+ int no_shadowed_segments;
};
struct object_alias_item {
@@ -305,13 +309,14 @@ typedef int write_alias_t(struct super_block *sb, u64 ino, u64 bix,
level_t level, int child_no, __be64 val);
struct logfs_block_ops {
void (*write_block)(struct logfs_block *block);
- gc_level_t (*block_level)(struct logfs_block *block);
void (*free_block)(struct super_block *sb, struct logfs_block*block);
int (*write_alias)(struct super_block *sb,
struct logfs_block *block,
write_alias_t *write_one_alias);
};
+#define MAX_JOURNAL_ENTRIES 256
+
struct logfs_super {
struct mtd_info *s_mtd; /* underlying device */
struct block_device *s_bdev; /* underlying device */
@@ -378,7 +383,7 @@ struct logfs_super {
u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */
u64 s_last_version;
struct logfs_area *s_journal_area; /* open journal segment */
- __be64 s_je_array[64];
+ __be64 s_je_array[MAX_JOURNAL_ENTRIES];
int s_no_je;
int s_sum_index; /* for the 12 summaries */
@@ -722,4 +727,10 @@ static inline struct logfs_area *get_area(struct super_block *sb,
return logfs_super(sb)->s_area[(__force u8)gc_level];
}
+static inline void logfs_mempool_destroy(mempool_t *pool)
+{
+ if (pool)
+ mempool_destroy(pool);
+}
+
#endif
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index bff40253dfb2..3159db6958e5 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -430,25 +430,6 @@ static void inode_write_block(struct logfs_block *block)
}
}
-static gc_level_t inode_block_level(struct logfs_block *block)
-{
- BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER);
- return GC_LEVEL(LOGFS_MAX_LEVELS);
-}
-
-static gc_level_t indirect_block_level(struct logfs_block *block)
-{
- struct page *page;
- struct inode *inode;
- u64 bix;
- level_t level;
-
- page = block->page;
- inode = page->mapping->host;
- logfs_unpack_index(page->index, &bix, &level);
- return expand_level(inode->i_ino, level);
-}
-
/*
* This silences a false, yet annoying gcc warning. I hate it when my editor
* jumps into bitops.h each time I recompile this file.
@@ -587,14 +568,12 @@ static void indirect_free_block(struct super_block *sb,
static struct logfs_block_ops inode_block_ops = {
.write_block = inode_write_block,
- .block_level = inode_block_level,
.free_block = inode_free_block,
.write_alias = inode_write_alias,
};
struct logfs_block_ops indirect_block_ops = {
.write_block = indirect_write_block,
- .block_level = indirect_block_level,
.free_block = indirect_free_block,
.write_alias = indirect_write_alias,
};
@@ -1241,6 +1220,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
mempool_free(shadow, super->s_shadow_pool);
}
+static void mark_segment(struct shadow_tree *tree, u32 segno)
+{
+ int err;
+
+ if (!btree_lookup32(&tree->segment_map, segno)) {
+ err = btree_insert32(&tree->segment_map, segno, (void *)1,
+ GFP_NOFS);
+ BUG_ON(err);
+ tree->no_shadowed_segments++;
+ }
+}
+
/**
* fill_shadow_tree - Propagate shadow tree changes due to a write
* @inode: Inode owning the page
@@ -1288,6 +1279,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page,
super->s_dirty_used_bytes += shadow->new_len;
super->s_dirty_free_bytes += shadow->old_len;
+ mark_segment(tree, shadow->old_ofs >> super->s_segshift);
+ mark_segment(tree, shadow->new_ofs >> super->s_segshift);
}
}
@@ -1845,19 +1838,37 @@ static int __logfs_truncate(struct inode *inode, u64 size)
return logfs_truncate_direct(inode, size);
}
-int logfs_truncate(struct inode *inode, u64 size)
+/*
+ * Truncate, by changing the segment file, can consume a fair amount
+ * of resources. So back off from time to time and do some GC.
+ * 8 or 2048 blocks should be well within safety limits even if
+ * every single block resided in a different segment.
+ */
+#define TRUNCATE_STEP (8 * 1024 * 1024)
+int logfs_truncate(struct inode *inode, u64 target)
{
struct super_block *sb = inode->i_sb;
- int err;
+ u64 size = i_size_read(inode);
+ int err = 0;
- logfs_get_wblocks(sb, NULL, 1);
- err = __logfs_truncate(inode, size);
- if (!err)
- err = __logfs_write_inode(inode, 0);
- logfs_put_wblocks(sb, NULL, 1);
+ size = ALIGN(size, TRUNCATE_STEP);
+ while (size > target) {
+ if (size > TRUNCATE_STEP)
+ size -= TRUNCATE_STEP;
+ else
+ size = 0;
+ if (size < target)
+ size = target;
+
+ logfs_get_wblocks(sb, NULL, 1);
+ err = __logfs_truncate(inode, target);
+ if (!err)
+ err = __logfs_write_inode(inode, 0);
+ logfs_put_wblocks(sb, NULL, 1);
+ }
if (!err)
- err = vmtruncate(inode, size);
+ err = vmtruncate(inode, target);
/* I don't trust error recovery yet. */
WARN_ON(err);
@@ -2251,8 +2262,6 @@ void logfs_cleanup_rw(struct super_block *sb)
struct logfs_super *super = logfs_super(sb);
destroy_meta_inode(super->s_segfile_inode);
- if (super->s_block_pool)
- mempool_destroy(super->s_block_pool);
- if (super->s_shadow_pool)
- mempool_destroy(super->s_shadow_pool);
+ logfs_mempool_destroy(super->s_block_pool);
+ logfs_mempool_destroy(super->s_shadow_pool);
}
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 801a3a141625..f77ce2b470ba 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -183,14 +183,8 @@ static int btree_write_alias(struct super_block *sb, struct logfs_block *block,
return 0;
}
-static gc_level_t btree_block_level(struct logfs_block *block)
-{
- return expand_level(block->ino, block->level);
-}
-
static struct logfs_block_ops btree_block_ops = {
.write_block = btree_write_block,
- .block_level = btree_block_level,
.free_block = __free_block,
.write_alias = btree_write_alias,
};
@@ -919,7 +913,7 @@ err:
for (i--; i >= 0; i--)
free_area(super->s_area[i]);
free_area(super->s_journal_area);
- mempool_destroy(super->s_alias_pool);
+ logfs_mempool_destroy(super->s_alias_pool);
return -ENOMEM;
}
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index b60bfac3263c..5866ee6e1327 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -12,6 +12,7 @@
#include "logfs.h"
#include <linux/bio.h>
#include <linux/slab.h>
+#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
@@ -137,6 +138,10 @@ static int logfs_sb_set(struct super_block *sb, void *_super)
sb->s_fs_info = super;
sb->s_mtd = super->s_mtd;
sb->s_bdev = super->s_bdev;
+ if (sb->s_bdev)
+ sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+ if (sb->s_mtd)
+ sb->s_bdi = sb->s_mtd->backing_dev_info;
return 0;
}
@@ -452,6 +457,8 @@ static int logfs_read_sb(struct super_block *sb, int read_only)
btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool);
btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool);
+ btree_init_mempool32(&super->s_shadow_tree.segment_map,
+ super->s_btree_pool);
ret = logfs_init_mapping(sb);
if (ret)
@@ -516,8 +523,8 @@ static void logfs_kill_sb(struct super_block *sb)
if (super->s_erase_page)
__free_page(super->s_erase_page);
super->s_devops->put_device(sb);
- mempool_destroy(super->s_btree_pool);
- mempool_destroy(super->s_alias_pool);
+ logfs_mempool_destroy(super->s_btree_pool);
+ logfs_mempool_destroy(super->s_alias_pool);
kfree(super);
log_super("LogFS: Finished unmounting\n");
}
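
Taken together with the fs/super.c and fs/sync.c hunks further down, the logfs change above shows where sb->s_bdi is expected to come from after this merge: block-device backed mounts borrow the request queue's BDI, MTD backed mounts borrow the one the MTD core provides, and device-less filesystems either register their own BDI or are left on noop_backing_dev_info by set_anon_super(), which __sync_filesystem() now treats as "nothing to write back". A condensed sketch, with a hypothetical example_set_bdi() helper and assuming an MTD-enabled build:

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mtd/mtd.h>

/* hypothetical helper: pick the backing_dev_info a super_block should use */
static void example_set_bdi(struct super_block *sb)
{
	if (sb->s_bdev)
		/* block device: reuse the request queue's BDI */
		sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
	else if (sb->s_mtd)
		/* MTD device: reuse the BDI the MTD core provides */
		sb->s_bdi = sb->s_mtd->backing_dev_info;
	else
		/* no device: writeback has nothing to do for this sb */
		sb->s_bdi = &noop_backing_dev_info;
}
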
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index cf98da1be23e..fa3385154023 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -526,10 +526,15 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
sb->s_blocksize_bits = 10;
sb->s_magic = NCP_SUPER_MAGIC;
sb->s_op = &ncp_sops;
+ sb->s_bdi = &server->bdi;
server = NCP_SBP(sb);
memset(server, 0, sizeof(*server));
+ error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
+ if (error)
+ goto out_bdi;
+
server->ncp_filp = ncp_filp;
server->ncp_sock = sock;
@@ -719,6 +724,8 @@ out_fput2:
if (server->info_filp)
fput(server->info_filp);
out_fput:
+ bdi_destroy(&server->bdi);
+out_bdi:
/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
*
* The previously used put_filp(ncp_filp); was bogous, since
@@ -756,6 +763,7 @@ static void ncp_put_super(struct super_block *sb)
kill_pid(server->m.wdog_pid, SIGTERM, 1);
put_pid(server->m.wdog_pid);
+ bdi_destroy(&server->bdi);
kfree(server->priv.data);
kfree(server->auth.object_name);
vfree(server->rxbuf);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 2a3d352c0bff..a8766c4ef2e0 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1294,7 +1294,8 @@ static int nfs4_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
+ NFS_CAP_POSIX_LOCK;
server->options = data->options;
/* Get a client record */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c6f2750648f4..db3ad849a289 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -837,6 +837,8 @@ out_zap_parent:
/* If we have submounts, don't unhash ! */
if (have_submounts(dentry))
goto out_valid;
+ if (dentry->d_flags & DCACHE_DISCONNECTED)
+ goto out_valid;
shrink_dcache_parent(dentry);
}
d_drop(dentry);
@@ -1025,12 +1027,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
res = NULL;
goto out;
/* This turned out not to be a regular file */
+ case -EISDIR:
case -ENOTDIR:
goto no_open;
case -ELOOP:
if (!(nd->intent.open.flags & O_NOFOLLOW))
goto no_open;
- /* case -EISDIR: */
/* case -EINVAL: */
default:
goto out;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 737128f777f3..50a56edca0b5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -623,10 +623,10 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
list_for_each_entry(pos, &nfsi->open_files, list) {
if (cred != NULL && pos->cred != cred)
continue;
- if ((pos->mode & mode) == mode) {
- ctx = get_nfs_open_context(pos);
- break;
- }
+ if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
+ continue;
+ ctx = get_nfs_open_context(pos);
+ break;
}
spin_unlock(&inode->i_lock);
return ctx;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index fe0cd9eb1d4d..638067007c65 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1523,6 +1523,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
nfs_post_op_update_inode(dir, o_res->dir_attr);
} else
nfs_refresh_inode(dir, o_res->dir_attr);
+ if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
+ server->caps &= ~NFS_CAP_POSIX_LOCK;
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
@@ -1664,7 +1666,7 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
- if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
+ if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 53ff70e23993..de38d63aa920 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page)
struct inode *inode = page->mapping->host;
struct nfs_server *nfss = NFS_SERVER(inode);
+ page_cache_get(page);
if (atomic_long_inc_return(&nfss->writeback) >
NFS_CONGESTION_ON_THRESH) {
set_bdi_congested(&nfss->backing_dev_info,
@@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page)
struct nfs_server *nfss = NFS_SERVER(inode);
end_page_writeback(page);
+ page_cache_release(page);
if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
@@ -421,6 +423,7 @@ static void
nfs_mark_request_dirty(struct nfs_page *req)
{
__set_page_dirty_nobuffers(req->wb_page);
+ __mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -660,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
req = nfs_setup_write_request(ctx, page, offset, count);
if (IS_ERR(req))
return PTR_ERR(req);
+ nfs_mark_request_dirty(req);
/* Update file length */
nfs_grow_file(page, offset, count);
nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+ nfs_mark_request_dirty(req);
nfs_clear_page_tag_locked(req);
return 0;
}
@@ -739,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page,
status = nfs_writepage_setup(ctx, page, offset, count);
if (status < 0)
nfs_set_pageerror(page);
- else
- __set_page_dirty_nobuffers(page);
dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
@@ -749,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page,
static void nfs_writepage_release(struct nfs_page *req)
{
+ struct page *page = req->wb_page;
- if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
- nfs_end_page_writeback(req->wb_page);
+ if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
nfs_inode_remove_request(req);
- } else
- nfs_end_page_writeback(req->wb_page);
nfs_clear_page_tag_locked(req);
+ nfs_end_page_writeback(page);
}
static int flush_task_priority(int how)
@@ -779,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
int how)
{
struct inode *inode = req->wb_context->path.dentry->d_inode;
- int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
@@ -794,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = flags,
+ .flags = RPC_TASK_ASYNC,
.priority = priority,
};
+ int ret = 0;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
@@ -835,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
(unsigned long long)data->args.offset);
task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- return PTR_ERR(task);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+ }
+ if (how & FLUSH_SYNC) {
+ ret = rpc_wait_for_completion_task(task);
+ if (ret == 0)
+ ret = task->tk_status;
+ }
rpc_put_task(task);
- return 0;
+out:
+ return ret;
}
/* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -847,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
*/
static void nfs_redirty_request(struct nfs_page *req)
{
+ struct page *page = req->wb_page;
+
nfs_mark_request_dirty(req);
- nfs_end_page_writeback(req->wb_page);
nfs_clear_page_tag_locked(req);
+ nfs_end_page_writeback(page);
}
/*
@@ -1084,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata)
if (nfs_write_need_commit(data)) {
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
nfs_mark_request_commit(req);
- nfs_end_page_writeback(page);
dprintk(" marked for commit\n");
goto next;
}
dprintk(" OK\n");
remove_request:
- nfs_end_page_writeback(page);
nfs_inode_remove_request(req);
next:
nfs_clear_page_tag_locked(req);
+ nfs_end_page_writeback(page);
}
nfs_writedata_release(calldata);
}
@@ -1207,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head,
{
struct nfs_page *first = nfs_list_entry(head->next);
struct inode *inode = first->wb_context->path.dentry->d_inode;
- int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
@@ -1222,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head,
.callback_ops = &nfs_commit_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = flags,
+ .flags = RPC_TASK_ASYNC,
.priority = priority,
};
@@ -1252,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
+ if (how & FLUSH_SYNC)
+ rpc_wait_for_completion_task(task);
rpc_put_task(task);
return 0;
}
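
Both nfs_write_rpcsetup() and nfs_commit_rpcsetup() now start the RPC task asynchronously and, for FLUSH_SYNC callers, block on its completion instead of passing a synchronous task flag. The pattern in isolation, as a sketch only (example_run_rpc() is hypothetical, the calls and fields are the ones already visible in the hunks above):

#include <linux/err.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* sketch of the run-then-wait pattern; not a function in the patch */
static int example_run_rpc(const struct rpc_task_setup *setup, int how)
{
	struct rpc_task *task;
	int ret = 0;

	task = rpc_run_task(setup);		/* task flags always include RPC_TASK_ASYNC */
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;	/* surface the RPC result */
	}
	rpc_put_task(task);
	return ret;
}
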
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index e1703175ee28..34ccf815ea8a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -161,10 +161,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
- argp->end = p + (argp->pagelen>>2);
+ argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
- argp->end = p + (PAGE_SIZE>>2);
+ argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
@@ -1426,10 +1426,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
- argp->end = p + (argp->pagelen>>2);
+ argp->end = argp->p + (argp->pagelen>>2);
argp->pagelen = 0;
} else {
- argp->end = p + (PAGE_SIZE>>2);
+ argp->end = argp->p + (PAGE_SIZE>>2);
argp->pagelen -= PAGE_SIZE;
}
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7621db800a74..8418fcc0a6ab 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2909,7 +2909,7 @@ out_no_task:
*/
static const struct pid_entry tid_base_stuff[] = {
DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
- DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
+ DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
REG("environ", S_IRUSR, proc_environ_operations),
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index dad7fb247ddc..3e21b1e2ad3a 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -33,6 +33,14 @@ config PRINT_QUOTA_WARNING
Note that this behavior is currently deprecated and may go away in
future. Please use notification via netlink socket instead.
+config QUOTA_DEBUG
+ bool "Additional quota sanity checks"
+ depends on QUOTA
+ default n
+ help
+ If you say Y here, quota subsystem will perform some additional
+ sanity checks of quota internal structures. If unsure, say N.
+
# Generic support for tree structured quota files. Selected when needed.
config QUOTA_TREE
tristate
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index a0a9405b202a..788b5802a7ce 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -80,8 +80,6 @@
#include <asm/uaccess.h>
-#define __DQUOT_PARANOIA
-
/*
* There are three quota SMP locks. dq_list_lock protects all lists with quotas
* and quota formats, dqstats structure containing statistics about the lists
@@ -695,7 +693,7 @@ void dqput(struct dquot *dquot)
if (!dquot)
return;
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
if (!atomic_read(&dquot->dq_count)) {
printk("VFS: dqput: trying to free free dquot\n");
printk("VFS: device %s, dquot of %s %d\n",
@@ -748,7 +746,7 @@ we_slept:
goto we_slept;
}
atomic_dec(&dquot->dq_count);
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
/* sanity check */
BUG_ON(!list_empty(&dquot->dq_free));
#endif
@@ -845,7 +843,7 @@ we_slept:
dquot = NULL;
goto out;
}
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
#endif
out:
@@ -874,7 +872,7 @@ static int dqinit_needed(struct inode *inode, int type)
static void add_dquot_ref(struct super_block *sb, int type)
{
struct inode *inode, *old_inode = NULL;
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
int reserved = 0;
#endif
@@ -882,7 +880,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
continue;
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
@@ -907,7 +905,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
spin_unlock(&inode_lock);
iput(old_inode);
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
if (reserved) {
printk(KERN_WARNING "VFS (%s): Writes happened before quota"
" was turned on thus quota information is probably "
@@ -940,7 +938,7 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
inode->i_dquot[type] = NULL;
if (dquot) {
if (dqput_blocks(dquot)) {
-#ifdef __DQUOT_PARANOIA
+#ifdef CONFIG_QUOTA_DEBUG
if (atomic_read(&dquot->dq_count) != 1)
printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
#endif
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index f8a6075abf50..07930449a958 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -46,8 +46,6 @@ static inline bool is_privroot_deh(struct dentry *dir,
struct reiserfs_de_head *deh)
{
struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
- if (reiserfs_expose_privroot(dir->d_sb))
- return 0;
return (dir == dir->d_parent && privroot->d_inode &&
deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 4f9586bb7631..e7cc00e636dc 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -554,7 +554,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
if (!err && new_size < i_size_read(dentry->d_inode)) {
struct iattr newattrs = {
.ia_ctime = current_fs_time(inode->i_sb),
- .ia_size = buffer_size,
+ .ia_size = new_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
@@ -973,21 +973,13 @@ int reiserfs_permission(struct inode *inode, int mask)
return generic_permission(inode, mask, NULL);
}
-/* This will catch lookups from the fs root to .reiserfs_priv */
-static int
-xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
+static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
{
- struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
- if (container_of(q1, struct dentry, d_name) == priv_root)
- return -ENOENT;
- if (q1->len == name->len &&
- !memcmp(q1->name, name->name, name->len))
- return 0;
- return 1;
+ return -EPERM;
}
static const struct dentry_operations xattr_lookup_poison_ops = {
- .d_compare = xattr_lookup_poison,
+ .d_revalidate = xattr_hide_revalidate,
};
int reiserfs_lookup_privroot(struct super_block *s)
@@ -1001,8 +993,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
REISERFS_SB(s)->priv_root = dentry;
- if (!reiserfs_expose_privroot(s))
- s->s_root->d_op = &xattr_lookup_poison_ops;
+ dentry->d_op = &xattr_lookup_poison_ops;
if (dentry->d_inode)
dentry->d_inode->i_flags |= S_PRIVATE;
} else
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 1c4c8f089970..dfa1d67f8fca 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -479,6 +479,7 @@ smb_put_super(struct super_block *sb)
if (server->conn_pid)
kill_pid(server->conn_pid, SIGTERM, 1);
+ bdi_destroy(&server->bdi);
kfree(server->ops);
smb_unload_nls(server);
sb->s_fs_info = NULL;
@@ -525,6 +526,11 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
if (!server)
goto out_no_server;
sb->s_fs_info = server;
+
+ if (bdi_setup_and_register(&server->bdi, "smbfs", BDI_CAP_MAP_COPY))
+ goto out_bdi;
+
+ sb->s_bdi = &server->bdi;
server->super_block = sb;
server->mnt = NULL;
@@ -624,6 +630,8 @@ out_no_smbiod:
out_bad_option:
kfree(mem);
out_no_mem:
+ bdi_destroy(&server->bdi);
+out_bdi:
if (!server->mnt)
printk(KERN_ERR "smb_fill_super: allocation failure\n");
sb->s_fs_info = NULL;
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 1cb0d81b164b..653c030eb840 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -87,9 +87,8 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
u64 cur_index = index >> msblk->devblksize_log2;
int bytes, compressed, b = 0, k = 0, page = 0, avail;
-
- bh = kcalloc((msblk->block_size >> msblk->devblksize_log2) + 1,
- sizeof(*bh), GFP_KERNEL);
+ bh = kcalloc(((srclength + msblk->devblksize - 1)
+ >> msblk->devblksize_log2) + 1, sizeof(*bh), GFP_KERNEL);
if (bh == NULL)
return -ENOMEM;
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 3550aec2f655..48b6f4a385a6 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -275,7 +275,8 @@ allocate_root:
err = squashfs_read_inode(root, root_inode);
if (err) {
- iget_failed(root);
+ make_bad_inode(root);
+ iput(root);
goto failed_mount;
}
insert_inode_hash(root);
@@ -353,6 +354,7 @@ static void squashfs_put_super(struct super_block *sb)
kfree(sbi->id_table);
kfree(sbi->fragment_index);
kfree(sbi->meta_index);
+ kfree(sbi->inode_lookup_table);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 15a03d0fb9f3..7a603874e483 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -128,8 +128,9 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer,
goto release_mutex;
}
+ length = stream->total_out;
mutex_unlock(&msblk->read_data_mutex);
- return stream->total_out;
+ return length;
release_mutex:
mutex_unlock(&msblk->read_data_mutex);
diff --git a/fs/super.c b/fs/super.c
index f35ac6022109..dc72491a19f9 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -693,6 +693,7 @@ int set_anon_super(struct super_block *s, void *data)
return -EMFILE;
}
s->s_dev = MKDEV(0, dev & MINORMASK);
+ s->s_bdi = &noop_backing_dev_info;
return 0;
}
@@ -954,10 +955,11 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
if (error < 0)
goto out_free_secdata;
BUG_ON(!mnt->mnt_sb);
+ WARN_ON(!mnt->mnt_sb->s_bdi);
- error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
- if (error)
- goto out_sb;
+ error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
+ if (error)
+ goto out_sb;
/*
* filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
diff --git a/fs/sync.c b/fs/sync.c
index fc5c3d75cf3c..92b228176f7c 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
#include "internal.h"
#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
@@ -32,7 +33,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
* This should be safe, as we require bdi backing to actually
* write out data in the first place
*/
- if (!sb->s_bdi)
+ if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
return 0;
if (sb->s_qcop && sb->s_qcop->quota_sync)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 05cd85317f6f..fd9698215759 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -820,10 +820,10 @@ xfs_reclaim_inode(
* call into reclaim to find it in a clean state instead of waiting for
* it now. We also don't return errors here - if the error is transient
* then the next reclaim pass will flush the inode, and if the error
- * is permanent then the next sync reclaim will relcaim the inode and
+ * is permanent then the next sync reclaim will reclaim the inode and
* pass on the error.
*/
- if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_fs_cmn_err(CE_WARN, ip->i_mount,
"inode 0x%llx background reclaim flush failed with %d",
(long long)ip->i_ino, error);
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index cd27c9d6c71f..5bba29a07812 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -177,16 +177,26 @@ xfs_swap_extents_check_format(
XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max)
return EINVAL;
- /* Check root block of temp in btree form to max in target */
+ /*
+ * If we are in a btree format, check that the temp root block will fit
+ * in the target and that it has enough extents to be in btree format
+ * in the target.
+ *
+ * Note that we have to be careful to allow btree->extent conversions
+ * (a common defrag case) which will occur when the temp inode is in
+ * extent format...
+ */
if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_BOFF(ip) &&
- tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
+ ((XFS_IFORK_BOFF(ip) &&
+ tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip)) ||
+ XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= ip->i_df.if_ext_max))
return EINVAL;
- /* Check root block of target in btree form to max in temp */
+ /* Reciprocal target->temp btree format checks */
if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_BOFF(tip) &&
- ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
+ ((XFS_IFORK_BOFF(tip) &&
+ ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip)) ||
+ XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= tip->i_df.if_ext_max))
return EINVAL;
return 0;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index e8fba92d7cd9..2be019136287 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -745,9 +745,16 @@ xfs_log_move_tail(xfs_mount_t *mp,
/*
* Determine if we have a transaction that has gone to disk
- * that needs to be covered. Log activity needs to be idle (no AIL and
- * nothing in the iclogs). And, we need to be in the right state indicating
- * something has gone out.
+ * that needs to be covered. To begin the transition to the idle state,
+ * the log first needs to be idle (no AIL and nothing in the iclogs).
+ * If we are then in a state where covering is needed, the caller is informed
+ * that dummy transactions are required to move the log into the idle state.
+ *
+ * Because this is called as part of the sync process, we should also indicate
+ * that dummy transactions should be issued in anything but the covered or
+ * idle states. This ensures that the log tail is accurately reflected in
+ * the log at the end of the sync, hence if a crash occurs we avoid replay
+ * of transactions whose metadata is already on disk.
*/
int
xfs_log_need_covered(xfs_mount_t *mp)
@@ -759,17 +766,24 @@ xfs_log_need_covered(xfs_mount_t *mp)
return 0;
spin_lock(&log->l_icloglock);
- if (((log->l_covered_state == XLOG_STATE_COVER_NEED) ||
- (log->l_covered_state == XLOG_STATE_COVER_NEED2))
- && !xfs_trans_ail_tail(log->l_ailp)
- && xlog_iclogs_empty(log)) {
- if (log->l_covered_state == XLOG_STATE_COVER_NEED)
- log->l_covered_state = XLOG_STATE_COVER_DONE;
- else {
- ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2);
- log->l_covered_state = XLOG_STATE_COVER_DONE2;
+ switch (log->l_covered_state) {
+ case XLOG_STATE_COVER_DONE:
+ case XLOG_STATE_COVER_DONE2:
+ case XLOG_STATE_COVER_IDLE:
+ break;
+ case XLOG_STATE_COVER_NEED:
+ case XLOG_STATE_COVER_NEED2:
+ if (!xfs_trans_ail_tail(log->l_ailp) &&
+ xlog_iclogs_empty(log)) {
+ if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+ log->l_covered_state = XLOG_STATE_COVER_DONE;
+ else
+ log->l_covered_state = XLOG_STATE_COVER_DONE2;
}
+ /* FALLTHRU */
+ default:
needed = 1;
+ break;
}
spin_unlock(&log->l_icloglock);
return needed;