Diffstat (limited to 'fs')
-rw-r--r--  fs/cifs/cifsfs.c          1
-rw-r--r--  fs/cifs/cifsglob.h        2
-rw-r--r--  fs/cifs/file.c          141
-rw-r--r--  fs/cifs/smb1ops.c         8
-rw-r--r--  fs/cifs/smb2ops.c         2
-rw-r--r--  fs/cifs/transport.c       6
-rw-r--r--  fs/ecryptfs/crypto.c      2
-rw-r--r--  fs/ecryptfs/kthread.c     6
-rw-r--r--  fs/ecryptfs/mmap.c       12
-rw-r--r--  fs/eventpoll.c           22
-rw-r--r--  fs/ext4/Kconfig           2
-rw-r--r--  fs/ext4/extents.c        22
-rw-r--r--  fs/ext4/file.c            8
-rw-r--r--  fs/ext4/fsync.c           2
-rw-r--r--  fs/ext4/inode.c          99
-rw-r--r--  fs/ext4/namei.c           6
-rw-r--r--  fs/ext4/super.c          30
-rw-r--r--  fs/f2fs/acl.c             1
-rw-r--r--  fs/f2fs/data.c            1
-rw-r--r--  fs/f2fs/dir.c            16
-rw-r--r--  fs/f2fs/f2fs.h            2
-rw-r--r--  fs/f2fs/file.c           10
-rw-r--r--  fs/f2fs/gc.c             34
-rw-r--r--  fs/f2fs/hash.c           18
-rw-r--r--  fs/f2fs/inode.c           1
-rw-r--r--  fs/f2fs/namei.c          34
-rw-r--r--  fs/f2fs/node.c           37
-rw-r--r--  fs/f2fs/recovery.c       10
-rw-r--r--  fs/f2fs/segment.c        46
-rw-r--r--  fs/f2fs/segment.h        15
-rw-r--r--  fs/f2fs/super.c          15
-rw-r--r--  fs/f2fs/xattr.c           5
-rw-r--r--  fs/file.c                 2
-rw-r--r--  fs/gfs2/lock_dlm.c        1
-rw-r--r--  fs/gfs2/rgrp.c           35
-rw-r--r--  fs/jbd2/transaction.c    30
-rw-r--r--  fs/nfs/callback_proc.c    2
-rw-r--r--  fs/nfs/dir.c             16
-rw-r--r--  fs/nfs/nfs4proc.c        18
-rw-r--r--  fs/nfs/pnfs.c             2
-rw-r--r--  fs/nfs/read.c            10
-rw-r--r--  fs/nfs/super.c            2
-rw-r--r--  fs/nfs/write.c           10
-rw-r--r--  fs/proc/generic.c        13
-rw-r--r--  fs/proc/task_mmu.c        2
-rw-r--r--  fs/pstore/ram.c          14
-rw-r--r--  fs/pstore/ram_core.c      9
-rw-r--r--  fs/splice.c               4
48 files changed, 454 insertions, 332 deletions
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f653835d067b..de7f9168a118 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -228,7 +228,6 @@ cifs_alloc_inode(struct super_block *sb)
cifs_set_oplock_level(cifs_inode, 0);
cifs_inode->delete_pending = false;
cifs_inode->invalid_mapping = false;
- cifs_inode->leave_pages_clean = false;
cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
cifs_inode->server_eof = 0;
cifs_inode->uniqueid = 0;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index aea1eec64911..e6899cea1c35 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -386,6 +386,7 @@ struct smb_version_values {
unsigned int cap_unix;
unsigned int cap_nt_find;
unsigned int cap_large_files;
+ unsigned int oplock_read;
};
#define HEADER_SIZE(server) (server->vals->header_size)
@@ -1030,7 +1031,6 @@ struct cifsInodeInfo {
bool clientCanCacheAll; /* read and writebehind oplock */
bool delete_pending; /* DELETE_ON_CLOSE is set */
bool invalid_mapping; /* pagecache is invalid */
- bool leave_pages_clean; /* protected by i_mutex, not set pages dirty */
unsigned long time; /* jiffies of last update of inode */
u64 server_eof; /* current file size on server -- protected by i_lock */
u64 uniqueid; /* server inode number */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0a6677ba212b..8ea6ca50a665 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -238,6 +238,23 @@ out:
return rc;
}
+static bool
+cifs_has_mand_locks(struct cifsInodeInfo *cinode)
+{
+ struct cifs_fid_locks *cur;
+ bool has_locks = false;
+
+ down_read(&cinode->lock_sem);
+ list_for_each_entry(cur, &cinode->llist, llist) {
+ if (!list_empty(&cur->locks)) {
+ has_locks = true;
+ break;
+ }
+ }
+ up_read(&cinode->lock_sem);
+ return has_locks;
+}
+
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock)
@@ -248,6 +265,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct cifsFileInfo *cfile;
struct cifs_fid_locks *fdlocks;
struct cifs_tcon *tcon = tlink_tcon(tlink);
+ struct TCP_Server_Info *server = tcon->ses->server;
cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
if (cfile == NULL)
@@ -276,12 +294,22 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
mutex_init(&cfile->fh_mutex);
+ /*
+ * If the server returned a read oplock and we have mandatory brlocks,
+ * set oplock level to None.
+ */
+ if (oplock == server->vals->oplock_read &&
+ cifs_has_mand_locks(cinode)) {
+ cFYI(1, "Reset oplock val from read to None due to mand locks");
+ oplock = 0;
+ }
+
spin_lock(&cifs_file_list_lock);
- if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE)
+ if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
oplock = fid->pending_open->oplock;
list_del(&fid->pending_open->olist);
- tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);
+ server->ops->set_fid(cfile, fid, oplock);
list_add(&cfile->tlist, &tcon->openFileList);
/* if readable file instance put first in list*/
@@ -1422,6 +1450,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
+ struct inode *inode = cfile->dentry->d_inode;
if (posix_lck) {
int posix_lock_type;
@@ -1459,6 +1488,21 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
if (!rc)
goto out;
+ /*
+ * Windows 7 server can delay breaking lease from read to None
+ * if we set a byte-range lock on a file - break it explicitly
+ * before sending the lock to the server to be sure the next
+ * read won't conflict with non-overlapping locks due to
+ * page reading.
+ */
+ if (!CIFS_I(inode)->clientCanCacheAll &&
+ CIFS_I(inode)->clientCanCacheRead) {
+ cifs_invalidate_mapping(inode);
+ cFYI(1, "Set no oplock for inode=%p due to mand locks",
+ inode);
+ CIFS_I(inode)->clientCanCacheRead = false;
+ }
+
rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
type, 1, 0, wait_flag);
if (rc) {
@@ -2103,15 +2147,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
} else {
rc = copied;
pos += copied;
- /*
- * When we use strict cache mode and cifs_strict_writev was run
- * with level II oplock (indicated by leave_pages_clean field of
- * CIFS_I(inode)), we can leave pages clean - cifs_strict_writev
- * sent the data to the server itself.
- */
- if (!CIFS_I(inode)->leave_pages_clean ||
- !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO))
- set_page_dirty(page);
+ set_page_dirty(page);
}
if (rc > 0) {
@@ -2462,8 +2498,8 @@ ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
}
static ssize_t
-cifs_pagecache_writev(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos, bool cache_ex)
+cifs_writev(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
@@ -2485,12 +2521,8 @@ cifs_pagecache_writev(struct kiocb *iocb, const struct iovec *iov,
server->vals->exclusive_lock_type, NULL,
CIFS_WRITE_OP)) {
mutex_lock(&inode->i_mutex);
- if (!cache_ex)
- cinode->leave_pages_clean = true;
rc = __generic_file_aio_write(iocb, iov, nr_segs,
- &iocb->ki_pos);
- if (!cache_ex)
- cinode->leave_pages_clean = false;
+ &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
}
@@ -2517,60 +2549,32 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
struct cifsFileInfo *cfile = (struct cifsFileInfo *)
iocb->ki_filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- ssize_t written, written2;
- /*
- * We need to store clientCanCacheAll here to prevent race
- * conditions - this value can be changed during an execution
- * of generic_file_aio_write. For CIFS it can be changed from
- * true to false only, but for SMB2 it can be changed both from
- * true to false and vice versa. So, we can end up with a data
- * stored in the cache, not marked dirty and not sent to the
- * server if this value changes its state from false to true
- * after cifs_write_end.
- */
- bool cache_ex = cinode->clientCanCacheAll;
- bool cache_read = cinode->clientCanCacheRead;
- int rc;
- loff_t saved_pos;
+ ssize_t written;
- if (cache_ex) {
+ if (cinode->clientCanCacheAll) {
if (cap_unix(tcon->ses) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(
- tcon->fsUnixInfo.Capability)))
+ (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
+ && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
return generic_file_aio_write(iocb, iov, nr_segs, pos);
- return cifs_pagecache_writev(iocb, iov, nr_segs, pos, cache_ex);
+ return cifs_writev(iocb, iov, nr_segs, pos);
}
-
/*
- * For files without exclusive oplock in strict cache mode we need to
- * write the data to the server exactly from the pos to pos+len-1 rather
- * than flush all affected pages because it may cause a error with
- * mandatory locks on these pages but not on the region from pos to
- * ppos+len-1.
+ * For non-oplocked files in strict cache mode we need to write the data
+ * to the server exactly from the pos to pos+len-1 rather than flush all
+ * affected pages because it may cause an error with mandatory locks on
+ * these pages but not on the region from pos to pos+len-1.
*/
written = cifs_user_writev(iocb, iov, nr_segs, pos);
- if (!cache_read || written <= 0)
- return written;
-
- saved_pos = iocb->ki_pos;
- iocb->ki_pos = pos;
- /* we have a read oplock - need to store a data in the page cache */
- if (cap_unix(tcon->ses) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(
- tcon->fsUnixInfo.Capability)))
- written2 = generic_file_aio_write(iocb, iov, nr_segs, pos);
- else
- written2 = cifs_pagecache_writev(iocb, iov, nr_segs, pos,
- cache_ex);
- /* errors occured during writing - invalidate the page cache */
- if (written2 < 0) {
- rc = cifs_invalidate_mapping(inode);
- if (rc)
- written = (ssize_t)rc;
- else
- iocb->ki_pos = saved_pos;
+ if (written > 0 && cinode->clientCanCacheRead) {
+ /*
+ * Windows 7 server can delay breaking level2 oplock if a write
+ * request comes - break it on the client to prevent reading
+ * stale data.
+ */
+ cifs_invalidate_mapping(inode);
+ cFYI(1, "Set no oplock for inode=%p after a write operation",
+ inode);
+ cinode->clientCanCacheRead = false;
}
return written;
}
@@ -3577,6 +3581,13 @@ void cifs_oplock_break(struct work_struct *work)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
+ if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
+ cifs_has_mand_locks(cinode)) {
+ cFYI(1, "Reset oplock to None for inode=%p due to mand locks",
+ inode);
+ cinode->clientCanCacheRead = false;
+ }
+
if (inode && S_ISREG(inode->i_mode)) {
if (cinode->clientCanCacheRead)
break_lease(inode, O_RDONLY);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index a5d234c8d5d9..47bc5a87f94e 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -53,6 +53,13 @@ send_nt_cancel(struct TCP_Server_Info *server, void *buf,
mutex_unlock(&server->srv_mutex);
return rc;
}
+
+ /*
+ * The response to this call was already factored into the sequence
+ * number when the call went out, so we must adjust it back downward
+ * after signing here.
+ */
+ --server->sequence_number;
rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
mutex_unlock(&server->srv_mutex);
@@ -952,4 +959,5 @@ struct smb_version_values smb1_values = {
.cap_unix = CAP_UNIX,
.cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
.cap_large_files = CAP_LARGE_FILES,
+ .oplock_read = OPLOCK_READ,
};
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d79de7bc4435..c9c7aa7ed966 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -708,6 +708,7 @@ struct smb_version_values smb20_values = {
.cap_unix = 0,
.cap_nt_find = SMB2_NT_FIND,
.cap_large_files = SMB2_LARGE_FILES,
+ .oplock_read = SMB2_OPLOCK_LEVEL_II,
};
struct smb_version_values smb21_values = {
@@ -725,6 +726,7 @@ struct smb_version_values smb21_values = {
.cap_unix = 0,
.cap_nt_find = SMB2_NT_FIND,
.cap_large_files = SMB2_LARGE_FILES,
+ .oplock_read = SMB2_OPLOCK_LEVEL_II,
};
struct smb_version_values smb30_values = {
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 76d974c952fe..1a528680ec5a 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -144,9 +144,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
*sent = 0;
- if (ssocket == NULL)
- return -ENOTSOCK; /* BB eventually add reconnect code here */
-
smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
smb_msg.msg_namelen = sizeof(struct sockaddr);
smb_msg.msg_control = NULL;
@@ -291,6 +288,9 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
struct socket *ssocket = server->ssocket;
int val = 1;
+ if (ssocket == NULL)
+ return -ENOTSOCK;
+
cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
dump_smb(iov[0].iov_base, iov[0].iov_len);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index ea9931281557..a7b0c2dfb3db 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1935,7 +1935,7 @@ static const unsigned char filename_rev_map[256] = {
* @src: Source location for the filename to encode
* @src_size: Size of the source in bytes
*/
-void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size,
+static void ecryptfs_encode_for_filename(unsigned char *dst, size_t *dst_size,
unsigned char *src, size_t src_size)
{
size_t num_blocks;
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 809e67d05ca3..f1ea610362c6 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -102,12 +102,12 @@ int __init ecryptfs_init_kthread(void)
void ecryptfs_destroy_kthread(void)
{
- struct ecryptfs_open_req *req;
+ struct ecryptfs_open_req *req, *tmp;
mutex_lock(&ecryptfs_kthread_ctl.mux);
ecryptfs_kthread_ctl.flags |= ECRYPTFS_KTHREAD_ZOMBIE;
- list_for_each_entry(req, &ecryptfs_kthread_ctl.req_list,
- kthread_ctl_list) {
+ list_for_each_entry_safe(req, tmp, &ecryptfs_kthread_ctl.req_list,
+ kthread_ctl_list) {
list_del(&req->kthread_ctl_list);
*req->lower_file = ERR_PTR(-EIO);
complete(&req->done);
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index bd1d57f98f74..564a1fa34b99 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -338,7 +338,8 @@ static int ecryptfs_write_begin(struct file *file,
if (prev_page_end_size
>= i_size_read(page->mapping->host)) {
zero_user(page, 0, PAGE_CACHE_SIZE);
- } else {
+ SetPageUptodate(page);
+ } else if (len < PAGE_CACHE_SIZE) {
rc = ecryptfs_decrypt_page(page);
if (rc) {
printk(KERN_ERR "%s: Error decrypting "
@@ -348,8 +349,8 @@ static int ecryptfs_write_begin(struct file *file,
ClearPageUptodate(page);
goto out;
}
+ SetPageUptodate(page);
}
- SetPageUptodate(page);
}
}
/* If creating a page or more of holes, zero them out via truncate.
@@ -499,6 +500,13 @@ static int ecryptfs_write_end(struct file *file,
}
goto out;
}
+ if (!PageUptodate(page)) {
+ if (copied < PAGE_CACHE_SIZE) {
+ rc = 0;
+ goto out;
+ }
+ SetPageUptodate(page);
+ }
/* Fills in zeros if 'to' goes beyond inode size */
rc = fill_zeros_to_end_of_page(page, to);
if (rc) {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index be56b21435f8..9fec1836057a 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1313,7 +1313,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
* otherwise we might miss an event that happens between the
* f_op->poll() call and the new event set registering.
*/
- epi->event.events = event->events;
+ epi->event.events = event->events; /* need barrier below */
pt._key = event->events;
epi->event.data = event->data; /* protected by mtx */
if (epi->event.events & EPOLLWAKEUP) {
@@ -1324,6 +1324,26 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
}
/*
+ * The following barrier has two effects:
+ *
+ * 1) Flush epi changes above to other CPUs. This ensures
+ * we do not miss events from ep_poll_callback if an
+ * event occurs immediately after we call f_op->poll().
+ * We need this because we did not take ep->lock while
+ * changing epi above (but ep_poll_callback does take
+ * ep->lock).
+ *
+ * 2) We also need to ensure we do not miss _past_ events
+ * when calling f_op->poll(). This barrier also
+ * pairs with the barrier in wq_has_sleeper (see
+ * comments for wq_has_sleeper).
+ *
+ * This barrier will now guarantee ep_poll_callback or f_op->poll
+ * (or both) will notice the readiness of an item.
+ */
+ smp_mb();
+
+ /*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
*/
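
The barrier reasoning in the hunk above is the classic store-buffering pattern: ep_modify() writes the new event mask with a plain store and then issues smp_mb(), which pairs with the barrier inside wq_has_sleeper() on the ep_poll_callback() side. The stand-alone C11 program below is a hedged user-space model of that pairing (the names new_events and item_ready are illustrative, not epoll's actual fields); with both fences present, at least one thread must observe the other's store, so an event cannot be missed by both paths.

/* Minimal user-space model of the ep_modify()/ep_poll_callback() barrier
 * pairing described above (store-buffering litmus test).  Names and the
 * whole setup are illustrative only.  Build: cc -std=c11 -pthread sb.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int new_events;	/* stands in for epi->event.events */
static atomic_int item_ready;	/* stands in for the item becoming ready */
static int saw_ready, saw_events;

static void *modify_side(void *arg)
{
	atomic_store_explicit(&new_events, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* models the smp_mb() */
	saw_ready = atomic_load_explicit(&item_ready, memory_order_relaxed);
	return NULL;
}

static void *callback_side(void *arg)
{
	atomic_store_explicit(&item_ready, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier in wq_has_sleeper() */
	saw_events = atomic_load_explicit(&new_events, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, modify_side, NULL);
	pthread_create(&b, NULL, callback_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With both fences, saw_ready == 0 && saw_events == 0 is impossible,
	 * i.e. at least one side notices the readiness / the new mask. */
	printf("saw_ready=%d saw_events=%d\n", saw_ready, saw_events);
	return 0;
}
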
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 0a475c881852..987358740cb9 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -41,6 +41,7 @@ config EXT4_USE_FOR_EXT23
config EXT4_FS_POSIX_ACL
bool "Ext4 POSIX Access Control Lists"
+ depends on EXT4_FS
select FS_POSIX_ACL
help
POSIX Access Control Lists (ACLs) support permissions for users and
@@ -53,6 +54,7 @@ config EXT4_FS_POSIX_ACL
config EXT4_FS_SECURITY
bool "Ext4 Security Labels"
+ depends on EXT4_FS
help
Security labels support alternative access control models
implemented by security modules like SELinux. This option
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 26af22832a84..5ae1674ec12f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2226,13 +2226,14 @@ errout:
* removes index from the index block.
*/
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
- struct ext4_ext_path *path)
+ struct ext4_ext_path *path, int depth)
{
int err;
ext4_fsblk_t leaf;
/* free index block */
- path--;
+ depth--;
+ path = path + depth;
leaf = ext4_idx_pblock(path->p_idx);
if (unlikely(path->p_hdr->eh_entries == 0)) {
EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
@@ -2257,6 +2258,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
ext4_free_blocks(handle, inode, NULL, leaf, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
+
+ while (--depth >= 0) {
+ if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
+ break;
+ path--;
+ err = ext4_ext_get_access(handle, inode, path);
+ if (err)
+ break;
+ path->p_idx->ei_block = (path+1)->p_idx->ei_block;
+ err = ext4_ext_dirty(handle, inode, path);
+ if (err)
+ break;
+ }
return err;
}
@@ -2599,7 +2613,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
/* if this leaf is free, then we should
* remove it from index block above */
if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
- err = ext4_ext_rm_idx(handle, inode, path + depth);
+ err = ext4_ext_rm_idx(handle, inode, path, depth);
out:
return err;
@@ -2802,7 +2816,7 @@ again:
/* index is empty, remove it;
* handle must be already prepared by the
* truncatei_leaf() */
- err = ext4_ext_rm_idx(handle, inode, path + i);
+ err = ext4_ext_rm_idx(handle, inode, path, i);
}
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index d07c27ca594a..405565a62277 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -108,14 +108,6 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
/* Unaligned direct AIO must be serialized; see comment above */
if (unaligned_aio) {
- static unsigned long unaligned_warn_time;
-
- /* Warn about this once per day */
- if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
- ext4_msg(inode->i_sb, KERN_WARNING,
- "Unaligned AIO/DIO on inode %ld by %s; "
- "performance will be poor.",
- inode->i_ino, current->comm);
mutex_lock(ext4_aio_mutex(inode));
ext4_unwritten_wait(inode);
}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index dfbc1fe96674..3278e64e57b6 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -109,8 +109,6 @@ static int __sync_inode(struct inode *inode, int datasync)
*
* What we do is just kick off a commit and wait on it. This will snapshot the
* inode to disk.
- *
- * i_mutex lock is held when entering and exiting this function
*/
int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cb1c1ab2720b..cbfe13bf5b2a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2880,8 +2880,6 @@ static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offs
static void ext4_invalidatepage(struct page *page, unsigned long offset)
{
- journal_t *journal = EXT4_JOURNAL(page->mapping->host);
-
trace_ext4_invalidatepage(page, offset);
/*
@@ -2889,16 +2887,34 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset)
*/
if (ext4_should_dioread_nolock(page->mapping->host))
ext4_invalidatepage_free_endio(page, offset);
+
+ /* No journalling happens on data buffers when this function is used */
+ WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
+
+ block_invalidatepage(page, offset);
+}
+
+static int __ext4_journalled_invalidatepage(struct page *page,
+ unsigned long offset)
+{
+ journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+
+ trace_ext4_journalled_invalidatepage(page, offset);
+
/*
* If it's a full truncate we just forget about the pending dirtying
*/
if (offset == 0)
ClearPageChecked(page);
- if (journal)
- jbd2_journal_invalidatepage(journal, page, offset);
- else
- block_invalidatepage(page, offset);
+ return jbd2_journal_invalidatepage(journal, page, offset);
+}
+
+/* Wrapper for aops... */
+static void ext4_journalled_invalidatepage(struct page *page,
+ unsigned long offset)
+{
+ WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
}
static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -3264,7 +3280,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
.bmap = ext4_bmap,
- .invalidatepage = ext4_invalidatepage,
+ .invalidatepage = ext4_journalled_invalidatepage,
.releasepage = ext4_releasepage,
.direct_IO = ext4_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
@@ -4305,6 +4321,47 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
+ * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
+ * buffers that are attached to a page straddling i_size and are undergoing
+ * commit. In that case we have to wait for commit to finish and try again.
+ */
+static void ext4_wait_for_tail_page_commit(struct inode *inode)
+{
+ struct page *page;
+ unsigned offset;
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = 0;
+ int ret;
+
+ offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+ /*
+ * All buffers in the last page remain valid? Then there's nothing to
+ * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+ * blocksize case
+ */
+ if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
+ return;
+ while (1) {
+ page = find_lock_page(inode->i_mapping,
+ inode->i_size >> PAGE_CACHE_SHIFT);
+ if (!page)
+ return;
+ ret = __ext4_journalled_invalidatepage(page, offset);
+ unlock_page(page);
+ page_cache_release(page);
+ if (ret != -EBUSY)
+ return;
+ commit_tid = 0;
+ read_lock(&journal->j_state_lock);
+ if (journal->j_committing_transaction)
+ commit_tid = journal->j_committing_transaction->t_tid;
+ read_unlock(&journal->j_state_lock);
+ if (commit_tid)
+ jbd2_log_wait_commit(journal, commit_tid);
+ }
+}
+
+/*
* ext4_setattr()
*
* Called from notify_change.
@@ -4417,16 +4474,28 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size != i_size_read(inode)) {
- truncate_setsize(inode, attr->ia_size);
- /* Inode size will be reduced, wait for dio in flight.
- * Temporarily disable dioread_nolock to prevent
- * livelock. */
+ if (attr->ia_size != inode->i_size) {
+ loff_t oldsize = inode->i_size;
+
+ i_size_write(inode, attr->ia_size);
+ /*
+ * Blocks are going to be removed from the inode. Wait
+ * for dio in flight. Temporarily disable
+ * dioread_nolock to prevent livelock.
+ */
if (orphan) {
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
- ext4_inode_resume_unlocked_dio(inode);
+ if (!ext4_should_journal_data(inode)) {
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+ ext4_inode_resume_unlocked_dio(inode);
+ } else
+ ext4_wait_for_tail_page_commit(inode);
}
+ /*
+ * Truncate pagecache after we've waited for commit
+ * in data=journal mode to make pages freeable.
+ */
+ truncate_pagecache(inode, oldsize, inode->i_size);
}
ext4_truncate(inode);
}
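
The ext4_wait_for_tail_page_commit() helper added above only has work to do when the last page of the file can contain a buffer lying wholly beyond i_size, which is what the early-return test offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits) encodes. The sketch below works through that arithmetic with assumed sizes (4 KiB pages, 1 KiB vs. 4 KiB blocks); it is a stand-alone illustration of the check, not ext4 code.

/* Worked example (assumed geometry) of the tail-page check above: a buffer
 * wholly beyond i_size can exist in the last page only when
 *     offset <= PAGE_SIZE - blocksize,
 * where offset = i_size & (PAGE_SIZE - 1). */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL

static int tail_page_needs_invalidate(unsigned long i_size, unsigned int blkbits)
{
	unsigned long offset = i_size & (EXAMPLE_PAGE_SIZE - 1);

	/* Inverse of the early return in ext4_wait_for_tail_page_commit(). */
	return offset <= EXAMPLE_PAGE_SIZE - (1UL << blkbits);
}

int main(void)
{
	/* 1 KiB blocks, i_size = 10241: offset = 2049, so the last 1 KiB
	 * buffer of the final page starts past i_size and the
	 * invalidate-and-wait-for-commit loop may be needed. */
	printf("%d\n", tail_page_needs_invalidate(10241, 10));	/* prints 1 */

	/* 4 KiB blocks (block size == page size): any unaligned size gives
	 * offset > 0, so the common case returns immediately. */
	printf("%d\n", tail_page_needs_invalidate(10241, 12));	/* prints 0 */
	return 0;
}
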
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index cac448282331..f9ed946a448e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -722,7 +722,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
ext4_warning(dir->i_sb, "Node failed checksum");
brelse(bh);
*err = ERR_BAD_DX_DIR;
- goto fail;
+ goto fail2;
}
set_buffer_verified(bh);
@@ -2368,7 +2368,6 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
}
inode->i_size = EXT4_I(inode)->i_disksize = blocksize;
- dir_block = ext4_bread(handle, inode, 0, 1, &err);
if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) {
if (!err) {
err = -EIO;
@@ -2648,7 +2647,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
struct ext4_iloc iloc;
int err = 0;
- if (!EXT4_SB(inode->i_sb)->s_journal)
+ if ((!EXT4_SB(inode->i_sb)->s_journal) &&
+ !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
return 0;
mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3cdb0a2fc648..3d4fb81bacd5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1645,9 +1645,7 @@ static int parse_options(char *options, struct super_block *sb,
unsigned int *journal_ioprio,
int is_remount)
{
-#ifdef CONFIG_QUOTA
struct ext4_sb_info *sbi = EXT4_SB(sb);
-#endif
char *p;
substring_t args[MAX_OPT_ARGS];
int token;
@@ -1696,6 +1694,16 @@ static int parse_options(char *options, struct super_block *sb,
}
}
#endif
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ int blocksize =
+ BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
+
+ if (blocksize < PAGE_CACHE_SIZE) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "dioread_nolock if block size != PAGE_SIZE");
+ return 0;
+ }
+ }
return 1;
}
@@ -2212,7 +2220,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
__func__, inode->i_ino, inode->i_size);
jbd_debug(2, "truncating inode %lu to %lld bytes\n",
inode->i_ino, inode->i_size);
+ mutex_lock(&inode->i_mutex);
ext4_truncate(inode);
+ mutex_unlock(&inode->i_mutex);
nr_truncates++;
} else {
ext4_msg(sb, KERN_DEBUG,
@@ -3223,6 +3233,10 @@ int ext4_calculate_overhead(struct super_block *sb)
memset(buf, 0, PAGE_SIZE);
cond_resched();
}
+ /* Add the journal blocks as well */
+ if (sbi->s_journal)
+ overhead += EXT4_B2C(sbi, sbi->s_journal->j_maxlen);
+
sbi->s_overhead = overhead;
smp_wmb();
free_page((unsigned long) buf);
@@ -3436,15 +3450,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
clear_opt(sb, DELALLOC);
}
- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
- if (test_opt(sb, DIOREAD_NOLOCK)) {
- if (blocksize < PAGE_SIZE) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "dioread_nolock if block size != PAGE_SIZE");
- goto failed_mount;
- }
- }
-
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
@@ -3486,6 +3491,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
goto failed_mount;
+ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
@@ -4725,7 +4731,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
ext4_setup_system_zone(sb);
- if (sbi->s_journal == NULL)
+ if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
ext4_commit_super(sb, 1);
#ifdef CONFIG_QUOTA
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index fed74d193ffb..e95b94945d5f 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -82,7 +82,6 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
case ACL_GROUP_OBJ:
case ACL_MASK:
case ACL_OTHER:
- acl->a_entries[i].e_id = ACL_UNDEFINED_ID;
entry = (struct f2fs_acl_entry *)((char *)entry +
sizeof(struct f2fs_acl_entry_short));
break;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 655aeabc1dd4..3aa5ce7cab83 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -16,6 +16,7 @@
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
+#include <linux/prefetch.h>
#include "f2fs.h"
#include "node.h"
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index b4e24f32b54e..951ed52748f6 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
+#include "node.h"
#include "acl.h"
static unsigned long dir_blocks(struct inode *inode)
@@ -74,7 +75,7 @@ static unsigned long dir_block_index(unsigned int level, unsigned int idx)
return bidx;
}
-static bool early_match_name(const char *name, int namelen,
+static bool early_match_name(const char *name, size_t namelen,
f2fs_hash_t namehash, struct f2fs_dir_entry *de)
{
if (le16_to_cpu(de->name_len) != namelen)
@@ -87,7 +88,7 @@ static bool early_match_name(const char *name, int namelen,
}
static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
- const char *name, int namelen, int *max_slots,
+ const char *name, size_t namelen, int *max_slots,
f2fs_hash_t namehash, struct page **res_page)
{
struct f2fs_dir_entry *de;
@@ -126,7 +127,7 @@ found:
}
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
- unsigned int level, const char *name, int namelen,
+ unsigned int level, const char *name, size_t namelen,
f2fs_hash_t namehash, struct page **res_page)
{
int s = GET_DENTRY_SLOTS(namelen);
@@ -181,7 +182,7 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
struct qstr *child, struct page **res_page)
{
const char *name = child->name;
- int namelen = child->len;
+ size_t namelen = child->len;
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
f2fs_hash_t name_hash;
@@ -308,6 +309,7 @@ static int init_inode_metadata(struct inode *inode, struct dentry *dentry)
ipage = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
if (IS_ERR(ipage))
return PTR_ERR(ipage);
+ set_cold_node(inode, ipage);
init_dent_inode(dentry, ipage);
f2fs_put_page(ipage, 1);
}
@@ -381,7 +383,7 @@ int f2fs_add_link(struct dentry *dentry, struct inode *inode)
struct inode *dir = dentry->d_parent->d_inode;
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
const char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
+ size_t namelen = dentry->d_name.len;
struct page *dentry_page = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
int slots = GET_DENTRY_SLOTS(namelen);
@@ -540,13 +542,13 @@ int f2fs_make_empty(struct inode *inode, struct inode *parent)
de = &dentry_blk->dentry[0];
de->name_len = cpu_to_le16(1);
- de->hash_code = 0;
+ de->hash_code = f2fs_dentry_hash(".", 1);
de->ino = cpu_to_le32(inode->i_ino);
memcpy(dentry_blk->filename[0], ".", 1);
set_de_type(de, inode);
de = &dentry_blk->dentry[1];
- de->hash_code = 0;
+ de->hash_code = f2fs_dentry_hash("..", 2);
de->name_len = cpu_to_le16(2);
de->ino = cpu_to_le32(parent->i_ino);
memcpy(dentry_blk->filename[1], "..", 2);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index a18d63db2fb6..13c6dfbb7183 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -881,7 +881,7 @@ int f2fs_sync_fs(struct super_block *, int);
/*
* hash.c
*/
-f2fs_hash_t f2fs_dentry_hash(const char *, int);
+f2fs_hash_t f2fs_dentry_hash(const char *, size_t);
/*
* node.c
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f9e085dfb1f0..7f9ea9271ebe 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -160,15 +160,17 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (need_to_sync_dir(sbi, inode))
need_cp = true;
- f2fs_write_inode(inode, NULL);
-
if (need_cp) {
/* all the dirty node pages should be flushed for POR */
ret = f2fs_sync_fs(inode->i_sb, 1);
clear_inode_flag(F2FS_I(inode), FI_NEED_CP);
} else {
- while (sync_node_pages(sbi, inode->i_ino, &wbc) == 0)
- f2fs_write_inode(inode, NULL);
+ /* if there is no written node page, write its inode page */
+ while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
+ ret = f2fs_write_inode(inode, NULL);
+ if (ret)
+ goto out;
+ }
filemap_fdatawait_range(sbi->node_inode->i_mapping,
0, LONG_MAX);
}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 644aa3808273..b0ec721e984a 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -390,9 +390,7 @@ next_step:
}
err = check_valid_map(sbi, segno, off);
- if (err == GC_ERROR)
- return err;
- else if (err == GC_NEXT)
+ if (err == GC_NEXT)
continue;
if (initial) {
@@ -430,28 +428,22 @@ next_step:
*/
block_t start_bidx_of_node(unsigned int node_ofs)
{
- block_t start_bidx;
- unsigned int bidx, indirect_blks;
- int dec;
+ unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
+ unsigned int bidx;
- indirect_blks = 2 * NIDS_PER_BLOCK + 4;
+ if (node_ofs == 0)
+ return 0;
- start_bidx = 1;
- if (node_ofs == 0) {
- start_bidx = 0;
- } else if (node_ofs <= 2) {
+ if (node_ofs <= 2) {
bidx = node_ofs - 1;
} else if (node_ofs <= indirect_blks) {
- dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
+ int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
bidx = node_ofs - 2 - dec;
} else {
- dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
+ int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
bidx = node_ofs - 5 - dec;
}
-
- if (start_bidx)
- start_bidx = bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
- return start_bidx;
+ return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -556,9 +548,7 @@ next_step:
}
err = check_valid_map(sbi, segno, off);
- if (err == GC_ERROR)
- goto stop;
- else if (err == GC_NEXT)
+ if (err == GC_NEXT)
continue;
if (phase == 0) {
@@ -568,9 +558,7 @@ next_step:
/* Get an inode by ino with checking validity */
err = check_dnode(sbi, entry, &dni, start_addr + off, &nofs);
- if (err == GC_ERROR)
- goto stop;
- else if (err == GC_NEXT)
+ if (err == GC_NEXT)
continue;
if (phase == 1) {
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index a60f04200f8b..6eb8d269b53b 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -42,7 +42,7 @@ static void TEA_transform(unsigned int buf[4], unsigned int const in[])
buf[1] += b1;
}
-static void str2hashbuf(const char *msg, int len, unsigned int *buf, int num)
+static void str2hashbuf(const char *msg, size_t len, unsigned int *buf, int num)
{
unsigned pad, val;
int i;
@@ -69,13 +69,17 @@ static void str2hashbuf(const char *msg, int len, unsigned int *buf, int num)
*buf++ = pad;
}
-f2fs_hash_t f2fs_dentry_hash(const char *name, int len)
+f2fs_hash_t f2fs_dentry_hash(const char *name, size_t len)
{
- __u32 hash, minor_hash;
+ __u32 hash;
f2fs_hash_t f2fs_hash;
const char *p;
__u32 in[8], buf[4];
+ if ((len <= 2) && (name[0] == '.') &&
+ (name[1] == '.' || name[1] == '\0'))
+ return 0;
+
/* Initialize the default seed for the hash checksum functions */
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
@@ -83,15 +87,15 @@ f2fs_hash_t f2fs_dentry_hash(const char *name, int len)
buf[3] = 0x10325476;
p = name;
- while (len > 0) {
+ while (1) {
str2hashbuf(p, len, in, 4);
TEA_transform(buf, in);
- len -= 16;
p += 16;
+ if (len <= 16)
+ break;
+ len -= 16;
}
hash = buf[0];
- minor_hash = buf[1];
-
f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
return f2fs_hash;
}
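
The hash.c change above makes "." and ".." hash to 0 inside f2fs_dentry_hash() itself, which is what allows the dir.c hunk earlier to replace the hard-coded zero hash codes in f2fs_make_empty() with calls to the same function: the dirent writer and the lookup path now agree by construction. Below is a minimal user-space model of just the added shortcut (the TEA hash body is not reproduced); it is illustrative only.

/* User-space model of the "." / ".." shortcut added above.  Both the
 * dirent writer (f2fs_make_empty) and lookups must produce the same
 * hash for the dot entries, and that hash is defined to be 0. */
#include <stdio.h>
#include <string.h>

static int is_dot_or_dotdot(const char *name, size_t len)
{
	/* Same condition as the hunk: at most two characters, the first is
	 * '.', and the second is either '.' or the terminating NUL. */
	return (len <= 2) && (name[0] == '.') &&
	       (name[1] == '.' || name[1] == '\0');
}

int main(void)
{
	const char *names[] = { ".", "..", ".a", "ab" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-3s -> %s\n", names[i],
		       is_dot_or_dotdot(names[i], strlen(names[i])) ?
		       "hash 0" : "TEA hash");
	return 0;
}
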
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index df5fb381ebf1..bf20b4d03214 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -203,6 +203,7 @@ void update_inode(struct inode *inode, struct page *node_page)
ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
ri->i_generation = cpu_to_le32(inode->i_generation);
+ set_cold_node(inode, node_page);
set_page_dirty(node_page);
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 89b7675dc377..1a49b881bac0 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -77,8 +77,8 @@ fail:
static int is_multimedia_file(const unsigned char *s, const char *sub)
{
- int slen = strlen(s);
- int sublen = strlen(sub);
+ size_t slen = strlen(s);
+ size_t sublen = strlen(sub);
int ret;
if (sublen > slen)
@@ -123,6 +123,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
nid_t ino = 0;
int err;
+ f2fs_balance_fs(sbi);
+
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -144,8 +146,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (!sbi->por_doing)
d_instantiate(dentry, inode);
unlock_new_inode(inode);
-
- f2fs_balance_fs(sbi);
return 0;
out:
clear_nlink(inode);
@@ -163,6 +163,8 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int err;
+ f2fs_balance_fs(sbi);
+
inode->i_ctime = CURRENT_TIME;
atomic_inc(&inode->i_count);
@@ -172,8 +174,6 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
goto out;
d_instantiate(dentry, inode);
-
- f2fs_balance_fs(sbi);
return 0;
out:
clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
@@ -223,6 +223,8 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
struct page *page;
int err = -ENOENT;
+ f2fs_balance_fs(sbi);
+
de = f2fs_find_entry(dir, &dentry->d_name, &page);
if (!de)
goto fail;
@@ -238,7 +240,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
/* In order to evict this inode, we set it dirty */
mark_inode_dirty(inode);
- f2fs_balance_fs(sbi);
fail:
return err;
}
@@ -249,9 +250,11 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
struct super_block *sb = dir->i_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
struct inode *inode;
- unsigned symlen = strlen(symname) + 1;
+ size_t symlen = strlen(symname) + 1;
int err;
+ f2fs_balance_fs(sbi);
+
inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -268,9 +271,6 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
-
- f2fs_balance_fs(sbi);
-
return err;
out:
clear_nlink(inode);
@@ -286,6 +286,8 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct inode *inode;
int err;
+ f2fs_balance_fs(sbi);
+
inode = f2fs_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -305,7 +307,6 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- f2fs_balance_fs(sbi);
return 0;
out_fail:
@@ -336,6 +337,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
if (!new_valid_dev(rdev))
return -EINVAL;
+ f2fs_balance_fs(sbi);
+
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -350,9 +353,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
alloc_nid_done(sbi, inode->i_ino);
d_instantiate(dentry, inode);
unlock_new_inode(inode);
-
- f2fs_balance_fs(sbi);
-
return 0;
out:
clear_nlink(inode);
@@ -376,6 +376,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_dir_entry *new_entry;
int err = -ENOENT;
+ f2fs_balance_fs(sbi);
+
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_entry)
goto out;
@@ -441,8 +443,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
mutex_unlock_op(sbi, RENAME);
-
- f2fs_balance_fs(sbi);
return 0;
out_dir:
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 19870361497e..5066bfd256c9 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -484,12 +484,14 @@ static void truncate_node(struct dnode_of_data *dn)
struct node_info ni;
get_node_info(sbi, dn->nid, &ni);
+ if (dn->inode->i_blocks == 0) {
+ BUG_ON(ni.blk_addr != NULL_ADDR);
+ goto invalidate;
+ }
BUG_ON(ni.blk_addr == NULL_ADDR);
- if (ni.blk_addr != NULL_ADDR)
- invalidate_blocks(sbi, ni.blk_addr);
-
/* Deallocate node address */
+ invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, dn->inode, 1);
set_node_addr(sbi, &ni, NULL_ADDR);
@@ -499,7 +501,7 @@ static void truncate_node(struct dnode_of_data *dn)
} else {
sync_inode_page(dn);
}
-
+invalidate:
clear_node_page_dirty(dn->node_page);
F2FS_SET_SB_DIRT(sbi);
@@ -768,20 +770,12 @@ int remove_inode_page(struct inode *inode)
dn.inode_page_locked = 1;
truncate_node(&dn);
}
- if (inode->i_blocks == 1) {
- /* inernally call f2fs_put_page() */
- set_new_dnode(&dn, inode, page, page, ino);
- truncate_node(&dn);
- } else if (inode->i_blocks == 0) {
- struct node_info ni;
- get_node_info(sbi, inode->i_ino, &ni);
- /* called after f2fs_new_inode() is failed */
- BUG_ON(ni.blk_addr != NULL_ADDR);
- f2fs_put_page(page, 1);
- } else {
- BUG();
- }
+ /* 0 is possible, after f2fs_new_inode() has failed */
+ BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
+ set_new_dnode(&dn, inode, page, page, ino);
+ truncate_node(&dn);
+
mutex_unlock_op(sbi, NODE_TRUNC);
return 0;
}
@@ -834,17 +828,18 @@ struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
goto fail;
}
set_node_addr(sbi, &new_ni, NEW_ADDR);
+ set_cold_node(dn->inode, page);
dn->node_page = page;
sync_inode_page(dn);
set_page_dirty(page);
- set_cold_node(dn->inode, page);
if (ofs == 0)
inc_valid_inode_count(sbi);
return page;
fail:
+ clear_node_page_dirty(page);
f2fs_put_page(page, 1);
return ERR_PTR(err);
}
@@ -1093,7 +1088,6 @@ static int f2fs_write_node_page(struct page *page,
{
struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
nid_t nid;
- unsigned int nofs;
block_t new_addr;
struct node_info ni;
@@ -1110,7 +1104,6 @@ static int f2fs_write_node_page(struct page *page,
/* get old block addr of this node page */
nid = nid_of_node(page);
- nofs = ofs_of_node(page);
BUG_ON(page->index != nid);
get_node_info(sbi, nid, &ni);
@@ -1571,7 +1564,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
nid_t nid;
struct f2fs_nat_entry raw_ne;
int offset = -1;
- block_t old_blkaddr, new_blkaddr;
+ block_t new_blkaddr;
ne = list_entry(cur, struct nat_entry, list);
nid = nat_get_nid(ne);
@@ -1585,7 +1578,6 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
if (offset >= 0) {
raw_ne = nat_in_journal(sum, offset);
- old_blkaddr = le32_to_cpu(raw_ne.block_addr);
goto flush_now;
}
to_nat_page:
@@ -1607,7 +1599,6 @@ to_nat_page:
BUG_ON(!nat_blk);
raw_ne = nat_blk->entries[nid - start_nid];
- old_blkaddr = le32_to_cpu(raw_ne.block_addr);
flush_now:
new_blkaddr = nat_get_blkaddr(ne);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index b07e9b6ef376..b571fee677d5 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -144,14 +144,15 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
goto out;
}
- INIT_LIST_HEAD(&entry->list);
- list_add_tail(&entry->list, head);
-
entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
if (IS_ERR(entry->inode)) {
err = PTR_ERR(entry->inode);
+ kmem_cache_free(fsync_entry_slab, entry);
goto out;
}
+
+ INIT_LIST_HEAD(&entry->list);
+ list_add_tail(&entry->list, head);
entry->blkaddr = blkaddr;
}
if (IS_INODE(page)) {
@@ -228,6 +229,9 @@ static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
/* Deallocate previous index in the node page */
inode = f2fs_iget_nowait(sbi->sb, ino);
+ if (IS_ERR(inode))
+ return;
+
truncate_hole(inode, bidx, bidx + 1);
iput(inode);
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1b26e4ea1016..de6240922b0a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -12,54 +12,23 @@
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include "f2fs.h"
#include "segment.h"
#include "node.h"
-static int need_to_flush(struct f2fs_sb_info *sbi)
-{
- unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) *
- sbi->segs_per_sec;
- int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
- int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
-
- if (sbi->por_doing)
- return 0;
-
- if (free_sections(sbi) <= (node_secs + 2 * dent_secs +
- reserved_sections(sbi)))
- return 1;
- return 0;
-}
-
/*
* This function balances dirty node and dentry pages.
* In addition, it controls garbage collection.
*/
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = LONG_MAX,
- .for_reclaim = 0,
- };
-
- if (sbi->por_doing)
- return;
-
/*
- * We should do checkpoint when there are so many dirty node pages
- * with enough free segments. After then, we should do GC.
+ * We should do GC or end up with a checkpoint if there are too many dirty
+ * dir/node pages without enough free segments.
*/
- if (need_to_flush(sbi)) {
- sync_dirty_dir_inodes(sbi);
- sync_node_pages(sbi, 0, &wbc);
- }
-
if (has_not_enough_free_secs(sbi)) {
mutex_lock(&sbi->gc_mutex);
f2fs_gc(sbi, 1);
@@ -631,7 +600,6 @@ static void f2fs_end_io_write(struct bio *bio, int err)
if (page->mapping)
set_bit(AS_EIO, &page->mapping->flags);
set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
- set_page_dirty(page);
}
end_page_writeback(page);
dec_page_count(p->sbi, F2FS_WRITEBACK);
@@ -791,11 +759,10 @@ static int __get_segment_type(struct page *page, enum page_type p_type)
return __get_segment_type_2(page, p_type);
case 4:
return __get_segment_type_4(page, p_type);
- case 6:
- return __get_segment_type_6(page, p_type);
- default:
- BUG();
}
+ /* NR_CURSEG_TYPE(6) logs by default */
+ BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
+ return __get_segment_type_6(page, p_type);
}
static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
@@ -1608,7 +1575,6 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
for (i = 0; i < NR_DIRTY_TYPE; i++) {
dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
- dirty_i->nr_dirty[i] = 0;
if (!dirty_i->dirty_segmap[i])
return -ENOMEM;
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 0948405af6f5..66a288a52fd3 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -459,7 +459,20 @@ static inline int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi)
{
- return free_sections(sbi) <= reserved_sections(sbi);
+ unsigned int pages_per_sec = (1 << sbi->log_blocks_per_seg) *
+ sbi->segs_per_sec;
+ int node_secs = ((get_pages(sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1)
+ >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+ int dent_secs = ((get_pages(sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1)
+ >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+
+ if (sbi->por_doing)
+ return false;
+
+ if (free_sections(sbi) <= (node_secs + 2 * dent_secs +
+ reserved_sections(sbi)))
+ return true;
+ return false;
}
static inline int utilization(struct f2fs_sb_info *sbi)
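
The predicate moved into segment.h above rounds the dirty node and dentry page counts up to whole sections and reports pressure unless the free sections strictly exceed node_secs + 2 * dent_secs + reserved_sections. The stand-alone sketch below runs that arithmetic with made-up geometry (512 blocks per segment, one segment per section, 10 reserved sections); all numbers are purely illustrative.

/* Rough user-space model of has_not_enough_free_secs() as moved above.
 * Every value fed to it here is invented for the example. */
#include <stdio.h>

static int has_not_enough_free_secs(unsigned int free_secs,
				    unsigned int reserved_secs,
				    unsigned int dirty_nodes,
				    unsigned int dirty_dents,
				    unsigned int log_blocks_per_seg,
				    unsigned int segs_per_sec)
{
	unsigned int pages_per_sec = (1U << log_blocks_per_seg) * segs_per_sec;
	/* Round each dirty page count up to whole sections. */
	unsigned int node_secs = ((dirty_nodes + pages_per_sec - 1)
					>> log_blocks_per_seg) / segs_per_sec;
	unsigned int dent_secs = ((dirty_dents + pages_per_sec - 1)
					>> log_blocks_per_seg) / segs_per_sec;

	return free_secs <= node_secs + 2 * dent_secs + reserved_secs;
}

int main(void)
{
	/* 512 blocks/segment, 1 segment/section, 10 reserved sections,
	 * 1000 dirty node pages (2 sections), 600 dirty dentry pages
	 * (2 sections): the threshold is 2 + 2*2 + 10 = 16 sections. */
	printf("%d\n", has_not_enough_free_secs(16, 10, 1000, 600, 9, 1)); /* 1 */
	printf("%d\n", has_not_enough_free_secs(17, 10, 1000, 600, 9, 1)); /* 0 */
	return 0;
}
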
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 13867322cf5a..08a94c814bdc 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -119,7 +119,6 @@ static void f2fs_put_super(struct super_block *sb)
int f2fs_sync_fs(struct super_block *sb, int sync)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
- int ret = 0;
if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
return 0;
@@ -127,7 +126,7 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (sync)
write_checkpoint(sbi, false, false);
- return ret;
+ return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -148,8 +147,8 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi);
- buf->f_files = valid_inode_count(sbi);
- buf->f_ffree = sbi->total_node_count - valid_node_count(sbi);
+ buf->f_files = sbi->total_node_count;
+ buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);
buf->f_namelen = F2FS_MAX_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
@@ -302,7 +301,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
case Opt_active_logs:
if (args->from && match_int(args, &arg))
return -EINVAL;
- if (arg != 2 && arg != 4 && arg != 6)
+ if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
return -EINVAL;
sbi->active_logs = arg;
break;
@@ -528,8 +527,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
/* if there are any orphan nodes, free them */
err = -EINVAL;
- if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) &&
- recover_orphan_inodes(sbi))
+ if (recover_orphan_inodes(sbi))
goto free_node_inode;
/* read root inode and dentry */
@@ -548,8 +546,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
}
/* recover fsynced data */
- if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) &&
- !test_opt(sbi, DISABLE_ROLL_FORWARD))
+ if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
recover_fsync_data(sbi);
/* After POR, we can run background GC thread */
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 7d52e8dc0c59..940136a3d3a6 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -208,7 +208,7 @@ int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
struct page *page;
void *base_addr;
int error = 0, found = 0;
- int value_len, name_len;
+ size_t value_len, name_len;
if (name == NULL)
return -EINVAL;
@@ -304,7 +304,8 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
struct f2fs_xattr_entry *here, *last;
struct page *page;
void *base_addr;
- int error, found, free, name_len, newsize;
+ int error, found, free, newsize;
+ size_t name_len;
char *pval;
if (name == NULL)
diff --git a/fs/file.c b/fs/file.c
index 15cb8618e95d..2b3570b7caeb 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -490,7 +490,7 @@ void exit_files(struct task_struct *tsk)
}
}
-static void __devinit fdtable_defer_list_init(int cpu)
+static void fdtable_defer_list_init(int cpu)
{
struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
spin_lock_init(&fddef->lock);
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 8dad6b093716..b906ed17a839 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -241,6 +241,7 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
static void gfs2_reverse_hex(char *c, u64 value)
{
+ *c = '0';
while (value) {
*c-- = hex_asc[value & 0x0f];
value >>= 4;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 37ee061d899e..b7eff078fe90 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -350,10 +350,14 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
BUG_ON(len < chunk_size);
len -= chunk_size;
block = gfs2_rbm_to_block(&rbm);
- gfs2_rbm_from_block(&rbm, block + chunk_size);
- n_unaligned = 3;
- if (ptr)
+ if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
+ n_unaligned = 0;
break;
+ }
+ if (ptr) {
+ n_unaligned = 3;
+ break;
+ }
n_unaligned = len & 3;
}
@@ -557,22 +561,20 @@ void gfs2_free_clones(struct gfs2_rgrpd *rgd)
*/
int gfs2_rs_alloc(struct gfs2_inode *ip)
{
- struct gfs2_blkreserv *res;
+ int error = 0;
+ down_write(&ip->i_rw_mutex);
if (ip->i_res)
- return 0;
-
- res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
- if (!res)
- return -ENOMEM;
+ goto out;
- RB_CLEAR_NODE(&res->rs_node);
+ ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
+ if (!ip->i_res) {
+ error = -ENOMEM;
+ goto out;
+ }
- down_write(&ip->i_rw_mutex);
- if (ip->i_res)
- kmem_cache_free(gfs2_rsrv_cachep, res);
- else
- ip->i_res = res;
+ RB_CLEAR_NODE(&ip->i_res->rs_node);
+out:
up_write(&ip->i_rw_mutex);
return 0;
}
@@ -1424,6 +1426,9 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
rs->rs_free = extlen;
rs->rs_inum = ip->i_no_addr;
rs_insert(ip);
+ } else {
+ if (goal == rgd->rd_last_alloc + rgd->rd_data0)
+ rgd->rd_last_alloc = 0;
}
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 42f6615af0ac..df9f29760efa 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -209,7 +209,8 @@ repeat:
if (!new_transaction)
goto alloc_transaction;
write_lock(&journal->j_state_lock);
- if (!journal->j_running_transaction) {
+ if (!journal->j_running_transaction &&
+ !journal->j_barrier_count) {
jbd2_get_transaction(journal, new_transaction);
new_transaction = NULL;
}
@@ -1839,7 +1840,6 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
BUFFER_TRACE(bh, "entry");
-retry:
/*
* It is safe to proceed here without the j_list_lock because the
* buffers cannot be stolen by try_to_free_buffers as long as we are
@@ -1934,14 +1934,11 @@ retry:
* for commit and try again.
*/
if (partial_page) {
- tid_t tid = journal->j_committing_transaction->t_tid;
-
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
write_unlock(&journal->j_state_lock);
- jbd2_log_wait_commit(journal, tid);
- goto retry;
+ return -EBUSY;
}
/*
* OK, buffer won't be reachable after truncate. We just set
@@ -2002,21 +1999,23 @@ zap_buffer_unlocked:
* @page: page to flush
* @offset: length of page to invalidate.
*
- * Reap page buffers containing data after offset in page.
- *
+ * Reap page buffers containing data after offset in page. Can return -EBUSY
+ * if buffers are part of the committing transaction and the page is straddling
+ * i_size. Caller then has to wait for current commit and try again.
*/
-void jbd2_journal_invalidatepage(journal_t *journal,
- struct page *page,
- unsigned long offset)
+int jbd2_journal_invalidatepage(journal_t *journal,
+ struct page *page,
+ unsigned long offset)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
int may_free = 1;
+ int ret = 0;
if (!PageLocked(page))
BUG();
if (!page_has_buffers(page))
- return;
+ return 0;
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
@@ -2030,9 +2029,11 @@ void jbd2_journal_invalidatepage(journal_t *journal,
if (offset <= curr_off) {
/* This block is wholly outside the truncation point */
lock_buffer(bh);
- may_free &= journal_unmap_buffer(journal, bh,
- offset > 0);
+ ret = journal_unmap_buffer(journal, bh, offset > 0);
unlock_buffer(bh);
+ if (ret < 0)
+ return ret;
+ may_free &= ret;
}
curr_off = next_off;
bh = next;
@@ -2043,6 +2044,7 @@ void jbd2_journal_invalidatepage(journal_t *journal,
if (may_free && try_to_free_buffers(page))
J_ASSERT(!page_has_buffers(page));
}
+ return 0;
}
/*
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index c89b26bc9759..264d1aa935f2 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -206,7 +206,7 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
list_for_each_entry(lo, &server->layouts, plh_layouts) {
ino = igrab(lo->plh_inode);
- if (ino)
+ if (!ino)
continue;
spin_lock(&ino->i_lock);
/* Is this layout in the process of being freed? */
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 32e6c53520e2..1b2d7eb93796 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2153,12 +2153,16 @@ static int nfs_open_permission_mask(int openflags)
{
int mask = 0;
- if ((openflags & O_ACCMODE) != O_WRONLY)
- mask |= MAY_READ;
- if ((openflags & O_ACCMODE) != O_RDONLY)
- mask |= MAY_WRITE;
- if (openflags & __FMODE_EXEC)
- mask |= MAY_EXEC;
+ if (openflags & __FMODE_EXEC) {
+ /* ONLY check exec rights */
+ mask = MAY_EXEC;
+ } else {
+ if ((openflags & O_ACCMODE) != O_WRONLY)
+ mask |= MAY_READ;
+ if ((openflags & O_ACCMODE) != O_RDONLY)
+ mask |= MAY_WRITE;
+ }
+
return mask;
}
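
The dir.c change above (and the matching nfs4proc.c change in the next hunk) makes an open for execute check only MAY_EXEC rather than also requiring read access. The self-contained copy below reproduces the new mapping with the relevant constant values filled in so it builds outside the kernel (MAY_* and __FMODE_EXEC values as in the 3.8-era headers); treat it as a sketch of the logic, not as NFS client code.

/* Stand-alone model of nfs_open_permission_mask() as changed above.
 * The flag values are copied in only so the example compiles here. */
#include <stdio.h>
#include <fcntl.h>

#define MAY_EXEC	0x01
#define MAY_WRITE	0x02
#define MAY_READ	0x04
#define __FMODE_EXEC	0x20	/* FMODE_EXEC, from include/linux/fs.h */

static int nfs_open_permission_mask(int openflags)
{
	int mask = 0;

	if (openflags & __FMODE_EXEC) {
		/* ONLY check exec rights */
		mask = MAY_EXEC;
	} else {
		if ((openflags & O_ACCMODE) != O_WRONLY)
			mask |= MAY_READ;
		if ((openflags & O_ACCMODE) != O_RDONLY)
			mask |= MAY_WRITE;
	}
	return mask;
}

int main(void)
{
	printf("O_RDONLY              -> %#x\n", nfs_open_permission_mask(O_RDONLY));
	printf("O_RDWR                -> %#x\n", nfs_open_permission_mask(O_RDWR));
	printf("O_RDONLY|__FMODE_EXEC -> %#x\n",
	       nfs_open_permission_mask(O_RDONLY | __FMODE_EXEC));
	return 0;
}
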
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5d864fb36578..cf747ef86650 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1626,7 +1626,8 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
static int nfs4_opendata_access(struct rpc_cred *cred,
struct nfs4_opendata *opendata,
- struct nfs4_state *state, fmode_t fmode)
+ struct nfs4_state *state, fmode_t fmode,
+ int openflags)
{
struct nfs_access_entry cache;
u32 mask;
@@ -1638,11 +1639,14 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
mask = 0;
/* don't check MAY_WRITE - a newly created file may not have
- * write mode bits, but POSIX allows the creating process to write */
- if (fmode & FMODE_READ)
- mask |= MAY_READ;
- if (fmode & FMODE_EXEC)
- mask |= MAY_EXEC;
+ * write mode bits, but POSIX allows the creating process to write.
+ * Use openflags to check for exec, because fmode won't
+ * always have FMODE_EXEC set when a file is opened for exec. */
+ if (openflags & __FMODE_EXEC) {
+ /* ONLY check for exec rights */
+ mask = MAY_EXEC;
+ } else if (fmode & FMODE_READ)
+ mask = MAY_READ;
cache.cred = cred;
cache.jiffies = jiffies;
@@ -1896,7 +1900,7 @@ static int _nfs4_do_open(struct inode *dir,
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
- status = nfs4_opendata_access(cred, opendata, state, fmode);
+ status = nfs4_opendata_access(cred, opendata, state, fmode, flags);
if (status != 0)
goto err_opendata_put;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index e7165d915362..d00260b08103 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -254,7 +254,7 @@ static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
lo->plh_retry_timestamp = jiffies;
- if (test_and_set_bit(fail_bit, &lo->plh_flags))
+ if (!test_and_set_bit(fail_bit, &lo->plh_flags))
atomic_inc(&lo->plh_refcount);
}
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index b6bdb18e892c..a5e5d9899d56 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -91,12 +91,16 @@ void nfs_readdata_release(struct nfs_read_data *rdata)
put_nfs_open_context(rdata->args.context);
if (rdata->pages.pagevec != rdata->pages.page_array)
kfree(rdata->pages.pagevec);
- if (rdata != &read_header->rpc_data)
- kfree(rdata);
- else
+ if (rdata == &read_header->rpc_data) {
rdata->header = NULL;
+ rdata = NULL;
+ }
if (atomic_dec_and_test(&hdr->refcnt))
hdr->completion_ops->completion(hdr);
+ /* Note: we only free the rpc_task after callbacks are done.
+ * See the comment in rpc_free_task() for why
+ */
+ kfree(rdata);
}
EXPORT_SYMBOL_GPL(nfs_readdata_release);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index c25cadf8f8c4..2e7e8c878e5d 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1152,7 +1152,7 @@ static int nfs_get_option_str(substring_t args[], char **option)
{
kfree(*option);
*option = match_strdup(args);
- return !option;
+ return !*option;
}
static int nfs_get_option_ul(substring_t args[], unsigned long *option)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b673be31590e..c483cc50b82e 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -126,12 +126,16 @@ void nfs_writedata_release(struct nfs_write_data *wdata)
put_nfs_open_context(wdata->args.context);
if (wdata->pages.pagevec != wdata->pages.page_array)
kfree(wdata->pages.pagevec);
- if (wdata != &write_header->rpc_data)
- kfree(wdata);
- else
+ if (wdata == &write_header->rpc_data) {
wdata->header = NULL;
+ wdata = NULL;
+ }
if (atomic_dec_and_test(&hdr->refcnt))
hdr->completion_ops->completion(hdr);
+ /* Note: we only free the rpc_task after callbacks are done.
+ * See the comment in rpc_free_task() for why
+ */
+ kfree(wdata);
}
EXPORT_SYMBOL_GPL(nfs_writedata_release);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index e064f562b1f7..76ddae83daa5 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -352,18 +352,18 @@ retry:
if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
return -ENOMEM;
- spin_lock_bh(&proc_inum_lock);
+ spin_lock_irq(&proc_inum_lock);
error = ida_get_new(&proc_inum_ida, &i);
- spin_unlock_bh(&proc_inum_lock);
+ spin_unlock_irq(&proc_inum_lock);
if (error == -EAGAIN)
goto retry;
else if (error)
return error;
if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
- spin_lock_bh(&proc_inum_lock);
+ spin_lock_irq(&proc_inum_lock);
ida_remove(&proc_inum_ida, i);
- spin_unlock_bh(&proc_inum_lock);
+ spin_unlock_irq(&proc_inum_lock);
return -ENOSPC;
}
*inum = PROC_DYNAMIC_FIRST + i;
@@ -372,9 +372,10 @@ retry:
void proc_free_inum(unsigned int inum)
{
- spin_lock_bh(&proc_inum_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&proc_inum_lock, flags);
ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
- spin_unlock_bh(&proc_inum_lock);
+ spin_unlock_irqrestore(&proc_inum_lock, flags);
}
static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 448455b7fd91..ca5ce7f9f800 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1278,7 +1278,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
walk.mm = mm;
pol = get_vma_policy(task, vma, vma->vm_start);
- mpol_to_str(buffer, sizeof(buffer), pol, 0);
+ mpol_to_str(buffer, sizeof(buffer), pol);
mpol_cond_put(pol);
seq_printf(m, "%08lx %s", vma->vm_start, buffer);
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index f883e7e74305..7003e5266f25 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -291,9 +291,8 @@ static void ramoops_free_przs(struct ramoops_context *cxt)
kfree(cxt->przs);
}
-static int __devinit ramoops_init_przs(struct device *dev,
- struct ramoops_context *cxt,
- phys_addr_t *paddr, size_t dump_mem_sz)
+static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+ phys_addr_t *paddr, size_t dump_mem_sz)
{
int err = -ENOMEM;
int i;
@@ -336,10 +335,9 @@ fail_prz:
return err;
}
-static int __devinit ramoops_init_prz(struct device *dev,
- struct ramoops_context *cxt,
- struct persistent_ram_zone **prz,
- phys_addr_t *paddr, size_t sz, u32 sig)
+static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
+ struct persistent_ram_zone **prz,
+ phys_addr_t *paddr, size_t sz, u32 sig)
{
if (!sz)
return 0;
@@ -367,7 +365,7 @@ static int __devinit ramoops_init_prz(struct device *dev,
return 0;
}
-static int __devinit ramoops_probe(struct platform_device *pdev)
+static int ramoops_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ramoops_platform_data *pdata = pdev->dev.platform_data;
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index eecd2a8a84dd..0306303be372 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -390,8 +390,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
return 0;
}
-static int __devinit persistent_ram_post_init(struct persistent_ram_zone *prz,
- u32 sig, int ecc_size)
+static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+ int ecc_size)
{
int ret;
@@ -443,9 +443,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
kfree(prz);
}
-struct persistent_ram_zone * __devinit persistent_ram_new(phys_addr_t start,
- size_t size, u32 sig,
- int ecc_size)
+struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+ u32 sig, int ecc_size)
{
struct persistent_ram_zone *prz;
int ret = -ENOMEM;
diff --git a/fs/splice.c b/fs/splice.c
index 8890604e3fcd..6909d89d0da5 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -696,8 +696,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
return -EINVAL;
more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
- if (sd->len < sd->total_len)
+
+ if (sd->len < sd->total_len && pipe->nrbufs > 1)
more |= MSG_SENDPAGE_NOTLAST;
+
return file->f_op->sendpage(file, buf->page, buf->offset,
sd->len, &pos, more);
}