author     Linus Torvalds <torvalds@linux-foundation.org>  2022-08-03 13:50:22 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-08-03 13:50:22 -0700
commit     5264406cdb66c7003eb3edf53c9773b1b20611b9 (patch)
tree       e94f76f64a0b3b45dcb9f9bec85cce2ba78e1221 /include/linux
parent     200e340f2196d7fd427a5810d06e893b932f145a (diff)
parent     dd45ab9dd28c82fc495d98cd9788666fd8d76b99 (diff)
Merge tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs iov_iter updates from Al Viro:
 "Part 1 - isolated cleanups and optimizations.

  One of the goals is to reduce the overhead of using ->read_iter() and
  ->write_iter() instead of ->read()/->write(). new_sync_{read,write}()
  has a surprising amount of overhead, in particular inside
  iocb_flags(). That's why the beginning of the series is in this pile;
  it's not directly iov_iter-related, but it's a part of the same
  work..."

* tag 'pull-work.iov_iter-base' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  first_iovec_segment(): just return address
  iov_iter: massage calling conventions for first_{iovec,bvec}_segment()
  iov_iter: first_{iovec,bvec}_segment() - simplify a bit
  iov_iter: lift dealing with maxpages out of first_{iovec,bvec}_segment()
  iov_iter_get_pages{,_alloc}(): cap the maxsize with MAX_RW_COUNT
  iov_iter_bvec_advance(): don't bother with bvec_iter
  copy_page_{to,from}_iter(): switch iovec variants to generic
  keep iocb_flags() result cached in struct file
  iocb: delay evaluation of IS_SYNC(...) until we want to check IOCB_DSYNC
  struct file: use anonymous union member for rcuhead and llist
  btrfs: use IOMAP_DIO_NOSYNC
  teach iomap_dio_rw() to suppress dsync
  No need of likely/unlikely on calls of check_copy_size()
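The core optimization described above is: compute the per-I/O flags once at
open time and cache them in struct file, and defer the IS_SYNC() inode test
to the one consumer that needs it, instead of paying for both on every
synchronous read/write. A minimal userspace analogue of that pattern — all
names here (my_file, my_iocb, compute_iocb_flags, ...) are illustrative
stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_DSYNC  (1u << 0)
#define FLAG_APPEND (1u << 1)

#define MY_O_DSYNC  (1 << 0)                    /* stand-in for O_DSYNC  */
#define MY_O_APPEND (1 << 1)                    /* stand-in for O_APPEND */

struct my_inode { bool sync_always; };          /* IS_SYNC() analogue */

struct my_file {
        int open_flags;                         /* f_flags analogue        */
        unsigned int iocb_flags;                /* new f_iocb_flags cache  */
        struct my_inode *inode;
};

struct my_iocb {
        unsigned int ki_flags;
        struct my_file *filp;
};

/* Was run per I/O; now run once at "open" and cached.  Note the inode
 * check is gone from here entirely. */
static unsigned int compute_iocb_flags(const struct my_file *f)
{
        unsigned int res = 0;

        if (f->open_flags & MY_O_DSYNC)
                res |= FLAG_DSYNC;
        if (f->open_flags & MY_O_APPEND)
                res |= FLAG_APPEND;
        return res;
}

/* The IS_SYNC()-style inode test is deferred to the one consumer that
 * needs it, mirroring the new iocb_is_dsync() in this pull. */
static bool my_iocb_is_dsync(const struct my_iocb *iocb)
{
        return (iocb->ki_flags & FLAG_DSYNC) ||
                iocb->filp->inode->sync_always;
}

int main(void)
{
        struct my_inode inode = { .sync_always = true };
        struct my_file f = { .open_flags = 0, .inode = &inode };
        struct my_iocb iocb;

        f.iocb_flags = compute_iocb_flags(&f);  /* once, at open time  */

        iocb.ki_flags = f.iocb_flags;           /* cheap per-I/O copy  */
        iocb.filp = &f;

        printf("dsync needed: %d\n", my_iocb_is_dsync(&iocb));
        return 0;
}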
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/fs.h      | 21
-rw-r--r--  include/linux/iomap.h   |  6
-rw-r--r--  include/linux/uaccess.h |  4
-rw-r--r--  include/linux/uio.h     | 15
4 files changed, 27 insertions(+), 19 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 76f09f3ee4a5..cc64873d76c5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -943,9 +943,10 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
struct file {
union {
- struct llist_node fu_llist;
- struct rcu_head fu_rcuhead;
- } f_u;
+ struct llist_node f_llist;
+ struct rcu_head f_rcuhead;
+ unsigned int f_iocb_flags;
+ };
struct path f_path;
struct inode *f_inode; /* cached value */
const struct file_operations *f_op;
@@ -2328,13 +2329,11 @@ static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
!vfsgid_valid(i_gid_into_vfsgid(mnt_userns, inode));
}
-static inline int iocb_flags(struct file *file);
-
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
- .ki_flags = iocb_flags(filp),
+ .ki_flags = filp->f_iocb_flags,
.ki_ioprio = get_current_ioprio(),
};
}
@@ -2850,6 +2849,12 @@ extern int vfs_fsync(struct file *file, int datasync);
extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
unsigned int flags);
+static inline bool iocb_is_dsync(const struct kiocb *iocb)
+{
+ return (iocb->ki_flags & IOCB_DSYNC) ||
+ IS_SYNC(iocb->ki_filp->f_mapping->host);
+}
+
/*
* Sync the bytes written if this was a synchronous write. Expect ki_pos
* to already be updated for the write, and will return either the amount
@@ -2857,7 +2862,7 @@ extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
*/
static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
- if (iocb->ki_flags & IOCB_DSYNC) {
+ if (iocb_is_dsync(iocb)) {
int ret = vfs_fsync_range(iocb->ki_filp,
iocb->ki_pos - count, iocb->ki_pos - 1,
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
@@ -3380,7 +3385,7 @@ static inline int iocb_flags(struct file *file)
res |= IOCB_APPEND;
if (file->f_flags & O_DIRECT)
res |= IOCB_DIRECT;
- if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+ if (file->f_flags & O_DSYNC)
res |= IOCB_DSYNC;
if (file->f_flags & __O_SYNC)
res |= IOCB_SYNC;
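With IS_SYNC() gone from iocb_flags(), the remaining computation depends
only on f_flags, so it can be evaluated once and stored in the new
f_iocb_flags member. The code that populates the cache lives outside this
include/linux diffstat (fs/open.c and the F_SETFL path, per the commit
message), so the sketch below is an assumption about what that maintenance
looks like, not part of this diff:

/* Assumed maintenance of the cache (outside include/linux, so not
 * visible in this pull's diffstat): compute once when the file is set
 * up, and recompute whenever f_flags can change — e.g. at the end of
 * open, and again after fcntl(F_SETFL) updates f->f_flags — so the
 * cached value never goes stale. */
static void cache_iocb_flags(struct file *f)
{
        f->f_iocb_flags = iocb_flags(f);        /* cheap now: no IS_SYNC() */
}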
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 758a1125e72f..25ac28175e4f 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -347,6 +347,12 @@ struct iomap_dio_ops {
*/
#define IOMAP_DIO_PARTIAL (1 << 2)
+/*
+ * The caller will sync the write if needed; do not sync it within
+ * iomap_dio_rw. Overrides IOMAP_DIO_FORCE_WAIT.
+ */
+#define IOMAP_DIO_NOSYNC (1 << 3)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
unsigned int dio_flags, void *private, size_t done_before);
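A filesystem that passes IOMAP_DIO_NOSYNC is promising to handle the
O_DSYNC/IS_SYNC work itself once the direct I/O completes; the btrfs commit
in this series is the first user. A hedged sketch of the call pattern, with
my_fs_iomap_ops and my_fs_dio_ops as illustrative placeholders rather than
real ops tables:

/* Illustrative direct-write path: suppress the dsync handling inside
 * iomap_dio_rw() and perform it ourselves afterwards (useful when the
 * fsync path needs locks that cannot be taken from dio completion). */
static ssize_t my_fs_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;

        ret = iomap_dio_rw(iocb, from, &my_fs_iomap_ops, &my_fs_dio_ops,
                           IOMAP_DIO_NOSYNC, NULL, 0);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret); /* uses iocb_is_dsync() */
        return ret;
}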
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5a328cf02b75..47e5d374c7eb 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -148,7 +148,7 @@ _copy_to_user(void __user *, const void *, unsigned long);
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (likely(check_copy_size(to, n, false)))
+ if (check_copy_size(to, n, false))
n = _copy_from_user(to, from, n);
return n;
}
@@ -156,7 +156,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (likely(check_copy_size(from, n, true)))
+ if (check_copy_size(from, n, true))
n = _copy_to_user(to, from, n);
return n;
}
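Dropping the likely()/unlikely() hints here is harmless because
check_copy_size() is __always_inline and its failure paths already carry
unlikely() annotations internally (including the one implied by
WARN_ON_ONCE()), so after inlining the compiler reaches the same branch
layout without the outer hint. A small standalone illustration of the
principle, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

/* Analogue of check_copy_size(): the cold-path hint lives inside the
 * always-inlined helper, so callers need not repeat it. */
static inline bool size_ok(size_t have, size_t want)
{
        if (__builtin_expect(want > have, 0))   /* unlikely() analogue */
                return false;
        return true;
}

static size_t copy_bounded(char *dst, size_t dst_sz,
                           const char *src, size_t n)
{
        /* No outer likely(): after inlining size_ok(), the compiler
         * already treats the failure branch as cold. */
        if (size_ok(dst_sz, n)) {
                while (n--)
                        *dst++ = *src++;
                return 0;
        }
        return n;       /* mirror copy_from_user(): bytes not copied */
}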
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 34ba4a731179..9a2dc496d535 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -156,19 +156,17 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, true)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, true))
return _copy_to_iter(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, false))
return _copy_from_iter(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check
@@ -184,10 +182,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
+ if (check_copy_size(addr, bytes, false))
return _copy_from_iter_nocache(addr, bytes, i);
+ return 0;
}
static __always_inline __must_check