author		David Howells <dhowells@redhat.com>	2021-06-29 22:37:05 +0100
committer	David Howells <dhowells@redhat.com>	2022-03-18 09:29:05 +0000
commit		bc899ee1c898e520574ff4d99356eb2e724a9265
tree		a3a91c584d50d2e567341b4f9ba6fc0a9f40ec12 /fs/netfs
parent		a5c9dc4451394b2854493944dcc0ff71af9705a3
netfs: Add a netfs inode context
Add a netfs_i_context struct that should be included in the network
filesystem's own inode struct wrapper, directly after the VFS's inode
struct, e.g.:

	struct my_inode {
		struct {
			/* These must be contiguous */
			struct inode		vfs_inode;
			struct netfs_i_context	netfs_ctx;
		};
	};

The netfs_i_context struct so far contains a single field for the network
filesystem to use - the cache cookie:

	struct netfs_i_context {
		...
		struct fscache_cookie	*cache;
	};

Three functions are provided to help with this:

 (1) void netfs_i_context_init(struct inode *inode,
				const struct netfs_request_ops *ops);

     Initialise the netfs context and set the operations.

 (2) struct netfs_i_context *netfs_i_context(struct inode *inode);

     Find the netfs context from the VFS inode.

 (3) struct inode *netfs_inode(struct netfs_i_context *ctx);

     Find the VFS inode from the netfs context.

Changes
=======
ver #4)
 - Fix netfs_is_cache_enabled() to check cookie->cache_priv to see if a
   cache is present[3].
 - Fix netfs_skip_folio_read() to zero out all of the page, not just some
   of it[3].

ver #3)
 - Split out the bit to move ceph cap-getting on readahead into
   ceph_init_request()[1].
 - Stick in a comment to the netfs inode structs indicating the contiguity
   requirements[2].

ver #2)
 - Adjust documentation to match.
 - Use "#if IS_ENABLED()" in netfs_i_cookie(), not "#ifdef".
 - Move the cap check from ceph_readahead() to ceph_init_request() to be
   called from netfslib.
 - Remove ceph_readahead() and use netfs_readahead() directly instead.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/8af0d47f17d89c06bbf602496dd845f2b0bf25b3.camel@kernel.org/ [1]
Link: https://lore.kernel.org/r/beaf4f6a6c2575ed489adb14b257253c868f9a5c.camel@kernel.org/ [2]
Link: https://lore.kernel.org/r/3536452.1647421585@warthog.procyon.org.uk/ [3]
Link: https://lore.kernel.org/r/164622984545.3564931.15691742939278418580.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678213320.1200972.16807551936267647470.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692909854.2099075.9535537286264248057.stgit@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/306388.1647595110@warthog.procyon.org.uk/ # v4
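As an illustrative sketch of the intended usage (the "myfs" names below are
hypothetical and not part of this patch; only the netfs_* calls are the API
being added), a filesystem would embed and initialise the context roughly
like this:

	/* Hypothetical filesystem "myfs": myfs_inode_cachep and myfs_req_ops
	 * are assumed to be defined elsewhere in that filesystem.
	 */
	struct myfs_inode {
		struct {
			/* These must be contiguous */
			struct inode		vfs_inode;
			struct netfs_i_context	netfs_ctx;
		};
		unsigned long		myfs_flags;	/* fs-private state */
	};

	static struct inode *myfs_alloc_inode(struct super_block *sb)
	{
		struct myfs_inode *mi;

		mi = kmem_cache_alloc(myfs_inode_cachep, GFP_KERNEL);
		if (!mi)
			return NULL;
		/* Attach the context and the request ops to the new inode. */
		netfs_i_context_init(&mi->vfs_inode, &myfs_req_ops);
		return &mi->vfs_inode;
	}

Because the two structs are contiguous, netfs_i_context() and netfs_inode()
can convert between the VFS inode and the netfs context with simple pointer
arithmetic, which is how the helpers in the diff below recover the ops table
without having it passed in.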
Diffstat (limited to 'fs/netfs')
-rw-r--r--	fs/netfs/internal.h	18
-rw-r--r--	fs/netfs/objects.c	12
-rw-r--r--	fs/netfs/read_helper.c	100
-rw-r--r--	fs/netfs/stats.c	1
4 files changed, 67 insertions(+), 64 deletions(-)
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 89837e904fa7..54c761bcc8e6 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -6,6 +6,7 @@
*/
#include <linux/netfs.h>
+#include <linux/fscache.h>
#include <trace/events/netfs.h>
#ifdef pr_fmt
@@ -19,8 +20,6 @@
*/
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct file *file,
- const struct netfs_request_ops *ops,
- void *netfs_priv,
loff_t start, size_t len,
enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
@@ -81,6 +80,21 @@ static inline void netfs_stat_d(atomic_t *stat)
#define netfs_stat_d(x) do {} while(0)
#endif
+/*
+ * Miscellaneous functions.
+ */
+static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+ struct fscache_cookie *cookie = ctx->cache;
+
+ return fscache_cookie_valid(cookie) && cookie->cache_priv &&
+ fscache_cookie_enabled(cookie);
+#else
+ return false;
+#endif
+}
+
/*****************************************************************************/
/*
* debug tracing
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index ae18827e156b..657b19e60118 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -13,12 +13,12 @@
*/
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct file *file,
- const struct netfs_request_ops *ops,
- void *netfs_priv,
loff_t start, size_t len,
enum netfs_io_origin origin)
{
static atomic_t debug_ids;
+ struct inode *inode = file ? file_inode(file) : mapping->host;
+ struct netfs_i_context *ctx = netfs_i_context(inode);
struct netfs_io_request *rreq;
int ret;
@@ -29,11 +29,10 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
rreq->start = start;
rreq->len = len;
rreq->origin = origin;
- rreq->netfs_ops = ops;
- rreq->netfs_priv = netfs_priv;
+ rreq->netfs_ops = ctx->ops;
rreq->mapping = mapping;
- rreq->inode = file_inode(file);
- rreq->i_size = i_size_read(rreq->inode);
+ rreq->inode = inode;
+ rreq->i_size = i_size_read(inode);
rreq->debug_id = atomic_inc_return(&debug_ids);
INIT_LIST_HEAD(&rreq->subrequests);
INIT_WORK(&rreq->work, netfs_rreq_work);
@@ -76,6 +75,7 @@ static void netfs_free_request(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
+
netfs_clear_subrequests(rreq, false);
if (rreq->netfs_priv)
rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index b5176f4320f4..c048cd328ce5 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -14,7 +14,6 @@
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/netfs.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>
@@ -735,8 +734,6 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
/**
* netfs_readahead - Helper to manage a read request
* @ractl: The description of the readahead request
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
*
* Fulfil a readahead request by drawing data from the cache if possible, or
* the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O
@@ -744,35 +741,32 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
* readahead window can be expanded in either direction to a more convenient
* alignment for RPC efficiency or to make storage in the cache feasible.
*
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory. It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
-void netfs_readahead(struct readahead_control *ractl,
- const struct netfs_request_ops *ops,
- void *netfs_priv)
+void netfs_readahead(struct readahead_control *ractl)
{
struct netfs_io_request *rreq;
+ struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
unsigned int debug_index = 0;
int ret;
_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
if (readahead_count(ractl) == 0)
- goto cleanup;
+ return;
rreq = netfs_alloc_request(ractl->mapping, ractl->file,
- ops, netfs_priv,
readahead_pos(ractl),
readahead_length(ractl),
NETFS_READAHEAD);
if (IS_ERR(rreq))
- goto cleanup;
+ return;
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto cleanup_free;
}
@@ -804,42 +798,35 @@ void netfs_readahead(struct readahead_control *ractl,
cleanup_free:
netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
return;
-cleanup:
- if (netfs_priv)
- ops->cleanup(ractl->mapping, netfs_priv);
- return;
}
EXPORT_SYMBOL(netfs_readahead);
/**
* netfs_readpage - Helper to manage a readpage request
* @file: The file to read from
- * @folio: The folio to read
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
+ * @subpage: A subpage of the folio to read
*
* Fulfil a readpage request by drawing data from the cache if possible, or the
* netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests
* from different sources will get munged together.
*
- * The calling netfs must provide a table of operations, only one of which,
- * issue_op, is mandatory. It may also be passed a private token, which will
- * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
*
* This is usable whether or not caching is enabled.
*/
-int netfs_readpage(struct file *file,
- struct folio *folio,
- const struct netfs_request_ops *ops,
- void *netfs_priv)
+int netfs_readpage(struct file *file, struct page *subpage)
{
+ struct folio *folio = page_folio(subpage);
+ struct address_space *mapping = folio->mapping;
struct netfs_io_request *rreq;
+ struct netfs_i_context *ctx = netfs_i_context(mapping->host);
unsigned int debug_index = 0;
int ret;
_enter("%lx", folio_index(folio));
- rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv,
+ rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
NETFS_READPAGE);
if (IS_ERR(rreq)) {
@@ -847,8 +834,8 @@ int netfs_readpage(struct file *file,
goto alloc_error;
}
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
folio_unlock(folio);
goto out;
@@ -886,8 +873,6 @@ out:
netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
return ret;
alloc_error:
- if (netfs_priv)
- ops->cleanup(folio_file_mapping(folio), netfs_priv);
folio_unlock(folio);
return ret;
}
@@ -898,6 +883,7 @@ EXPORT_SYMBOL(netfs_readpage);
* @folio: The folio being prepared
* @pos: starting position for the write
* @len: length of write
+ * @always_fill: T if the folio should always be completely filled/cleared
*
* In some cases, write_begin doesn't need to read at all:
* - full folio write
@@ -907,17 +893,27 @@ EXPORT_SYMBOL(netfs_readpage);
* If any of these criteria are met, then zero out the unwritten parts
* of the folio and return true. Otherwise, return false.
*/
-static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
+ bool always_fill)
{
struct inode *inode = folio_inode(folio);
loff_t i_size = i_size_read(inode);
size_t offset = offset_in_folio(folio, pos);
+ size_t plen = folio_size(folio);
+
+ if (unlikely(always_fill)) {
+ if (pos - offset + len <= i_size)
+ return false; /* Page entirely before EOF */
+ zero_user_segment(&folio->page, 0, plen);
+ folio_mark_uptodate(folio);
+ return true;
+ }
/* Full folio write */
- if (offset == 0 && len >= folio_size(folio))
+ if (offset == 0 && len >= plen)
return true;
- /* pos beyond last folio in the file */
+ /* Page entirely beyond the end of the file */
if (pos - offset >= i_size)
goto zero_out;
@@ -927,7 +923,7 @@ static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
return false;
zero_out:
- zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
+ zero_user_segments(&folio->page, 0, offset, offset + len, plen);
return true;
}
@@ -940,8 +936,6 @@ zero_out:
* @aop_flags: AOP_* flags
* @_folio: Where to put the resultant folio
* @_fsdata: Place for the netfs to store a cookie
- * @ops: The network filesystem's operations for the helper to use
- * @netfs_priv: Private netfs data to be retained in the request
*
* Pre-read data for a write-begin request by drawing data from the cache if
* possible, or the netfs if not. Space beyond the EOF is zero-filled.
@@ -960,17 +954,18 @@ zero_out:
* should go ahead; unlock the folio and return -EAGAIN to cause the folio to
* be regot; or return an error.
*
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
* This is usable whether or not caching is enabled.
*/
int netfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int aop_flags,
- struct folio **_folio, void **_fsdata,
- const struct netfs_request_ops *ops,
- void *netfs_priv)
+ struct folio **_folio, void **_fsdata)
{
struct netfs_io_request *rreq;
+ struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
struct folio *folio;
- struct inode *inode = file_inode(file);
unsigned int debug_index = 0, fgp_flags;
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
@@ -986,9 +981,9 @@ retry:
if (!folio)
return -ENOMEM;
- if (ops->check_write_begin) {
+ if (ctx->ops->check_write_begin) {
/* Allow the netfs (eg. ceph) to flush conflicts. */
- ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
+ ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
if (ret < 0) {
trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
if (ret == -EAGAIN)
@@ -1004,13 +999,13 @@ retry:
* within the cache granule containing the EOF, in which case we need
* to preload the granule.
*/
- if (!ops->is_cache_enabled(inode) &&
- netfs_skip_folio_read(folio, pos, len)) {
+ if (!netfs_is_cache_enabled(ctx) &&
+ netfs_skip_folio_read(folio, pos, len, false)) {
netfs_stat(&netfs_n_rh_write_zskip);
goto have_folio_no_wait;
}
- rreq = netfs_alloc_request(mapping, file, ops, netfs_priv,
+ rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
NETFS_READ_FOR_WRITE);
if (IS_ERR(rreq)) {
@@ -1019,10 +1014,9 @@ retry:
}
rreq->no_unlock_folio = folio_index(folio);
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
- netfs_priv = NULL;
- if (ops->begin_cache_operation) {
- ret = ops->begin_cache_operation(rreq);
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto error_put;
}
@@ -1076,8 +1070,6 @@ have_folio:
if (ret < 0)
goto error;
have_folio_no_wait:
- if (netfs_priv)
- ops->cleanup(mapping, netfs_priv);
*_folio = folio;
_leave(" = 0");
return 0;
@@ -1087,8 +1079,6 @@ error_put:
error:
folio_unlock(folio);
folio_put(folio);
- if (netfs_priv)
- ops->cleanup(mapping, netfs_priv);
_leave(" = %d", ret);
return ret;
}
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index 9ae538c85378..5510a7a14a40 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -7,7 +7,6 @@
#include <linux/export.h>
#include <linux/seq_file.h>
-#include <linux/netfs.h>
#include "internal.h"
atomic_t netfs_n_rh_readahead;
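
With the @ops and @netfs_priv parameters gone, netfs_readahead() and
netfs_readpage() now match the corresponding address_space_operations
signatures directly. As a hedged sketch of the caller-side wiring (again
with hypothetical "myfs" names; the afs and ceph conversions elsewhere in
this series follow the same pattern):

	/* Hypothetical "myfs" glue; the netfs_* symbols are the real API. */
	static int myfs_write_begin(struct file *file, struct address_space *mapping,
				    loff_t pos, unsigned int len, unsigned int aop_flags,
				    struct page **pagep, void **fsdata)
	{
		struct folio *folio;
		int ret;

		/* No ops/netfs_priv arguments: both now come from the inode's
		 * netfs context set up by netfs_i_context_init().
		 */
		ret = netfs_write_begin(file, mapping, pos, len, aop_flags,
					&folio, fsdata);
		if (ret == 0)
			*pagep = &folio->page;
		return ret;
	}

	static const struct address_space_operations myfs_aops = {
		.readpage	= netfs_readpage,	/* (file, page) */
		.readahead	= netfs_readahead,	/* (ractl) */
		.write_begin	= myfs_write_begin,
		/* ... other ops ... */
	};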