author    David Howells <dhowells@redhat.com>  2017-11-02 15:27:53 +0000
committer David Howells <dhowells@redhat.com>  2017-11-13 15:38:21 +0000
commit    13524ab3c6f41bcd257d28644414297bea8282b7
tree      9ac5ab8bfd44026bb13783cf540f4fdb57d72d44  /fs/afs/write.c
parent    1cf7a1518aefa69ac6ba0c3f9206073e4221e3c8
afs: Trace page dirty/clean
Add a trace event that logs the dirtying and cleaning of pages attached to
AFS inodes.

Signed-off-by: David Howells <dhowells@redhat.com>
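The trace_afs_page_dirty() calls added in the hunks below all pass the vnode, a tracepoint_string() tag naming the call site ("begin", "store", "clear", ...), the page index and the page's private value. The event definition itself is outside this diff, which only touches fs/afs/write.c; as a minimal sketch, assuming it lives in the AFS trace header with the usual trace-header boilerplate (TRACE_SYSTEM afs, include guards, define_trace.h) around it, it might look something like this:

/* Sketch only -- the real definition is not part of this diff. */
TRACE_EVENT(afs_page_dirty,
	    TP_PROTO(struct afs_vnode *vnode, const char *where,
		     pgoff_t page, unsigned long priv),

	    TP_ARGS(vnode, where, page, priv),

	    TP_STRUCT__entry(
		    __field(struct afs_vnode *,	vnode)
		    __field(const char *,	where)
		    __field(pgoff_t,		page)
		    __field(unsigned long,	priv)
		    ),

	    TP_fast_assign(
		    __entry->vnode = vnode;
		    __entry->where = where;
		    __entry->page  = page;
		    __entry->priv  = priv;
		    ),

	    TP_printk("vn=%p %lx %s %lx",
		      __entry->vnode, __entry->page, __entry->where,
		      __entry->priv)
	    );

tracepoint_string() places the literal in a dedicated section so only a pointer has to be recorded per event; once the event is enabled, the records can be read from the afs/afs_page_dirty event directory under tracefs.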
Diffstat (limited to 'fs/afs/write.c')
-rw-r--r--  fs/afs/write.c  34
1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 6807277ef956..4472882f06df 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -18,19 +18,6 @@
#include "internal.h"
/*
- * We use page->private to hold the amount of the page that we've written to,
- * splitting the field into two parts. However, we need to represent a range
- * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
- */
-#if PAGE_SIZE > 32768
-#define AFS_PRIV_MAX 0xffffffff
-#define AFS_PRIV_SHIFT 32
-#else
-#define AFS_PRIV_MAX 0xffff
-#define AFS_PRIV_SHIFT 16
-#endif
-
-/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
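The AFS_PRIV_MAX/AFS_PRIV_SHIFT definitions and the comment explaining them are removed from write.c here, yet the code below still uses them and the new trace event has to decode the same value, so they presumably move to a header visible to both write.c and the tracing code (this write.c-only diff does not show the new home). For reference, the encoding the removed comment describes, written out as hypothetical helpers rather than the open-coded shifts and masks the file actually uses:

#if PAGE_SIZE > 32768
#define AFS_PRIV_MAX	0xffffffff
#define AFS_PRIV_SHIFT	32
#else
#define AFS_PRIV_MAX	0xffff
#define AFS_PRIV_SHIFT	16
#endif

/* Illustrative helpers only -- not part of the patch. */
static inline unsigned long afs_page_dirty_priv(unsigned int from, unsigned int to)
{
	/* "to" lives in the high half of page->private, "from" in the low half */
	return ((unsigned long)to << AFS_PRIV_SHIFT) | from;
}

static inline void afs_page_dirty_range(unsigned long priv,
					unsigned int *from, unsigned int *to)
{
	*from = priv & AFS_PRIV_MAX;
	*to   = priv >> AFS_PRIV_SHIFT;
}

The hunks that follow open-code exactly these two operations wherever a page's dirty range is set or examined, which is why the range 0...PAGE_SIZE has to fit in half of an unsigned long.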
@@ -145,6 +132,8 @@ try_again:
priv = (unsigned long)t << AFS_PRIV_SHIFT;
priv |= f;
+ trace_afs_page_dirty(vnode, tracepoint_string("begin"),
+ page->index, priv);
SetPagePrivate(page);
set_page_private(page, priv);
_leave(" = 0");
@@ -386,6 +375,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
struct page *primary_page,
pgoff_t final_page)
{
+ struct afs_vnode *vnode = AFS_FS_I(mapping->host);
struct page *pages[8], *page;
unsigned long count, priv;
unsigned n, offset, to, f, t;
@@ -407,8 +397,13 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
priv = page_private(primary_page);
offset = priv & AFS_PRIV_MAX;
to = priv >> AFS_PRIV_SHIFT;
+ trace_afs_page_dirty(vnode, tracepoint_string("store"),
+ primary_page->index, priv);
WARN_ON(offset == to);
+ if (offset == to)
+ trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
+ primary_page->index, priv);
if (start >= final_page || to < PAGE_SIZE)
goto no_more;
@@ -452,6 +447,9 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
}
to = t;
+ trace_afs_page_dirty(vnode, tracepoint_string("store+"),
+ page->index, priv);
+
if (!clear_page_dirty_for_io(page))
BUG();
if (test_set_page_writeback(page))
@@ -657,6 +655,7 @@ int afs_writepages(struct address_space *mapping,
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
struct pagevec pv;
+ unsigned long priv;
unsigned count, loop;
pgoff_t first = call->first, last = call->last;
@@ -676,6 +675,9 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
ASSERTCMP(pv.nr, ==, count);
for (loop = 0; loop < count; loop++) {
+ priv = page_private(pv.pages[loop]);
+ trace_afs_page_dirty(vnode, tracepoint_string("clear"),
+ pv.pages[loop]->index, priv);
set_page_private(pv.pages[loop], 0);
end_page_writeback(pv.pages[loop]);
}
@@ -783,6 +785,8 @@ int afs_page_mkwrite(struct vm_fault *vmf)
priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
priv |= 0; /* From */
+ trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
+ vmf->page->index, priv);
SetPagePrivate(vmf->page);
set_page_private(vmf->page, priv);
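A worked example of the value this hunk stores and traces as "mkwrite", assuming 4 KiB pages (so AFS_PRIV_SHIFT == 16); illustration only, not part of the patch:

	priv  = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT;	/* to   = 0x1000 */
	priv |= 0;						/* from = 0x0000 */
	/* priv == 0x10000000: the dirty range covers the whole page,
	 * which page_mkwrite must assume since any byte may be written
	 * through the mapping. */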
@@ -840,9 +844,13 @@ int afs_launder_page(struct page *page)
t = priv >> AFS_PRIV_SHIFT;
}
+ trace_afs_page_dirty(vnode, tracepoint_string("launder"),
+ page->index, priv);
ret = afs_store_data(mapping, page->index, page->index, t, f);
}
+ trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
+ page->index, priv);
set_page_private(page, 0);
ClearPagePrivate(page);