author	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-09 09:54:04 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-09 09:54:04 -0800
commit	fa82dcbf2aed65dc3ea78eaca9ea56fd926b2b10 (patch)
tree	f3ae8794c5456468a5e25ca6bb6af18a688d3749 /fs
parent	bd799eb63db4c61a5f2dc941672391fbca5bcab4 (diff)
parent	27359fd6e5f3c5db8fe544b63238b6170e8806d8 (diff)
Merge tag 'dax-fixes-4.20-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull dax fixes from Dan Williams:
 "The last of the known regression fixes and fallout from the Xarray
  conversion of the filesystem-dax implementation.

  On the path to debugging why the dax memory-failure injection test
  started failing after the Xarray conversion, a couple more fixes for
  dax_lock_mapping_entry(), now called dax_lock_page(), surfaced. Those
  plus the bug that started the hunt are now addressed. These patches
  have appeared in a -next release with no issues reported.

  Note the touches to mm/memory-failure.c are just the conversion to
  the new function signature for dax_lock_page().

  Summary:

   - Fix the Xarray conversion of fsdax to properly handle
     dax_lock_mapping_entry() in the presence of pmd entries

   - Fix inode destruction racing a new lock request"

* tag 'dax-fixes-4.20-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Fix unlock mismatch with updated API
  dax: Don't access a freed inode
  dax: Check page->mapping isn't NULL
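A minimal caller sketch of the reworked lock/unlock pair may help here. It is illustrative only: handle_dax_page() and its error handling are hypothetical, not part of this pull. The sketch assumes just what the diff below establishes, namely that dax_lock_page() returns a dax_entry_t cookie (0 when the entry could not be locked) that must be handed back to dax_unlock_page() — the shape of the mm/memory-failure.c conversion mentioned above.

	/* Hypothetical caller, in the style of the memory-failure path. */
	static int handle_dax_page(struct page *page)
	{
		dax_entry_t cookie;

		cookie = dax_lock_page(page);	/* 0: entry could not be locked */
		if (!cookie)
			return -EBUSY;

		/* ... act on the page while its DAX entry is held locked ... */

		dax_unlock_page(page, cookie);	/* pass the cookie back verbatim */
		return 0;
	}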
Diffstat (limited to 'fs')
-rw-r--r--	fs/dax.c	55
1 file changed, 38 insertions(+), 17 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 9bcce89ea18e..48132eca3761 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -232,6 +232,34 @@ static void *get_unlocked_entry(struct xa_state *xas)
 	}
 }
 
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages)
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq;
+
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+
+	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	xas_unlock_irq(xas);
+	schedule();
+	finish_wait(wq, &ewait.wait);
+
+	/*
+	 * Entry lock waits are exclusive. Wake up the next waiter since
+	 * we aren't sure we will acquire the entry lock and thus wake
+	 * the next waiter up on unlock.
+	 */
+	if (waitqueue_active(wq))
+		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
+}
+
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
 	/* If we were the only waiter woken, wake the next one */
@@ -351,21 +379,21 @@ static struct page *dax_busy_page(void *entry)
  * @page: The page whose entry we want to lock
  *
  * Context: Process context.
- * Return: %true if the entry was locked or does not need to be locked.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
  */
-bool dax_lock_mapping_entry(struct page *page)
+dax_entry_t dax_lock_page(struct page *page)
 {
 	XA_STATE(xas, NULL, 0);
 	void *entry;
-	bool locked;
 
 	/* Ensure page->mapping isn't freed while we look at it */
 	rcu_read_lock();
 	for (;;) {
 		struct address_space *mapping = READ_ONCE(page->mapping);
 
-		locked = false;
-		if (!dax_mapping(mapping))
+		entry = NULL;
+		if (!mapping || !dax_mapping(mapping))
 			break;
 
 		/*
@@ -375,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
-		locked = true;
+		entry = (void *)~0UL;
 		if (S_ISCHR(mapping->host->i_mode))
 			break;
 
@@ -389,9 +417,7 @@ bool dax_lock_mapping_entry(struct page *page)
 		entry = xas_load(&xas);
 		if (dax_is_locked(entry)) {
 			rcu_read_unlock();
-			entry = get_unlocked_entry(&xas);
-			xas_unlock_irq(&xas);
-			put_unlocked_entry(&xas, entry);
+			wait_entry_unlocked(&xas, entry);
 			rcu_read_lock();
 			continue;
 		}
@@ -400,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page)
 		break;
 	}
 	rcu_read_unlock();
-	return locked;
+	return (dax_entry_t)entry;
 }
 
-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 	struct address_space *mapping = page->mapping;
 	XA_STATE(xas, &mapping->i_pages, page->index);
-	void *entry;
 
 	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	rcu_read_lock();
-	entry = xas_load(&xas);
-	rcu_read_unlock();
-	entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
-	dax_unlock_entry(&xas, entry);
+	dax_unlock_entry(&xas, (void *)cookie);
 }
 
 /*
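The ordering inside wait_entry_unlocked() is the crux of the "dax: Don't access a freed inode" fix. The critical sequence is repeated below with the reasoning spelled out; the annotations are mine, not the patch author's, and rest on one assumption: that dax_entry_waitqueue() returns an entry in fs/dax.c's global wait_table hash rather than anything owned by the inode.

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);	/* resolve wq while xas->xa is still valid */
	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);	/* i_pages lock dropped: clear_inode() may free the mapping now */
	schedule();		/* still safe: wq lives in the global table, not in the inode */
	finish_wait(wq, &ewait.wait);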