author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-10 10:25:57 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-10 10:25:57 -0700
commit | 9f3a0941fb5efaa4d27911e251dc595034d58baa (patch)
tree | 7212d9872b41b73a0b3c4f8c991039b639add212 /drivers/dax
parent | fbe173e3ffbd897b5a859020d714c0eaf4af2a1a (diff)
parent | e13e75b86ef2f88e3a47d672dd4c52a293efb95b (diff)
Merge tag 'libnvdimm-for-4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dan Williams:
"This cycle was was not something I ever want to repeat as there were
several late changes that have only now just settled.
Half of the branch up to commit d2c997c0f145 ("fs, dax: use
page->mapping to warn...") has been in -next for several releases.
The of_pmem driver and the address range scrub rework were late
arrivals, and the dax work was scaled back at the last moment.
The of_pmem driver missed a previous merge window due to an oversight.
A sense of obligation to rectify that miss is why it is included for
4.17. It has acks from PowerPC folks. Stephen reported a build failure
that only occurs when merging it with your latest tree; for now I have
fixed that up by disabling modular builds of of_pmem. A test merge
with your tree has received a build-success report from the 0day robot
across 156 configs.
An initial version of the ARS rework was submitted before the merge
window. It is self-contained to libnvdimm, is a net code reduction, and
passes all unit tests.
The filesystem-dax changes are based on the wait_var_event()
functionality from tip/sched/core. However, late review feedback
showed that those changes regressed truncate performance to a large
degree. The branch was rewound to drop the truncate behavior change
and now only includes preparation patches and cleanups (with full acks
and reviews). The finalization of this dax-dma-vs-truncate work will
need to wait for 4.18.
Summary:
- A rework of the filesystem-dax implementation provides for detection
of unmap operations (truncate / hole punch) colliding with
in-progress device-DMA (a hedged sketch of the page->mapping
bookkeeping behind this detection follows the summary). A fix for
these collisions remains a work-in-progress pending resolution of
truncate latency and starvation regressions.
- The of_pmem driver expands the users of libnvdimm beyond x86 and
ACPI, describing an implementation of persistent memory on PowerPC
with Open Firmware / Device Tree (a sketch of the driver shape also
follows the summary).
- Address Range Scrub (ARS) handling is completely rewritten to
account for the fact that ARS may run for hundreds of seconds with
no platform-defined way to cancel it. ARS no longer blocks namespace
initialization.
- The NVDIMM Namespace Label implementation is updated to handle
label areas as small as 1K, down from 128K.
- Miscellaneous cleanups and updates to unit test infrastructure"
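
As referenced in the first summary item, the core of the new collision
detection is simple ownership bookkeeping on the DAX page itself. Below is a
minimal, hedged sketch of that idea, not the merged fs/dax.c code: the helper
names carry a _sketch suffix to flag them as illustrative. The owning
address_space is recorded in page->mapping when a DAX entry is mapped in, and
teardown warns if the page still looks busy (an elevated refcount during
truncate implies in-flight DMA).

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative sketch of the page->mapping ownership check; not fs/dax.c. */
static void dax_associate_page_sketch(struct page *page,
				      struct address_space *mapping,
				      pgoff_t index)
{
	WARN_ON_ONCE(page->mapping);	/* page should not already be owned */
	page->mapping = mapping;
	page->index = index;
}

static void dax_disassociate_page_sketch(struct page *page,
					 struct address_space *mapping,
					 bool trunc)
{
	/* an elevated refcount at truncate time points at in-flight DMA */
	WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
	WARN_ON_ONCE(page->mapping && page->mapping != mapping);
	page->mapping = NULL;
	page->index = 0;
}

As the pull message notes, this cycle only adds the warning; the actual fix
(resolving the collision rather than reporting it) is the part deferred to
4.18.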
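
The of_pmem item, likewise, boils down to a small device-tree platform driver
that registers an nvdimm bus for a described region. The sketch below shows
that general shape under stated assumptions: the "pmem-region" compatible, the
priv structure, and the probe/remove names are illustrative rather than the
merged of_pmem.c, while nvdimm_bus_register()/nvdimm_bus_unregister() and the
of_node descriptor field are the libnvdimm pieces this series builds on.

/* Illustrative sketch of a device-tree pmem bus driver; not the merged of_pmem.c. */
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct of_pmem_sketch_priv {
	struct nvdimm_bus_descriptor desc;
	struct nvdimm_bus *bus;
};

static int of_pmem_sketch_probe(struct platform_device *pdev)
{
	struct of_pmem_sketch_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->desc.provider_name = "of_pmem";
	priv->desc.module = THIS_MODULE;
	priv->desc.of_node = pdev->dev.of_node; /* descriptor field added by this series */

	priv->bus = nvdimm_bus_register(&pdev->dev, &priv->desc);
	if (!priv->bus)
		return -ENODEV;

	platform_set_drvdata(pdev, priv);
	/* pmem regions for each described range would be registered here */
	return 0;
}

static int of_pmem_sketch_remove(struct platform_device *pdev)
{
	struct of_pmem_sketch_priv *priv = platform_get_drvdata(pdev);

	nvdimm_bus_unregister(priv->bus);
	return 0;
}

static const struct of_device_id of_pmem_sketch_match[] = {
	{ .compatible = "pmem-region" },
	{ },
};
MODULE_DEVICE_TABLE(of, of_pmem_sketch_match);

static struct platform_driver of_pmem_sketch_driver = {
	.probe = of_pmem_sketch_probe,
	.remove = of_pmem_sketch_remove,
	.driver = {
		.name = "of_pmem_sketch",
		.of_match_table = of_pmem_sketch_match,
	},
};
module_platform_driver(of_pmem_sketch_driver);

MODULE_LICENSE("GPL");

Registering per-range pmem regions on top of the bus is left out here; the
point is only that Open Firmware platforms reach libnvdimm through the
ordinary platform-device path.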
* tag 'libnvdimm-for-4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (39 commits)
libnvdimm, of_pmem: workaround OF_NUMA=n build error
nfit, address-range-scrub: add module option to skip initial ars
nfit, address-range-scrub: rework and simplify ARS state machine
nfit, address-range-scrub: determine one platform max_ars value
powerpc/powernv: Create platform devs for nvdimm buses
doc/devicetree: Persistent memory region bindings
libnvdimm: Add device-tree based driver
libnvdimm: Add of_node to region and bus descriptors
libnvdimm, region: quiet region probe
libnvdimm, namespace: use a safe lookup for dimm device name
libnvdimm, dimm: fix dpa reservation vs uninitialized label area
libnvdimm, testing: update the default smart ctrl_temperature
libnvdimm, testing: Add emulation for smart injection commands
nfit, address-range-scrub: introduce nfit_spa->ars_state
libnvdimm: add an api to cast a 'struct nd_region' to its 'struct device'
nfit, address-range-scrub: fix scrub in-progress reporting
dax, dm: allow device-mapper to operate without dax support
dax: introduce CONFIG_DAX_DRIVER
fs, dax: use page->mapping to warn if truncate collides with a busy page
ext2, dax: introduce ext2_dax_aops
...
Diffstat (limited to 'drivers/dax')
-rw-r--r-- | drivers/dax/Kconfig | 5
-rw-r--r-- | drivers/dax/device.c | 38
-rw-r--r-- | drivers/dax/pmem.c | 18
-rw-r--r-- | drivers/dax/super.c | 15
4 files changed, 37 insertions, 39 deletions
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index b79aa8f7a497..e0700bf4893a 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -1,3 +1,7 @@
+config DAX_DRIVER
+	select DAX
+	bool
+
 menuconfig DAX
 	tristate "DAX: direct access to differentiated memory"
 	select SRCU
@@ -16,7 +20,6 @@ config DEV_DAX
 	  baseline memory pool.  Mappings of a /dev/daxX.Y device impose
 	  restrictions that make the mapping behavior deterministic.
 
-
 config DEV_DAX_PMEM
 	tristate "PMEM DAX: direct access to persistent memory"
 	depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 0b61f48f21a6..be8606457f27 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -257,8 +257,8 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 	dax_region = dev_dax->region;
 	if (dax_region->align > PAGE_SIZE) {
-		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
-				__func__, dax_region->align, fault_size);
+		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
+				dax_region->align, fault_size);
 		return VM_FAULT_SIGBUS;
 	}
@@ -267,8 +267,7 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
-				vmf->pgoff);
+		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
 		return VM_FAULT_SIGBUS;
 	}
@@ -299,14 +298,14 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 	dax_region = dev_dax->region;
 	if (dax_region->align > PMD_SIZE) {
-		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
-				__func__, dax_region->align, fault_size);
+		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
+				dax_region->align, fault_size);
 		return VM_FAULT_SIGBUS;
 	}
 
 	/* dax pmd mappings require pfn_t_devmap() */
 	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
-		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
+		dev_dbg(dev, "region lacks devmap flags\n");
 		return VM_FAULT_SIGBUS;
 	}
@@ -323,8 +322,7 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 	pgoff = linear_page_index(vmf->vma, pmd_addr);
 	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
-				pgoff);
+		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
 		return VM_FAULT_SIGBUS;
 	}
@@ -351,14 +349,14 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 	dax_region = dev_dax->region;
 	if (dax_region->align > PUD_SIZE) {
-		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
-				__func__, dax_region->align, fault_size);
+		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
+				dax_region->align, fault_size);
 		return VM_FAULT_SIGBUS;
 	}
 
 	/* dax pud mappings require pfn_t_devmap() */
 	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
-		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
+		dev_dbg(dev, "region lacks devmap flags\n");
 		return VM_FAULT_SIGBUS;
 	}
@@ -375,8 +373,7 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
 	pgoff = linear_page_index(vmf->vma, pud_addr);
 	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
 	if (phys == -1) {
-		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
-				pgoff);
+		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
 		return VM_FAULT_SIGBUS;
 	}
@@ -399,9 +396,8 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
 	struct file *filp = vmf->vma->vm_file;
 	struct dev_dax *dev_dax = filp->private_data;
 
-	dev_dbg(&dev_dax->dev, "%s: %s: %s (%#lx - %#lx) size = %d\n", __func__,
-			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
-			? "write" : "read",
+	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
+			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
 			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
 
 	id = dax_read_lock();
@@ -460,7 +456,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct dev_dax *dev_dax = filp->private_data;
 	int rc, id;
 
-	dev_dbg(&dev_dax->dev, "%s\n", __func__);
+	dev_dbg(&dev_dax->dev, "trace\n");
 
 	/*
 	 * We lock to check dax_dev liveness and will re-check at
@@ -518,7 +514,7 @@ static int dax_open(struct inode *inode, struct file *filp)
 	struct inode *__dax_inode = dax_inode(dax_dev);
 	struct dev_dax *dev_dax = dax_get_private(dax_dev);
 
-	dev_dbg(&dev_dax->dev, "%s\n", __func__);
+	dev_dbg(&dev_dax->dev, "trace\n");
 	inode->i_mapping = __dax_inode->i_mapping;
 	inode->i_mapping->host = __dax_inode;
 	filp->f_mapping = inode->i_mapping;
@@ -533,7 +529,7 @@ static int dax_release(struct inode *inode, struct file *filp)
 {
 	struct dev_dax *dev_dax = filp->private_data;
 
-	dev_dbg(&dev_dax->dev, "%s\n", __func__);
+	dev_dbg(&dev_dax->dev, "trace\n");
 	return 0;
 }
@@ -575,7 +571,7 @@ static void unregister_dev_dax(void *dev)
 	struct inode *inode = dax_inode(dax_dev);
 	struct cdev *cdev = inode->i_cdev;
 
-	dev_dbg(dev, "%s\n", __func__);
+	dev_dbg(dev, "trace\n");
 	kill_dev_dax(dev_dax);
 	cdev_device_del(cdev, dev);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 31b6ecce4c64..fd49b24fd6af 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -34,7 +34,7 @@ static void dax_pmem_percpu_release(struct percpu_ref *ref)
 {
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
 
-	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	dev_dbg(dax_pmem->dev, "trace\n");
 	complete(&dax_pmem->cmp);
 }
@@ -43,7 +43,7 @@ static void dax_pmem_percpu_exit(void *data)
 	struct percpu_ref *ref = data;
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
 
-	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	dev_dbg(dax_pmem->dev, "trace\n");
 	wait_for_completion(&dax_pmem->cmp);
 	percpu_ref_exit(ref);
 }
@@ -53,7 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
 	struct percpu_ref *ref = data;
 	struct dax_pmem *dax_pmem = to_dax_pmem(ref);
 
-	dev_dbg(dax_pmem->dev, "%s\n", __func__);
+	dev_dbg(dax_pmem->dev, "trace\n");
 	percpu_ref_kill(ref);
 }
@@ -150,17 +150,7 @@ static struct nd_device_driver dax_pmem_driver = {
 	.type = ND_DRIVER_DAX_PMEM,
 };
 
-static int __init dax_pmem_init(void)
-{
-	return nd_driver_register(&dax_pmem_driver);
-}
-module_init(dax_pmem_init);
-
-static void __exit dax_pmem_exit(void)
-{
-	driver_unregister(&dax_pmem_driver.drv);
-}
-module_exit(dax_pmem_exit);
+module_nd_driver(dax_pmem_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ecdc292aa4e4..2b2332b605e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -124,10 +124,19 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
 		return len < 0 ? len : -EIO;
 	}
 
-	if ((IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn))
-			|| pfn_t_devmap(pfn))
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
+		/*
+		 * An arch that has enabled the pmem api should also
+		 * have its drivers support pfn_t_devmap()
+		 *
+		 * This is a developer warning and should not trigger in
+		 * production. dax_flush() will crash since it depends
+		 * on being able to do (page_address(pfn_to_page())).
+		 */
+		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
+	} else if (pfn_t_devmap(pfn)) {
 		/* pass */;
-	else {
+	} else {
 		pr_debug("VFS (%s): error: dax support not enabled\n",
 				sb->s_id);
 		return -EOPNOTSUPP;