author     Dan Williams <dan.j.williams@intel.com>    2016-03-22 00:22:16 -0700
committer  Dan Williams <dan.j.williams@intel.com>    2016-04-22 12:26:23 -0700
commit     200c79da824c978fcf6eec1dc9c0a1e521133267 (patch)
tree       bbc2182ead10eaa034278bb42c01a924db9a7d7d /drivers/nvdimm/claim.c
parent     947df02d255a6a81a3832e831c5ca02078cfd529 (diff)
libnvdimm, pmem, pfn: make pmem_rw_bytes generic and refactor pfn setup
In preparation for providing an alternative (to block device) access
mechanism to persistent memory, convert pmem_rw_bytes() to
nsio_rw_bytes(). This allows ->rw_bytes() functionality without requiring
a 'struct pmem_device' to be instantiated.

In other words, when ->rw_bytes() is in use i/o is driven through
'struct nd_namespace_io', otherwise it is driven through
'struct pmem_device' and the block layer.

This consolidates the disjoint calls to devm_exit_badblocks() and
devm_memunmap() into a common devm_nsio_disable() and cleans up the
init path to use a unified pmem_attach_disk() implementation.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
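For context, once nsio_rw_bytes() is installed as ndns->rw_bytes a claiming
consumer can issue byte-granular i/o against the namespace without ever
instantiating a 'struct pmem_device'. A minimal sketch under that assumption;
the helper read_info_block() and the SZ_4K offset are illustrative only, not
part of this patch:

/*
 * Hypothetical consumer: read a 512-byte info block through the generic
 * ->rw_bytes() op installed by devm_nsio_enable() below.  Assumes the
 * usual <linux/nd.h> and <linux/sizes.h> headers are available.
 */
static int read_info_block(struct nd_namespace_common *ndns, void *buf)
{
	/*
	 * nsio_rw_bytes() bounds-checks the request against nsio->size,
	 * fails reads that intersect known badblocks with -EIO, and
	 * flushes writes with wmb_pmem().
	 */
	return ndns->rw_bytes(ndns, SZ_4K, buf, 512, READ);
}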
Diffstat (limited to 'drivers/nvdimm/claim.c')
-rw-r--r--  drivers/nvdimm/claim.c | 61
1 file changed, 61 insertions, 0 deletions
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index e8f03b0e95e4..6bbd0a36994a 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -12,6 +12,7 @@
*/
#include <linux/device.h>
#include <linux/sizes.h>
+#include <linux/pmem.h>
#include "nd-core.h"
#include "pfn.h"
#include "btt.h"
@@ -199,3 +200,63 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);
+
+static int nsio_rw_bytes(struct nd_namespace_common *ndns,
+ resource_size_t offset, void *buf, size_t size, int rw)
+{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+ if (unlikely(offset + size > nsio->size)) {
+ dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
+ return -EFAULT;
+ }
+
+ if (rw == READ) {
+ unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+
+ if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
+ return -EIO;
+ return memcpy_from_pmem(buf, nsio->addr + offset, size);
+ } else {
+ memcpy_to_pmem(nsio->addr + offset, buf, size);
+ wmb_pmem();
+ }
+
+ return 0;
+}
+
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+{
+ struct resource *res = &nsio->res;
+ struct nd_namespace_common *ndns = &nsio->common;
+
+ nsio->size = resource_size(res);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev))) {
+ dev_warn(dev, "could not reserve region %pR\n", res);
+ return -EBUSY;
+ }
+
+ ndns->rw_bytes = nsio_rw_bytes;
+ if (devm_init_badblocks(dev, &nsio->bb))
+ return -ENOMEM;
+ nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
+ &nsio->res);
+
+ nsio->addr = devm_memremap(dev, res->start, resource_size(res),
+ ARCH_MEMREMAP_PMEM);
+ if (IS_ERR(nsio->addr))
+ return PTR_ERR(nsio->addr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_nsio_enable);
+
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
+{
+ struct resource *res = &nsio->res;
+
+ devm_memunmap(dev, nsio->addr);
+ devm_exit_badblocks(dev, &nsio->bb);
+ devm_release_mem_region(dev, res->start, resource_size(res));
+}
+EXPORT_SYMBOL_GPL(devm_nsio_disable);
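
The sketch below shows roughly how a claiming driver, such as the unified
pmem_attach_disk() path this series builds toward, might pair the two new
helpers: devm_nsio_enable() on probe, and a single devm_nsio_disable() on a
later setup failure in place of the previously disjoint
devm_exit_badblocks()/devm_memunmap() calls. The function names
example_attach() and example_setup() are purely illustrative:

/* placeholder for whatever block-device or btt setup a real driver does */
static int example_setup(struct device *dev, struct nd_namespace_common *ndns)
{
	return 0;
}

static int example_attach(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	int rc;

	/* reserve the region, populate badblocks, memremap, set ->rw_bytes */
	rc = devm_nsio_enable(dev, nsio);
	if (rc)
		return rc;

	rc = example_setup(dev, ndns);
	if (rc)
		/* one teardown call: unmap, free badblocks, release region */
		devm_nsio_disable(dev, nsio);
	return rc;
}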