author    Christoph Hellwig <hch@lst.de>    2023-01-25 14:34:36 +0100
committer Andrew Morton <akpm@linux-foundation.org>    2023-02-02 22:33:34 -0800
commit    3222d8c2a7f888bf38b845b125e9470b12108a4d (patch)
tree      c6a764721e2ae8444f8c4d4a2d784226b7c19d9e /drivers/nvdimm
parent    05cda97ecb7046f4192a921741aae33b300dd628 (diff)
block: remove ->rw_page
The ->rw_page method is a special-purpose bypass of the usual bio handling path. It is limited to synchronous, single-page reads and writes, which causes a lot of extra code in the drivers, the callers, and the block layer. The only remaining user is the MM swap code.

Switch that swap code to simply submit a single-vec on-stack bio and synchronously wait on it, based on a newly added QUEUE_FLAG_SYNCHRONOUS flag that is set by the drivers currently implementing ->rw_page. While this touches one extra cache line and executes extra code, it simplifies the block layer and the drivers, and it ensures that all features are properly supported by all drivers; right now, for example, ->rw_page bypassed cgroup writeback entirely.

[akpm@linux-foundation.org: fix comment typo, per Dan]
Link: https://lkml.kernel.org/r/20230125133436.447864-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Keith Busch <kbusch@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
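For reference, the pattern the swap code switches to looks roughly like the sketch below: a bio initialized on the stack with a single bio_vec, then submitted and waited on with submit_bio_wait(). This is a minimal illustration of the approach described above, not the exact mm/page_io.c conversion; the helper name sync_rw_one_page is hypothetical.

/*
 * Illustrative sketch only: replace a ->rw_page call with a single-vec
 * on-stack bio that is submitted and waited on synchronously.
 */
static int sync_rw_one_page(struct block_device *bdev, sector_t sector,
		struct page *page, enum req_op op)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, bdev, &bv, 1, op);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, thp_size(page), 0);

	/* blocks until completion; returns 0 or a negative errno */
	return submit_bio_wait(&bio);
}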
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r--  drivers/nvdimm/btt.c  | 16
-rw-r--r--  drivers/nvdimm/pmem.c | 24
2 files changed, 2 insertions(+), 38 deletions(-)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0297b7882e33..d5593b0dc700 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1482,20 +1482,6 @@ static void btt_submit_bio(struct bio *bio)
 	bio_endio(bio);
 }
 
-static int btt_rw_page(struct block_device *bdev, sector_t sector,
-		struct page *page, enum req_op op)
-{
-	struct btt *btt = bdev->bd_disk->private_data;
-	int rc;
-
-	rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
-	if (rc == 0)
-		page_endio(page, op_is_write(op), 0);
-
-	return rc;
-}
-
-
 static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
 	/* some standard values */
@@ -1508,7 +1494,6 @@ static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
 static const struct block_device_operations btt_fops = {
 	.owner = THIS_MODULE,
 	.submit_bio = btt_submit_bio,
-	.rw_page = btt_rw_page,
 	.getgeo = btt_getgeo,
 };
 
@@ -1530,6 +1515,7 @@ static int btt_blk_init(struct btt *btt)
 	blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
 	blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
 
 	if (btt_meta_size(btt)) {
 		rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 96e6e9a5f235..ceea55f621cc 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -238,28 +238,6 @@ static void pmem_submit_bio(struct bio *bio)
 	bio_endio(bio);
 }
 
-static int pmem_rw_page(struct block_device *bdev, sector_t sector,
-		struct page *page, enum req_op op)
-{
-	struct pmem_device *pmem = bdev->bd_disk->private_data;
-	blk_status_t rc;
-
-	if (op_is_write(op))
-		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
-	else
-		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
-	/*
-	 * The ->rw_page interface is subtle and tricky. The core
-	 * retries on any error, so we can only invoke page_endio() in
-	 * the successful completion case. Otherwise, we'll see crashes
-	 * caused by double completion.
-	 */
-	if (rc == 0)
-		page_endio(page, op_is_write(op), 0);
-
-	return blk_status_to_errno(rc);
-}
-
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
 __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 		long nr_pages, enum dax_access_mode mode, void **kaddr,
@@ -310,7 +288,6 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 static const struct block_device_operations pmem_fops = {
 	.owner = THIS_MODULE,
 	.submit_bio = pmem_submit_bio,
-	.rw_page = pmem_rw_page,
 };
 
 static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
@@ -565,6 +542,7 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
 	blk_queue_max_hw_sectors(q, UINT_MAX);
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
 	if (pmem->pfn_flags & PFN_MAP)
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
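
On the consumer side (outside this drivers/nvdimm diffstat), checking for such a device reduces to a queue-flag test. A minimal sketch, assuming the bdev_synchronous() helper that the block-layer part of this patch adds to include/linux/blkdev.h:

/*
 * Sketch of the flag's consumer: swap setup can mark a device for the
 * synchronous I/O path (SWP_SYNCHRONOUS_IO) with a plain queue-flag
 * test instead of probing for a ->rw_page method.
 */
static inline bool bdev_synchronous(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
			&bdev_get_queue(bdev)->queue_flags);
}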