author     Christoph Hellwig <hch@lst.de>                     2021-07-29 08:48:42 +0200
committer  Martin K. Petersen <martin.petersen@oracle.com>    2021-07-30 22:22:36 -0400
commit     ead09dd3aed5cc6a6c6288a87a5bfa9bbc8d5ecf
tree       745a435c272dbf099f340c69c827ee43f6f016a8 /block
parent     ba51bdafaafc065019c6f6a2cdae006d176cee48
scsi: bsg: Simplify device registration
Use the per-device cdev_device_add() interface to store the bsg data in the
char device inode, and thus remove the need to embed the bsg_class_device
structure in the request_queue.
Link: https://lore.kernel.org/r/20210729064845.1044147-2-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
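
Editor's note: the change boils down to the standard character-device idiom: embed a
struct cdev in the driver's own per-device structure, register it together with its
struct device via cdev_device_add(), and let ->open() recover the structure from
inode->i_cdev with container_of() instead of looking the minor number up in a hash
table or IDR. The following is a minimal sketch of that idiom, not code from the
patch; the example_* names are hypothetical stand-ins for struct bsg_device and its
helpers.

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>

/* Hypothetical per-device structure; struct bsg_device plays this role in the patch. */
struct example_device {
        struct device device;
        struct cdev cdev;
};

static inline struct example_device *to_example_device(struct inode *inode)
{
        /* inode->i_cdev points at the embedded cdev once cdev_device_add() has run. */
        return container_of(inode->i_cdev, struct example_device, cdev);
}

static int example_open(struct inode *inode, struct file *file)
{
        struct example_device *ed = to_example_device(inode);

        /*
         * bsg re-derives the device from the inode on each call; caching it in
         * file->private_data, as done here, is the more common variant.
         */
        file->private_data = ed;
        return 0;
}
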
Diffstat (limited to 'block')
-rw-r--r--   block/bsg-lib.c    11
-rw-r--r--   block/bsg.c       304
2 files changed, 78 insertions(+), 237 deletions(-)
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index a89d80102304..fe43f5fda6e5 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -6,6 +6,7 @@
  * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
  * Copyright (C) 2011 Mike Christie
  */
+#include <linux/bsg.h>
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
 #include <linux/delay.h>
@@ -19,6 +20,7 @@
 
 struct bsg_set {
         struct blk_mq_tag_set   tag_set;
+        struct bsg_device *bd;
         bsg_job_fn              *job_fn;
         bsg_timeout_fn          *timeout_fn;
 };
@@ -327,7 +329,7 @@ void bsg_remove_queue(struct request_queue *q)
                 struct bsg_set *bset =
                         container_of(q->tag_set, struct bsg_set, tag_set);
 
-                bsg_unregister_queue(q);
+                bsg_unregister_queue(bset->bd);
                 blk_cleanup_queue(q);
                 blk_mq_free_tag_set(&bset->tag_set);
                 kfree(bset);
@@ -396,10 +398,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
         q->queuedata = dev;
         blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
-        ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
-        if (ret) {
-                printk(KERN_ERR "%s: bsg interface failed to "
-                       "initialize - register queue\n", dev->kobj.name);
+        bset->bd = bsg_register_queue(q, dev, name, &bsg_transport_ops);
+        if (IS_ERR(bset->bd)) {
+                ret = PTR_ERR(bset->bd);
                 goto out_cleanup_queue;
         }
 
diff --git a/block/bsg.c b/block/bsg.c
index 3dbfd2c6aef3..83a095185d33 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -20,38 +20,29 @@
 #define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
 #define BSG_VERSION     "0.4"
 
-#define bsg_dbg(bd, fmt, ...) \
-        pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
-
 struct bsg_device {
         struct request_queue *queue;
-        spinlock_t lock;
-        struct hlist_node dev_list;
-        refcount_t ref_count;
-        char name[20];
+        const struct bsg_ops *ops;
+        struct device device;
+        struct cdev cdev;
         int max_queue;
 };
 
+static inline struct bsg_device *to_bsg_device(struct inode *inode)
+{
+        return container_of(inode->i_cdev, struct bsg_device, cdev);
+}
+
 #define BSG_DEFAULT_CMDS        64
 #define BSG_MAX_DEVS            32768
 
-static DEFINE_MUTEX(bsg_mutex);
-static DEFINE_IDR(bsg_minor_idr);
-
-#define BSG_LIST_ARRAY_SIZE     8
-static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
-
+static DEFINE_IDA(bsg_minor_ida);
 static struct class *bsg_class;
 static int bsg_major;
 
-static inline struct hlist_head *bsg_dev_idx_hash(int index)
-{
-        return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
-}
-
 #define uptr64(val) ((void __user *)(uintptr_t)(val))
 
-static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
+static int bsg_sg_io(struct bsg_device *bd, fmode_t mode, void __user *uarg)
 {
         struct request *rq;
         struct bio *bio;
@@ -61,21 +52,18 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
         if (copy_from_user(&hdr, uarg, sizeof(hdr)))
                 return -EFAULT;
 
-        if (!q->bsg_dev.class_dev)
-                return -ENXIO;
-
         if (hdr.guard != 'Q')
                 return -EINVAL;
-        ret = q->bsg_dev.ops->check_proto(&hdr);
+        ret = bd->ops->check_proto(&hdr);
         if (ret)
                 return ret;
 
-        rq = blk_get_request(q, hdr.dout_xfer_len ?
+        rq = blk_get_request(bd->queue, hdr.dout_xfer_len ?
                         REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
         if (IS_ERR(rq))
                 return PTR_ERR(rq);
 
-        ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
+        ret = bd->ops->fill_hdr(rq, &hdr, mode);
         if (ret) {
                 blk_put_request(rq);
                 return ret;
@@ -83,17 +71,17 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
 
         rq->timeout = msecs_to_jiffies(hdr.timeout);
         if (!rq->timeout)
-                rq->timeout = q->sg_timeout;
+                rq->timeout = rq->q->sg_timeout;
         if (!rq->timeout)
                 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
         if (rq->timeout < BLK_MIN_SG_TIMEOUT)
                 rq->timeout = BLK_MIN_SG_TIMEOUT;
 
         if (hdr.dout_xfer_len) {
-                ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
+                ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.dout_xferp),
                                 hdr.dout_xfer_len, GFP_KERNEL);
         } else if (hdr.din_xfer_len) {
-                ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
+                ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr.din_xferp),
                                 hdr.din_xfer_len, GFP_KERNEL);
         }
 
@@ -103,171 +91,50 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
         bio = rq->bio;
 
         blk_execute_rq(NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
-        ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
+        ret = bd->ops->complete_rq(rq, &hdr);
         blk_rq_unmap_user(bio);
 
 out_free_rq:
-        rq->q->bsg_dev.ops->free_rq(rq);
+        bd->ops->free_rq(rq);
         blk_put_request(rq);
         if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
                 return -EFAULT;
         return ret;
 }
 
-static struct bsg_device *bsg_alloc_device(void)
-{
-        struct bsg_device *bd;
-
-        bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
-        if (unlikely(!bd))
-                return NULL;
-
-        spin_lock_init(&bd->lock);
-        bd->max_queue = BSG_DEFAULT_CMDS;
-        INIT_HLIST_NODE(&bd->dev_list);
-        return bd;
-}
-
-static int bsg_put_device(struct bsg_device *bd)
-{
-        struct request_queue *q = bd->queue;
-
-        mutex_lock(&bsg_mutex);
-
-        if (!refcount_dec_and_test(&bd->ref_count)) {
-                mutex_unlock(&bsg_mutex);
-                return 0;
-        }
-
-        hlist_del(&bd->dev_list);
-        mutex_unlock(&bsg_mutex);
-
-        bsg_dbg(bd, "tearing down\n");
-
-        /*
-         * close can always block
-         */
-        kfree(bd);
-        blk_put_queue(q);
-        return 0;
-}
-
-static struct bsg_device *bsg_add_device(struct inode *inode,
-                                        struct request_queue *rq,
-                                        struct file *file)
-{
-        struct bsg_device *bd;
-        unsigned char buf[32];
-
-        lockdep_assert_held(&bsg_mutex);
-
-        if (!blk_get_queue(rq))
-                return ERR_PTR(-ENXIO);
-
-        bd = bsg_alloc_device();
-        if (!bd) {
-                blk_put_queue(rq);
-                return ERR_PTR(-ENOMEM);
-        }
-
-        bd->queue = rq;
-
-        refcount_set(&bd->ref_count, 1);
-        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
-
-        strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
-        bsg_dbg(bd, "bound to <%s>, max queue %d\n",
-                format_dev_t(buf, inode->i_rdev), bd->max_queue);
-
-        return bd;
-}
-
-static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
-{
-        struct bsg_device *bd;
-
-        lockdep_assert_held(&bsg_mutex);
-
-        hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
-                if (bd->queue == q) {
-                        refcount_inc(&bd->ref_count);
-                        goto found;
-                }
-        }
-        bd = NULL;
-found:
-        return bd;
-}
-
-static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
-{
-        struct bsg_device *bd;
-        struct bsg_class_device *bcd;
-
-        /*
-         * find the class device
-         */
-        mutex_lock(&bsg_mutex);
-        bcd = idr_find(&bsg_minor_idr, iminor(inode));
-
-        if (!bcd) {
-                bd = ERR_PTR(-ENODEV);
-                goto out_unlock;
-        }
-
-        bd = __bsg_get_device(iminor(inode), bcd->queue);
-        if (!bd)
-                bd = bsg_add_device(inode, bcd->queue, file);
-
-out_unlock:
-        mutex_unlock(&bsg_mutex);
-        return bd;
-}
-
 static int bsg_open(struct inode *inode, struct file *file)
 {
-        struct bsg_device *bd;
-
-        bd = bsg_get_device(inode, file);
-
-        if (IS_ERR(bd))
-                return PTR_ERR(bd);
-
-        file->private_data = bd;
+        if (!blk_get_queue(to_bsg_device(inode)->queue))
+                return -ENXIO;
         return 0;
 }
 
 static int bsg_release(struct inode *inode, struct file *file)
 {
-        struct bsg_device *bd = file->private_data;
-
-        file->private_data = NULL;
-        return bsg_put_device(bd);
+        blk_put_queue(to_bsg_device(inode)->queue);
+        return 0;
 }
 
 static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
 {
-        return put_user(bd->max_queue, uarg);
+        return put_user(READ_ONCE(bd->max_queue), uarg);
 }
 
 static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
 {
-        int queue;
+        int max_queue;
 
-        if (get_user(queue, uarg))
+        if (get_user(max_queue, uarg))
                 return -EFAULT;
-        if (queue < 1)
+        if (max_queue < 1)
                 return -EINVAL;
-
-        spin_lock_irq(&bd->lock);
-        bd->max_queue = queue;
-        spin_unlock_irq(&bd->lock);
+        WRITE_ONCE(bd->max_queue, max_queue);
         return 0;
 }
 
 static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-        struct bsg_device *bd = file->private_data;
+        struct bsg_device *bd = to_bsg_device(file_inode(file));
         struct request_queue *q = bd->queue;
         void __user *uarg = (void __user *) arg;
         int __user *intp = uarg;
@@ -312,7 +179,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
         case SG_EMULATED_HOST:
                 return put_user(1, intp);
         case SG_IO:
-                return bsg_sg_io(q, file->f_mode, uarg);
+                return bsg_sg_io(bd, file->f_mode, uarg);
         case SCSI_IOCTL_SEND_COMMAND:
                 pr_warn_ratelimited("%s: calling unsupported SCSI_IOCTL_SEND_COMMAND\n",
                                 current->comm);
@@ -331,83 +198,66 @@ static const struct file_operations bsg_fops = {
         .llseek         =       default_llseek,
 };
 
-void bsg_unregister_queue(struct request_queue *q)
+void bsg_unregister_queue(struct bsg_device *bd)
 {
-        struct bsg_class_device *bcd = &q->bsg_dev;
-
-        if (!bcd->class_dev)
-                return;
-
-        mutex_lock(&bsg_mutex);
-        idr_remove(&bsg_minor_idr, bcd->minor);
-        if (q->kobj.sd)
-                sysfs_remove_link(&q->kobj, "bsg");
-        device_unregister(bcd->class_dev);
-        bcd->class_dev = NULL;
-        mutex_unlock(&bsg_mutex);
+        if (bd->queue->kobj.sd)
+                sysfs_remove_link(&bd->queue->kobj, "bsg");
+        cdev_device_del(&bd->cdev, &bd->device);
+        ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+        kfree(bd);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
-int bsg_register_queue(struct request_queue *q, struct device *parent,
-                const char *name, const struct bsg_ops *ops)
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+                struct device *parent, const char *name,
+                const struct bsg_ops *ops)
 {
-        struct bsg_class_device *bcd;
-        dev_t dev;
+        struct bsg_device *bd;
         int ret;
-        struct device *class_dev = NULL;
-
-        /*
-         * we need a proper transport to send commands, not a stacked device
-         */
-        if (!queue_is_mq(q))
-                return 0;
-        bcd = &q->bsg_dev;
-        memset(bcd, 0, sizeof(*bcd));
-
-        mutex_lock(&bsg_mutex);
+        bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+        if (!bd)
+                return ERR_PTR(-ENOMEM);
+        bd->max_queue = BSG_DEFAULT_CMDS;
+        bd->queue = q;
+        bd->ops = ops;
 
-        ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+        ret = ida_simple_get(&bsg_minor_ida, 0, BSG_MAX_DEVS, GFP_KERNEL);
         if (ret < 0) {
-                if (ret == -ENOSPC) {
-                        printk(KERN_ERR "bsg: too many bsg devices\n");
-                        ret = -EINVAL;
-                }
-                goto unlock;
-        }
-
-        bcd->minor = ret;
-        bcd->queue = q;
-        bcd->ops = ops;
-        dev = MKDEV(bsg_major, bcd->minor);
-        class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
-        if (IS_ERR(class_dev)) {
-                ret = PTR_ERR(class_dev);
-                goto idr_remove;
+                if (ret == -ENOSPC)
+                        dev_err(parent, "bsg: too many bsg devices\n");
+                goto out_kfree;
         }
-        bcd->class_dev = class_dev;
+        bd->device.devt = MKDEV(bsg_major, ret);
+        bd->device.class = bsg_class;
+        bd->device.parent = parent;
+        dev_set_name(&bd->device, "%s", name);
+        device_initialize(&bd->device);
+
+        cdev_init(&bd->cdev, &bsg_fops);
+        bd->cdev.owner = THIS_MODULE;
+        ret = cdev_device_add(&bd->cdev, &bd->device);
+        if (ret)
+                goto out_ida_remove;
 
         if (q->kobj.sd) {
-                ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
+                ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
                 if (ret)
-                        goto unregister_class_dev;
+                        goto out_device_del;
         }
 
-        mutex_unlock(&bsg_mutex);
-        return 0;
+        return bd;
 
-unregister_class_dev:
-        device_unregister(class_dev);
-idr_remove:
-        idr_remove(&bsg_minor_idr, bcd->minor);
-unlock:
-        mutex_unlock(&bsg_mutex);
-        return ret;
+out_device_del:
+        cdev_device_del(&bd->cdev, &bd->device);
+out_ida_remove:
+        ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+out_kfree:
+        kfree(bd);
+        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_register_queue);
 
-static struct cdev bsg_cdev;
-
 static char *bsg_devnode(struct device *dev, umode_t *mode)
 {
         return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
@@ -415,11 +265,8 @@ static char *bsg_devnode(struct device *dev, umode_t *mode)
 
 static int __init bsg_init(void)
 {
-        int ret, i;
         dev_t devid;
-
-        for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
-                INIT_HLIST_HEAD(&bsg_device_list[i]);
+        int ret;
 
         bsg_class = class_create(THIS_MODULE, "bsg");
         if (IS_ERR(bsg_class))
@@ -429,19 +276,12 @@ static int __init bsg_init(void)
         ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
         if (ret)
                 goto destroy_bsg_class;
-
         bsg_major = MAJOR(devid);
 
-        cdev_init(&bsg_cdev, &bsg_fops);
-        ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
-        if (ret)
-                goto unregister_chrdev;
-
         printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
                " loaded (major %d)\n", bsg_major);
         return 0;
-unregister_chrdev:
-        unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
+
 destroy_bsg_class:
         class_destroy(bsg_class);
         return ret;
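
Editor's note: for completeness, a hedged sketch of the registration and teardown
sequence that the new bsg_register_queue()/bsg_unregister_queue() follow: allocate the
per-device structure, reserve a minor, initialize the struct device, then add cdev and
device as one unit with cdev_device_add(), unwinding in reverse order on failure. The
example_* identifiers and EXAMPLE_MAX_DEVS are hypothetical; the kernel APIs shown
(ida_simple_get(), device_initialize(), dev_set_name(), cdev_init(), cdev_device_add(),
cdev_device_del(), ida_simple_remove()) are the ones the patch itself uses.

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>

#define EXAMPLE_MAX_DEVS 32768          /* hypothetical, mirrors BSG_MAX_DEVS */

struct example_device {                 /* hypothetical, mirrors struct bsg_device */
        struct device device;
        struct cdev cdev;
};

static DEFINE_IDA(example_minor_ida);
static struct class *example_class;     /* assumed set up by class_create() at module init */
static int example_major;               /* assumed set up by alloc_chrdev_region() at module init */
static const struct file_operations example_fops;   /* .open/.release etc. elided in this sketch */

static struct example_device *example_register(struct device *parent, const char *name)
{
        struct example_device *ed;
        int ret;

        ed = kzalloc(sizeof(*ed), GFP_KERNEL);
        if (!ed)
                return ERR_PTR(-ENOMEM);

        ret = ida_simple_get(&example_minor_ida, 0, EXAMPLE_MAX_DEVS, GFP_KERNEL);
        if (ret < 0)
                goto out_kfree;

        /* Initialize the struct device, but do not add it to the system yet. */
        device_initialize(&ed->device);
        ed->device.devt = MKDEV(example_major, ret);
        ed->device.class = example_class;
        ed->device.parent = parent;
        dev_set_name(&ed->device, "%s", name);

        /* Add cdev and device together; open() can now reach ed via inode->i_cdev. */
        cdev_init(&ed->cdev, &example_fops);
        ed->cdev.owner = THIS_MODULE;
        ret = cdev_device_add(&ed->cdev, &ed->device);
        if (ret)
                goto out_ida_remove;
        return ed;

out_ida_remove:
        ida_simple_remove(&example_minor_ida, MINOR(ed->device.devt));
out_kfree:
        /* the patch frees directly; a driver with a ->release() callback would use put_device() */
        kfree(ed);
        return ERR_PTR(ret);
}

static void example_unregister(struct example_device *ed)
{
        cdev_device_del(&ed->cdev, &ed->device);
        ida_simple_remove(&example_minor_ida, MINOR(ed->device.devt));
        kfree(ed);
}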