author    | Christoph Hellwig <hch@lst.de> | 2017-01-27 09:46:29 +0100
committer | Jens Axboe <axboe@fb.com>      | 2017-01-27 15:08:35 -0700
commit    | 82ed4db499b8598f16f8871261bff088d6b0597f
tree      | e1cc0a433bf5ae2b9723837291617bdfeeb61816 /block
parent    | 8ae94eb65be9425af4d57a4f4cfebfdf03081e93
block: split scsi_request out of struct request
And require all drivers that want to support BLOCK_PC to allocate it
as the first thing of their private data. To support this the legacy
IDE and BSG code is switched to set cmd_size on their queues to let
the block layer allocate the additional space.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
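As a rough illustration of what the message above asks of drivers, here is a hypothetical sketch (the my_driver_* names are invented, and the struct scsi_request definition itself is added outside block/, presumably in <scsi/scsi_request.h>, so it is not part of this diff): the per-request payload embeds struct scsi_request as its first member, and cmd_size tells the block layer how much extra space to allocate, mirroring the bsg_setup_queue() change below.

```c
/*
 * Hypothetical driver-side sketch (not from this patch): BLOCK_PC users
 * embed struct scsi_request at offset 0 of their per-request data and set
 * q->cmd_size so the block layer allocates it behind each struct request.
 */
#include <linux/blkdev.h>
#include <scsi/scsi_request.h>		/* assumed location of the new struct */

struct my_driver_cmd {
	struct scsi_request sreq;	/* must be first: scsi_req(rq) points here */
	void *my_private;		/* driver-specific state follows */
};

static int my_driver_setup_queue(struct request_queue *q,
				 request_fn_proc *my_request_fn)
{
	q->cmd_size = sizeof(struct my_driver_cmd);
	q->request_fn = my_request_fn;
	return blk_init_allocated_queue(q);	/* same pattern as bsg_setup_queue() below */
}
```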
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c   | 31 |
-rw-r--r-- | block/blk-exec.c   | 17 |
-rw-r--r-- | block/blk-mq.c     | 10 |
-rw-r--r-- | block/bsg-lib.c    | 34 |
-rw-r--r-- | block/bsg.c        | 47 |
-rw-r--r-- | block/scsi_ioctl.c | 76 |
6 files changed, 87 insertions, 128 deletions
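The scsi_request structure itself is defined outside block/ and therefore does not appear in the diffstat. Judging from the fields the hunks below touch (__cmd, cmd, cmd_len, sense, sense_len, resid_len), its layout is roughly the following sketch; field order and types are an assumption, not something shown in this diff.

```c
/*
 * Sketch of the split-out per-request SCSI state, inferred from the fields
 * used below; the authoritative definition lives outside block/ (assumed to
 * be <scsi/scsi_request.h>) and is not part of this diff.
 */
struct scsi_request {
	unsigned char	__cmd[BLK_MAX_CDB];	/* inline storage for short CDBs */
	unsigned char	*cmd;			/* points at __cmd or a kzalloc'd long CDB */
	unsigned short	cmd_len;
	unsigned int	sense_len;		/* bytes of valid sense data */
	unsigned int	resid_len;		/* residual byte count */
	void		*sense;
};
```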
diff --git a/block/blk-core.c b/block/blk-core.c
index 0a485ad802c9..f3bc083d4c14 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -132,8 +132,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->__sector = (sector_t) -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->cmd = rq->__cmd;
-	rq->cmd_len = BLK_MAX_CDB;
 	rq->tag = -1;
 	rq->internal_tag = -1;
 	rq->start_time = jiffies;
@@ -160,8 +158,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
-	int bit;
-
 	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
 		rq->rq_disk ? rq->rq_disk->disk_name : "?",
 		rq->cmd_type, (unsigned long long) rq->cmd_flags);
@@ -171,13 +167,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
 	       rq->bio, rq->biotail, blk_rq_bytes(rq));
-
-	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-		printk(KERN_INFO "  cdb: ");
-		for (bit = 0; bit < BLK_MAX_CDB; bit++)
-			printk("%02x ", rq->cmd[bit]);
-		printk("\n");
-	}
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
@@ -1316,18 +1305,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 EXPORT_SYMBOL(blk_get_request);
 
 /**
- * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
- * @rq:		request to be initialized
- *
- */
-void blk_rq_set_block_pc(struct request *rq)
-{
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	memset(rq->__cmd, 0, sizeof(rq->__cmd));
-}
-EXPORT_SYMBOL(blk_rq_set_block_pc);
-
-/**
  * blk_requeue_request - put a request back on queue
  * @q:		request queue where request should be inserted
  * @rq:		request to be inserted
@@ -2459,14 +2436,6 @@ void blk_start_request(struct request *req)
 		wbt_issue(req->q->rq_wb, &req->issue_stat);
 	}
 
-	/*
-	 * We are now handing the request to the hardware, initialize
-	 * resid_len to full count and add the timeout handler.
-	 */
-	req->resid_len = blk_rq_bytes(req);
-	if (unlikely(blk_bidi_rq(req)))
-		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
-
 	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
 	blk_add_timer(req);
 }
diff --git a/block/blk-exec.c b/block/blk-exec.c
index ed1f10165268..ed51800f4b44 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -11,11 +11,6 @@
 #include "blk.h"
 #include "blk-mq-sched.h"
 
-/*
- * for max sense size
- */
-#include <scsi/scsi_cmnd.h>
-
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
@@ -101,16 +96,9 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 		   struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
-	char sense[SCSI_SENSE_BUFFERSIZE];
 	int err = 0;
 	unsigned long hang_check;
 
-	if (!rq->sense) {
-		memset(sense, 0, sizeof(sense));
-		rq->sense = sense;
-		rq->sense_len = 0;
-	}
-
 	rq->end_io_data = &wait;
 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
 
@@ -124,11 +112,6 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 	if (rq->errors)
 		err = -EIO;
 
-	if (rq->sense == sense) {
-		rq->sense = NULL;
-		rq->sense_len = 0;
-	}
-
 	return err;
 }
 EXPORT_SYMBOL(blk_execute_rq);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 60dac10228fe..3d76e860f126 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -199,13 +199,7 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->special = NULL;
 	/* tag was already set */
 	rq->errors = 0;
-
-	rq->cmd = rq->__cmd;
-
 	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->resid_len = 0;
-	rq->sense = NULL;
 
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
@@ -487,10 +481,6 @@ void blk_mq_start_request(struct request *rq)
 
 	trace_block_rq_issue(q, rq);
 
-	rq->resid_len = blk_rq_bytes(rq);
-	if (unlikely(blk_bidi_rq(rq)))
-		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
-
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		blk_stat_set_issue_time(&rq->issue_stat);
 		rq->rq_flags |= RQF_STATS;
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index c74acf426840..cd15f9dbb147 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -71,22 +71,24 @@ void bsg_job_done(struct bsg_job *job, int result,
 {
 	struct request *req = job->req;
 	struct request *rsp = req->next_rq;
+	struct scsi_request *rq = scsi_req(req);
 	int err;
 
 	err = job->req->errors = result;
 	if (err < 0)
 		/* we're only returning the result field in the reply */
-		job->req->sense_len = sizeof(u32);
+		rq->sense_len = sizeof(u32);
 	else
-		job->req->sense_len = job->reply_len;
+		rq->sense_len = job->reply_len;
 	/* we assume all request payload was transferred, residual == 0 */
-	req->resid_len = 0;
+	rq->resid_len = 0;
 
 	if (rsp) {
-		WARN_ON(reply_payload_rcv_len > rsp->resid_len);
+		WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);
 
 		/* set reply (bidi) residual */
-		rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
+		scsi_req(rsp)->resid_len -=
+			min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
 	}
 	blk_complete_request(req);
 }
@@ -113,6 +115,7 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 	if (!buf->sg_list)
 		return -ENOMEM;
 	sg_init_table(buf->sg_list, req->nr_phys_segments);
+	scsi_req(req)->resid_len = blk_rq_bytes(req);
 	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
 	buf->payload_len = blk_rq_bytes(req);
 	return 0;
@@ -127,6 +130,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
 {
 	struct request *rsp = req->next_rq;
 	struct request_queue *q = req->q;
+	struct scsi_request *rq = scsi_req(req);
 	struct bsg_job *job;
 	int ret;
 
@@ -140,9 +144,9 @@ static int bsg_create_job(struct device *dev, struct request *req)
 	job->req = req;
 	if (q->bsg_job_size)
 		job->dd_data = (void *)&job[1];
-	job->request = req->cmd;
-	job->request_len = req->cmd_len;
-	job->reply = req->sense;
+	job->request = rq->cmd;
+	job->request_len = rq->cmd_len;
+	job->reply = rq->sense;
 	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
 						 * allocated */
 	if (req->bio) {
@@ -228,9 +232,15 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 	struct request_queue *q;
 	int ret;
 
-	q = blk_init_queue(bsg_request_fn, NULL);
+	q = blk_alloc_queue(GFP_KERNEL);
 	if (!q)
 		return ERR_PTR(-ENOMEM);
+	q->cmd_size = sizeof(struct scsi_request);
+	q->request_fn = bsg_request_fn;
+
+	ret = blk_init_allocated_queue(q);
+	if (ret)
+		goto out_cleanup_queue;
 
 	q->queuedata = dev;
 	q->bsg_job_size = dd_job_size;
@@ -243,10 +253,12 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 	if (ret) {
 		printk(KERN_ERR "%s: bsg interface failed to "
 		       "initialize - register queue\n", dev->kobj.name);
-		blk_cleanup_queue(q);
-		return ERR_PTR(ret);
+		goto out_cleanup_queue;
 	}
 
 	return q;
+out_cleanup_queue:
+	blk_cleanup_queue(q);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_setup_queue);
diff --git a/block/bsg.c b/block/bsg.c
index a57046de2f07..e34c3320956c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -85,7 +85,6 @@ struct bsg_command {
 	struct bio *bidi_bio;
 	int err;
 	struct sg_io_v4 hdr;
-	char sense[SCSI_SENSE_BUFFERSIZE];
 };
 
 static void bsg_free_command(struct bsg_command *bc)
@@ -140,18 +139,20 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 				struct sg_io_v4 *hdr, struct bsg_device *bd,
 				fmode_t has_write_perm)
 {
+	struct scsi_request *req = scsi_req(rq);
+
 	if (hdr->request_len > BLK_MAX_CDB) {
-		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
-		if (!rq->cmd)
+		req->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+		if (!req->cmd)
 			return -ENOMEM;
 	}
 
-	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
+	if (copy_from_user(req->cmd, (void __user *)(unsigned long)hdr->request,
 			   hdr->request_len))
 		return -EFAULT;
 
 	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
-		if (blk_verify_command(rq->cmd, has_write_perm))
+		if (blk_verify_command(req->cmd, has_write_perm))
 			return -EPERM;
 	} else if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
@@ -159,7 +160,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 	/*
 	 * fill in request structure
 	 */
-	rq->cmd_len = hdr->request_len;
+	req->cmd_len = hdr->request_len;
 
 	rq->timeout = msecs_to_jiffies(hdr->timeout);
 	if (!rq->timeout)
@@ -205,8 +206,7 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
  * map sg_io_v4 to a request.
  */
 static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
-	    u8 *sense)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
 {
 	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
@@ -236,7 +236,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 	rq = blk_get_request(q, rw, GFP_KERNEL);
 	if (IS_ERR(rq))
 		return rq;
-	blk_rq_set_block_pc(rq);
+	scsi_req_init(rq);
 
 	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
 	if (ret)
@@ -280,13 +280,9 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 		goto out;
 	}
 
-	rq->sense = sense;
-	rq->sense_len = 0;
-
 	return rq;
 out:
-	if (rq->cmd != rq->__cmd)
-		kfree(rq->cmd);
+	scsi_req_free_cmd(scsi_req(rq));
 	blk_put_request(rq);
 	if (next_rq) {
 		blk_rq_unmap_user(next_rq->bio);
@@ -393,6 +389,7 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 				    struct bio *bio, struct bio *bidi_bio)
 {
+	struct scsi_request *req = scsi_req(rq);
 	int ret = 0;
 
 	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
@@ -407,12 +404,12 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 		hdr->info |= SG_INFO_CHECK;
 	hdr->response_len = 0;
 
-	if (rq->sense_len && hdr->response) {
+	if (req->sense_len && hdr->response) {
 		int len = min_t(unsigned int, hdr->max_response_len,
-					rq->sense_len);
+					req->sense_len);
 
 		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
-				   rq->sense, len);
+				   req->sense, len);
 		if (!ret)
 			hdr->response_len = len;
 		else
@@ -420,14 +417,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	}
 
 	if (rq->next_rq) {
-		hdr->dout_resid = rq->resid_len;
-		hdr->din_resid = rq->next_rq->resid_len;
+		hdr->dout_resid = req->resid_len;
+		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
 		blk_rq_unmap_user(bidi_bio);
 		blk_put_request(rq->next_rq);
 	} else if (rq_data_dir(rq) == READ)
-		hdr->din_resid = rq->resid_len;
+		hdr->din_resid = req->resid_len;
 	else
-		hdr->dout_resid = rq->resid_len;
+		hdr->dout_resid = req->resid_len;
 
 	/*
 	 * If the request generated a negative error number, return it
@@ -439,8 +436,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 		ret = rq->errors;
 
 	blk_rq_unmap_user(bio);
-	if (rq->cmd != rq->__cmd)
-		kfree(rq->cmd);
+	scsi_req_free_cmd(req);
 	blk_put_request(rq);
 
 	return ret;
@@ -625,7 +621,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
 		/*
 		 * get a request, fill in the blanks, and add to request queue
 		 */
-		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
+		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
 		if (IS_ERR(rq)) {
 			ret = PTR_ERR(rq);
 			rq = NULL;
@@ -911,12 +907,11 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		struct bio *bio, *bidi_bio = NULL;
 		struct sg_io_v4 hdr;
 		int at_head;
-		u8 sense[SCSI_SENSE_BUFFERSIZE];
 
 		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 			return -EFAULT;
 
-		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
+		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index c2b64923ab66..7edf44f25e08 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -230,15 +230,17 @@ EXPORT_SYMBOL(blk_verify_command);
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 			     struct sg_io_hdr *hdr, fmode_t mode)
 {
-	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+	struct scsi_request *req = scsi_req(rq);
+
+	if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
 		return -EFAULT;
-	if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
+	if (blk_verify_command(req->cmd, mode & FMODE_WRITE))
 		return -EPERM;
 
 	/*
 	 * fill in request structure
 	 */
-	rq->cmd_len = hdr->cmd_len;
+	req->cmd_len = hdr->cmd_len;
 
 	rq->timeout = msecs_to_jiffies(hdr->timeout);
 	if (!rq->timeout)
@@ -254,6 +256,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 				 struct bio *bio)
 {
+	struct scsi_request *req = scsi_req(rq);
 	int r, ret = 0;
 
 	/*
@@ -267,13 +270,13 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	hdr->info = 0;
 	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
 		hdr->info |= SG_INFO_CHECK;
-	hdr->resid = rq->resid_len;
+	hdr->resid = req->resid_len;
 	hdr->sb_len_wr = 0;
 
-	if (rq->sense_len && hdr->sbp) {
-		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
+	if (req->sense_len && hdr->sbp) {
+		int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
 
-		if (!copy_to_user(hdr->sbp, rq->sense, len))
+		if (!copy_to_user(hdr->sbp, req->sense, len))
 			hdr->sb_len_wr = len;
 		else
 			ret = -EFAULT;
@@ -294,7 +297,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	int writing = 0;
 	int at_head = 0;
 	struct request *rq;
-	char sense[SCSI_SENSE_BUFFERSIZE];
+	struct scsi_request *req;
 	struct bio *bio;
 
 	if (hdr->interface_id != 'S')
@@ -321,11 +324,12 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
-	blk_rq_set_block_pc(rq);
+	req = scsi_req(rq);
+	scsi_req_init(rq);
 
 	if (hdr->cmd_len > BLK_MAX_CDB) {
-		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
-		if (!rq->cmd)
+		req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+		if (!req->cmd)
 			goto out_put_request;
 	}
 
@@ -357,9 +361,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 		goto out_free_cdb;
 
 	bio = rq->bio;
-	memset(sense, 0, sizeof(sense));
-	rq->sense = sense;
-	rq->sense_len = 0;
 	rq->retries = 0;
 
 	start_time = jiffies;
@@ -375,8 +376,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	ret = blk_complete_sghdr_rq(rq, hdr, bio);
 
 out_free_cdb:
-	if (rq->cmd != rq->__cmd)
-		kfree(rq->cmd);
+	scsi_req_free_cmd(req);
 out_put_request:
 	blk_put_request(rq);
 	return ret;
@@ -420,9 +420,10 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 		struct scsi_ioctl_command __user *sic)
 {
 	struct request *rq;
+	struct scsi_request *req;
 	int err;
 	unsigned int in_len, out_len, bytes, opcode, cmdlen;
-	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+	char *buffer = NULL;
 
 	if (!sic)
 		return -EINVAL;
@@ -452,7 +453,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 		err = PTR_ERR(rq);
 		goto error_free_buffer;
 	}
-	blk_rq_set_block_pc(rq);
+	req = scsi_req(rq);
+	scsi_req_init(rq);
 
 	cmdlen = COMMAND_SIZE(opcode);
 
@@ -460,14 +462,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	 * get command and data to send to device, if any
 	 */
 	err = -EFAULT;
-	rq->cmd_len = cmdlen;
-	if (copy_from_user(rq->cmd, sic->data, cmdlen))
+	req->cmd_len = cmdlen;
+	if (copy_from_user(req->cmd, sic->data, cmdlen))
 		goto error;
 
 	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
 		goto error;
 
-	err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
+	err = blk_verify_command(req->cmd, mode & FMODE_WRITE);
 	if (err)
 		goto error;
 
@@ -503,18 +505,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 		goto error;
 	}
 
-	memset(sense, 0, sizeof(sense));
-	rq->sense = sense;
-	rq->sense_len = 0;
-
 	blk_execute_rq(q, disk, rq, 0);
 
 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
 	if (err) {
-		if (rq->sense_len && rq->sense) {
-			bytes = (OMAX_SB_LEN > rq->sense_len) ?
-				rq->sense_len : OMAX_SB_LEN;
-			if (copy_to_user(sic->data, rq->sense, bytes))
+		if (req->sense_len && req->sense) {
+			bytes = (OMAX_SB_LEN > req->sense_len) ?
+				req->sense_len : OMAX_SB_LEN;
+			if (copy_to_user(sic->data, req->sense, bytes))
 				err = -EFAULT;
 		}
 	} else {
@@ -542,11 +540,11 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
-	blk_rq_set_block_pc(rq);
+	scsi_req_init(rq);
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-	rq->cmd[0] = cmd;
-	rq->cmd[4] = data;
-	rq->cmd_len = 6;
+	scsi_req(rq)->cmd[0] = cmd;
+	scsi_req(rq)->cmd[4] = data;
+	scsi_req(rq)->cmd_len = 6;
 	err = blk_execute_rq(q, bd_disk, rq, 0);
 	blk_put_request(rq);
 
@@ -743,6 +741,18 @@ int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
 }
 EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
 
+void scsi_req_init(struct request *rq)
+{
+	struct scsi_request *req = scsi_req(rq);
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	memset(req->__cmd, 0, sizeof(req->__cmd));
+	req->cmd = req->__cmd;
+	req->cmd_len = BLK_MAX_CDB;
+	req->sense_len = 0;
+}
+EXPORT_SYMBOL(scsi_req_init);
+
 static int __init blk_scsi_ioctl_init(void)
 {
 	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);