Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig            |  15
-rw-r--r--  block/as-iosched.c       |  24
-rw-r--r--  block/blk-barrier.c      |  27
-rw-r--r--  block/blk-core.c         | 884
-rw-r--r--  block/blk-exec.c         |   1
-rw-r--r--  block/blk-integrity.c    |   2
-rw-r--r--  block/blk-ioc.c          |  12
-rw-r--r--  block/blk-map.c          |  25
-rw-r--r--  block/blk-merge.c        |  71
-rw-r--r--  block/blk-settings.c     | 304
-rw-r--r--  block/blk-sysfs.c        |  65
-rw-r--r--  block/blk-tag.c          |  17
-rw-r--r--  block/blk-timeout.c      |  22
-rw-r--r--  block/blk.h              |  51
-rw-r--r--  block/bsg.c              |  17
-rw-r--r--  block/cfq-iosched.c      |  45
-rw-r--r--  block/compat_ioctl.c     |   6
-rw-r--r--  block/deadline-iosched.c |   2
-rw-r--r--  block/elevator.c         | 193
-rw-r--r--  block/genhd.c            |  21
-rw-r--r--  block/ioctl.c            |  12
-rw-r--r--  block/scsi_ioctl.c       |  13
22 files changed, 1131 insertions, 698 deletions
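
For orientation, most of the conversion in the diff below replaces direct access to struct request fields (rq->sector, rq->nr_sectors, rq->current_nr_sectors, rq->data_len and the hard_* shadows) with the blk_rq_pos()/blk_rq_sectors()/blk_rq_bytes()/blk_rq_cur_bytes() accessors, and replaces elv_dequeue_request()/blkdev_dequeue_request() plus the older end_request()/blk_end_request_callback() style completion with blk_peek_request()/blk_start_request()/blk_fetch_request() and the blk_end_request_all()/__blk_end_request_cur() family. The following is a minimal sketch of what a converted driver's request_fn looks like under the new API, using only helpers introduced or kept by this series; my_dev, my_dev_xfer and my_dev_request_fn are illustrative names, not part of this patch.

#include <linux/blkdev.h>

struct my_dev;					/* illustrative device state, not from this patch */
int my_dev_xfer(struct my_dev *dev, sector_t pos, void *buf,
		unsigned int bytes, int write);	/* illustrative transfer helper */

/* Called with q->queue_lock held, so the __blk_end_request_*() forms apply. */
static void my_dev_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* blk_fetch_request() = blk_peek_request() + blk_start_request() */
	while ((rq = blk_fetch_request(q)) != NULL) {
		struct my_dev *dev = q->queuedata;
		int err = 0;

		if (!blk_fs_request(rq)) {
			/* this toy driver only handles fs requests */
			__blk_end_request_all(rq, -EIO);
			continue;
		}

		/*
		 * Old code read rq->sector and rq->current_nr_sectors here;
		 * the accessors below are their replacements.  rq->buffer and
		 * blk_rq_pos() advance as each chunk is completed by
		 * blk_update_request() via __blk_end_request_cur().
		 */
		do {
			err = my_dev_xfer(dev, blk_rq_pos(rq), rq->buffer,
					  blk_rq_cur_bytes(rq),
					  rq_data_dir(rq) == WRITE);
		} while (!err && __blk_end_request_cur(rq, 0));

		if (err)
			__blk_end_request_all(rq, -EIO);
	}
}
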
diff --git a/block/Kconfig b/block/Kconfig index e7d12782bcfb..95a86adc33a1 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -23,9 +23,10 @@ menuconfig BLOCK if BLOCK -config LBD - bool "Support for large block devices and files" +config LBDAF + bool "Support for large (2TB+) block devices and files" depends on !64BIT + default y help Enable block devices or files of size 2TB and larger. @@ -38,11 +39,13 @@ config LBD The ext4 filesystem requires that this feature be enabled in order to support filesystems that have the huge_file feature - enabled. Otherwise, it will refuse to mount any filesystems - that use the huge_file feature, which is enabled by default - by mke2fs.ext4. The GFS2 filesystem also requires this feature. + enabled. Otherwise, it will refuse to mount in the read-write + mode any filesystems that use the huge_file feature, which is + enabled by default by mke2fs.ext4. - If unsure, say N. + The GFS2 filesystem also requires this feature. + + If unsure, say Y. config BLK_DEV_BSG bool "Block layer SG support v4 (EXPERIMENTAL)" diff --git a/block/as-iosched.c b/block/as-iosched.c index c48fa670d221..7a12cf6ee1d3 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2) data_dir = rq_is_sync(rq1); last = ad->last_sector[data_dir]; - s1 = rq1->sector; - s2 = rq2->sector; + s1 = blk_rq_pos(rq1); + s2 = blk_rq_pos(rq2); BUG_ON(data_dir != rq_is_sync(rq2)); @@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, as_update_thinktime(ad, aic, thinktime); /* Calculate read -> read seek distance */ - if (aic->last_request_pos < rq->sector) - seek_dist = rq->sector - aic->last_request_pos; + if (aic->last_request_pos < blk_rq_pos(rq)) + seek_dist = blk_rq_pos(rq) - + aic->last_request_pos; else - seek_dist = aic->last_request_pos - rq->sector; + seek_dist = aic->last_request_pos - + blk_rq_pos(rq); as_update_seekdist(ad, aic, seek_dist); } - aic->last_request_pos = rq->sector + rq->nr_sectors; + aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); set_bit(AS_TASK_IOSTARTED, &aic->state); spin_unlock(&aic->lock); } @@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic, { unsigned long delay; /* jiffies */ sector_t last = ad->last_sector[ad->batch_data_dir]; - sector_t next = rq->sector; + sector_t next = blk_rq_pos(rq); sector_t delta; /* acceptable close offset (in sectors) */ sector_t s; @@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq) * This has to be set in order to be correctly updated by * as_find_next_rq */ - ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; + ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq); if (data_dir == BLK_RW_SYNC) { struct io_context *ioc = RQ_IOC(rq); @@ -1312,12 +1314,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req, static void as_work_handler(struct work_struct *work) { struct as_data *ad = container_of(work, struct as_data, antic_work); - struct request_queue *q = ad->q; - unsigned long flags; - spin_lock_irqsave(q->queue_lock, flags); - blk_start_queueing(q); - spin_unlock_irqrestore(q->queue_lock, flags); + blk_run_queue(ad->q); } static int as_may_queue(struct request_queue *q, int rw) diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 20b4111fa050..30022b4e2f63 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -106,10 +106,7 @@ bool 
blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) */ q->ordseq = 0; rq = q->orig_bar_rq; - - if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) - BUG(); - + __blk_end_request_all(rq, q->orderr); return true; } @@ -166,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) * For an empty barrier, there's no actual BAR request, which * in turn makes POSTFLUSH unnecessary. Mask them off. */ - if (!rq->hard_nr_sectors) { + if (!blk_rq_sectors(rq)) { q->ordered &= ~(QUEUE_ORDERED_DO_BAR | QUEUE_ORDERED_DO_POSTFLUSH); /* @@ -183,7 +180,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) } /* stash away the original request */ - elv_dequeue_request(q, rq); + blk_dequeue_request(rq); q->orig_bar_rq = rq; rq = NULL; @@ -221,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) } else skip |= QUEUE_ORDSEQ_PREFLUSH; - if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight) + if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q)) rq = NULL; else skip |= QUEUE_ORDSEQ_DRAIN; @@ -251,10 +248,8 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp) * Queue ordering not supported. Terminate * with prejudice. */ - elv_dequeue_request(q, rq); - if (__blk_end_request(rq, -EOPNOTSUPP, - blk_rq_bytes(rq))) - BUG(); + blk_dequeue_request(rq); + __blk_end_request_all(rq, -EOPNOTSUPP); *rqp = NULL; return false; } @@ -329,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) /* * The driver must store the error location in ->bi_sector, if * it supports it. For non-stacked drivers, this should be copied - * from rq->sector. + * from blk_rq_pos(rq). */ if (error_sector) *error_sector = bio->bi_sector; @@ -393,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev, bio->bi_sector = sector; - if (nr_sects > q->max_hw_sectors) { - bio->bi_size = q->max_hw_sectors << 9; - nr_sects -= q->max_hw_sectors; - sector += q->max_hw_sectors; + if (nr_sects > queue_max_hw_sectors(q)) { + bio->bi_size = queue_max_hw_sectors(q) << 9; + nr_sects -= queue_max_hw_sectors(q); + sector += queue_max_hw_sectors(q); } else { bio->bi_size = nr_sects << 9; nr_sects = 0; diff --git a/block/blk-core.c b/block/blk-core.c index c89883be8737..b06cf5c2a829 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -26,24 +26,15 @@ #include <linux/swap.h> #include <linux/writeback.h> #include <linux/task_io_accounting_ops.h> -#include <linux/blktrace_api.h> #include <linux/fault-inject.h> -#include <trace/block.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/block.h> #include "blk.h" -DEFINE_TRACE(block_plug); -DEFINE_TRACE(block_unplug_io); -DEFINE_TRACE(block_unplug_timer); -DEFINE_TRACE(block_getrq); -DEFINE_TRACE(block_sleeprq); -DEFINE_TRACE(block_rq_requeue); -DEFINE_TRACE(block_bio_backmerge); -DEFINE_TRACE(block_bio_frontmerge); -DEFINE_TRACE(block_bio_queue); -DEFINE_TRACE(block_rq_complete); -DEFINE_TRACE(block_remap); /* Also used in drivers/md/dm.c */ EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); static int __make_request(struct request_queue *q, struct bio *bio); @@ -68,11 +59,11 @@ static void drive_stat_acct(struct request *rq, int new_io) int rw = rq_data_dir(rq); int cpu; - if (!blk_fs_request(rq) || !blk_do_io_stat(rq)) + if (!blk_do_io_stat(rq)) return; cpu = part_stat_lock(); - part = disk_map_sector_rcu(rq->rq_disk, rq->sector); + part = disk_map_sector_rcu(rq->rq_disk, 
blk_rq_pos(rq)); if (!new_io) part_stat_inc(cpu, part, merges[rw]); @@ -127,13 +118,14 @@ void blk_rq_init(struct request_queue *q, struct request *rq) INIT_LIST_HEAD(&rq->timeout_list); rq->cpu = -1; rq->q = q; - rq->sector = rq->hard_sector = (sector_t) -1; + rq->__sector = (sector_t) -1; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); rq->cmd = rq->__cmd; rq->cmd_len = BLK_MAX_CDB; rq->tag = -1; rq->ref_count = 1; + rq->start_time = jiffies; } EXPORT_SYMBOL(blk_rq_init); @@ -184,14 +176,11 @@ void blk_dump_rq_flags(struct request *rq, char *msg) rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, rq->cmd_flags); - printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", - (unsigned long long)rq->sector, - rq->nr_sectors, - rq->current_nr_sectors); - printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", - rq->bio, rq->biotail, - rq->buffer, rq->data, - rq->data_len); + printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", + (unsigned long long)blk_rq_pos(rq), + blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); + printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", + rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); if (blk_pc_request(rq)) { printk(KERN_INFO " cdb: "); @@ -333,24 +322,6 @@ void blk_unplug(struct request_queue *q) } EXPORT_SYMBOL(blk_unplug); -static void blk_invoke_request_fn(struct request_queue *q) -{ - if (unlikely(blk_queue_stopped(q))) - return; - - /* - * one level of recursion is ok and is much faster than kicking - * the unplug handling - */ - if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { - q->request_fn(q); - queue_flag_clear(QUEUE_FLAG_REENTER, q); - } else { - queue_flag_set(QUEUE_FLAG_PLUGGED, q); - kblockd_schedule_work(q, &q->unplug_work); - } -} - /** * blk_start_queue - restart a previously stopped queue * @q: The &struct request_queue in question @@ -365,7 +336,7 @@ void blk_start_queue(struct request_queue *q) WARN_ON(!irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); - blk_invoke_request_fn(q); + __blk_run_queue(q); } EXPORT_SYMBOL(blk_start_queue); @@ -425,12 +396,23 @@ void __blk_run_queue(struct request_queue *q) { blk_remove_plug(q); + if (unlikely(blk_queue_stopped(q))) + return; + + if (elv_queue_empty(q)) + return; + /* * Only recurse once to avoid overrunning the stack, let the unplug * handling reinvoke the handler shortly if we already got there. */ - if (!elv_queue_empty(q)) - blk_invoke_request_fn(q); + if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { + q->request_fn(q); + queue_flag_clear(QUEUE_FLAG_REENTER, q); + } else { + queue_flag_set(QUEUE_FLAG_PLUGGED, q); + kblockd_schedule_work(q, &q->unplug_work); + } } EXPORT_SYMBOL(__blk_run_queue); @@ -440,9 +422,7 @@ EXPORT_SYMBOL(__blk_run_queue); * * Description: * Invoke request handling on this queue, if it has pending work to do. - * May be used to restart queueing when a request has completed. Also - * See @blk_start_queueing. - * + * May be used to restart queueing when a request has completed. 
*/ void blk_run_queue(struct request_queue *q) { @@ -517,6 +497,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; q->backing_dev_info.unplug_io_data = q; + q->backing_dev_info.ra_pages = + (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; + q->backing_dev_info.state = 0; + q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; + err = bdi_init(&q->backing_dev_info); if (err) { kmem_cache_free(blk_requestq_cachep, q); @@ -902,26 +887,59 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) EXPORT_SYMBOL(blk_get_request); /** - * blk_start_queueing - initiate dispatch of requests to device - * @q: request queue to kick into gear + * blk_make_request - given a bio, allocate a corresponding struct request. + * @q: target request queue + * @bio: The bio describing the memory mappings that will be submitted for IO. + * It may be a chained-bio properly constructed by block/bio layer. + * @gfp_mask: gfp flags to be used for memory allocation + * + * blk_make_request is the parallel of generic_make_request for BLOCK_PC + * type commands. Where the struct request needs to be farther initialized by + * the caller. It is passed a &struct bio, which describes the memory info of + * the I/O transfer. + * + * The caller of blk_make_request must make sure that bi_io_vec + * are set to describe the memory buffers. That bio_data_dir() will return + * the needed direction of the request. (And all bio's in the passed bio-chain + * are properly set accordingly) * - * This is basically a helper to remove the need to know whether a queue - * is plugged or not if someone just wants to initiate dispatch of requests - * for this queue. Should be used to start queueing on a device outside - * of ->request_fn() context. Also see @blk_run_queue. + * If called under none-sleepable conditions, mapped bio buffers must not + * need bouncing, by calling the appropriate masked or flagged allocator, + * suitable for the target device. Otherwise the call to blk_queue_bounce will + * BUG. * - * The queue lock must be held with interrupts disabled. + * WARNING: When allocating/cloning a bio-chain, careful consideration should be + * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for + * anything but the first bio in the chain. Otherwise you risk waiting for IO + * completion of a bio that hasn't been submitted yet, thus resulting in a + * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead + * of bio_alloc(), as that avoids the mempool deadlock. + * If possible a big IO should be split into smaller parts when allocation + * fails. Partial allocation should not be an error, or you risk a live-lock. 
*/ -void blk_start_queueing(struct request_queue *q) +struct request *blk_make_request(struct request_queue *q, struct bio *bio, + gfp_t gfp_mask) { - if (!blk_queue_plugged(q)) { - if (unlikely(blk_queue_stopped(q))) - return; - q->request_fn(q); - } else - __generic_unplug_device(q); + struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); + + if (unlikely(!rq)) + return ERR_PTR(-ENOMEM); + + for_each_bio(bio) { + struct bio *bounce_bio = bio; + int ret; + + blk_queue_bounce(q, &bounce_bio); + ret = blk_rq_append_bio(q, rq, bounce_bio); + if (unlikely(ret)) { + blk_put_request(rq); + return ERR_PTR(ret); + } + } + + return rq; } -EXPORT_SYMBOL(blk_start_queueing); +EXPORT_SYMBOL(blk_make_request); /** * blk_requeue_request - put a request back on queue @@ -942,6 +960,8 @@ void blk_requeue_request(struct request_queue *q, struct request *rq) if (blk_rq_tagged(rq)) blk_queue_end_tag(q, rq); + BUG_ON(blk_queued_rq(rq)); + elv_requeue_request(q, rq); } EXPORT_SYMBOL(blk_requeue_request); @@ -977,7 +997,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq, * barrier */ rq->cmd_type = REQ_TYPE_SPECIAL; - rq->cmd_flags |= REQ_SOFTBARRIER; rq->special = data; @@ -991,7 +1010,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, drive_stat_acct(rq, 1); __elv_add_request(q, rq, where, 0); - blk_start_queueing(q); + __blk_run_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_insert_request); @@ -1113,16 +1132,13 @@ void init_request_from_bio(struct request *req, struct bio *bio) if (bio_failfast_driver(bio)) req->cmd_flags |= REQ_FAILFAST_DRIVER; - /* - * REQ_BARRIER implies no merging, but lets make it explicit - */ if (unlikely(bio_discard(bio))) { req->cmd_flags |= REQ_DISCARD; if (bio_barrier(bio)) req->cmd_flags |= REQ_SOFTBARRIER; req->q->prepare_discard_fn(req->q, req); } else if (unlikely(bio_barrier(bio))) - req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); + req->cmd_flags |= REQ_HARDBARRIER; if (bio_sync(bio)) req->cmd_flags |= REQ_RW_SYNC; @@ -1132,9 +1148,8 @@ void init_request_from_bio(struct request *req, struct bio *bio) req->cmd_flags |= REQ_NOIDLE; req->errors = 0; - req->hard_sector = req->sector = bio->bi_sector; + req->__sector = bio->bi_sector; req->ioprio = bio_prio(bio); - req->start_time = jiffies; blk_rq_bio_prep(req->q, req, bio); } @@ -1150,14 +1165,13 @@ static inline bool queue_should_plug(struct request_queue *q) static int __make_request(struct request_queue *q, struct bio *bio) { struct request *req; - int el_ret, nr_sectors; + int el_ret; + unsigned int bytes = bio->bi_size; const unsigned short prio = bio_prio(bio); const int sync = bio_sync(bio); const int unplug = bio_unplug(bio); int rw_flags; - nr_sectors = bio_sectors(bio); - /* * low level driver can indicate that it wants pages above a * certain limit bounced to low memory (ie for highmem, or even @@ -1182,7 +1196,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) req->biotail->bi_next = bio; req->biotail = bio; - req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->__data_len += bytes; req->ioprio = ioprio_best(req->ioprio, prio); if (!blk_rq_cpu_valid(req)) req->cpu = bio->bi_comp_cpu; @@ -1208,10 +1222,8 @@ static int __make_request(struct request_queue *q, struct bio *bio) * not touch req->buffer either... 
*/ req->buffer = bio_data(bio); - req->current_nr_sectors = bio_cur_sectors(bio); - req->hard_cur_sectors = req->current_nr_sectors; - req->sector = req->hard_sector = bio->bi_sector; - req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->__sector = bio->bi_sector; + req->__data_len += bytes; req->ioprio = ioprio_best(req->ioprio, prio); if (!blk_rq_cpu_valid(req)) req->cpu = bio->bi_comp_cpu; @@ -1277,7 +1289,7 @@ static inline void blk_partition_remap(struct bio *bio) bio->bi_bdev = bdev->bd_contains; trace_block_remap(bdev_get_queue(bio->bi_bdev), bio, - bdev->bd_dev, bio->bi_sector, + bdev->bd_dev, bio->bi_sector - p->start_sect); } } @@ -1422,11 +1434,11 @@ static inline void __generic_make_request(struct bio *bio) goto end_io; } - if (unlikely(nr_sectors > q->max_hw_sectors)) { + if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { printk(KERN_ERR "bio too big device %s (%u > %u)\n", - bdevname(bio->bi_bdev, b), - bio_sectors(bio), - q->max_hw_sectors); + bdevname(bio->bi_bdev, b), + bio_sectors(bio), + queue_max_hw_sectors(q)); goto end_io; } @@ -1446,8 +1458,7 @@ static inline void __generic_make_request(struct bio *bio) goto end_io; if (old_sector != -1) - trace_block_remap(q, bio, old_dev, bio->bi_sector, - old_sector); + trace_block_remap(q, bio, old_dev, old_sector); trace_block_bio_queue(q, bio); @@ -1593,8 +1604,8 @@ EXPORT_SYMBOL(submit_bio); */ int blk_rq_check_limits(struct request_queue *q, struct request *rq) { - if (rq->nr_sectors > q->max_sectors || - rq->data_len > q->max_hw_sectors << 9) { + if (blk_rq_sectors(rq) > queue_max_sectors(q) || + blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { printk(KERN_ERR "%s: over max size limit.\n", __func__); return -EIO; } @@ -1606,8 +1617,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq) * limitation. */ blk_recalc_rq_segments(rq); - if (rq->nr_phys_segments > q->max_phys_segments || - rq->nr_phys_segments > q->max_hw_segments) { + if (rq->nr_phys_segments > queue_max_phys_segments(q) || + rq->nr_phys_segments > queue_max_hw_segments(q)) { printk(KERN_ERR "%s: over max segments limit.\n", __func__); return -EIO; } @@ -1651,40 +1662,15 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) } EXPORT_SYMBOL_GPL(blk_insert_cloned_request); -/** - * blkdev_dequeue_request - dequeue request and start timeout timer - * @req: request to dequeue - * - * Dequeue @req and start timeout timer on it. This hands off the - * request to the driver. - * - * Block internal functions which don't want to start timer should - * call elv_dequeue_request(). - */ -void blkdev_dequeue_request(struct request *req) -{ - elv_dequeue_request(req->q, req); - - /* - * We are now handing the request to the hardware, add the - * timeout handler. - */ - blk_add_timer(req); -} -EXPORT_SYMBOL(blkdev_dequeue_request); - static void blk_account_io_completion(struct request *req, unsigned int bytes) { - if (!blk_do_io_stat(req)) - return; - - if (blk_fs_request(req)) { + if (blk_do_io_stat(req)) { const int rw = rq_data_dir(req); struct hd_struct *part; int cpu; cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, req->sector); + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); part_stat_add(cpu, part, sectors[rw], bytes >> 9); part_stat_unlock(); } @@ -1692,22 +1678,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes) static void blk_account_io_done(struct request *req) { - if (!blk_do_io_stat(req)) - return; - /* * Account IO completion. 
bar_rq isn't accounted as a normal * IO on queueing nor completion. Accounting the containing * request is enough. */ - if (blk_fs_request(req) && req != &req->q->bar_rq) { + if (blk_do_io_stat(req) && req != &req->q->bar_rq) { unsigned long duration = jiffies - req->start_time; const int rw = rq_data_dir(req); struct hd_struct *part; int cpu; cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, req->sector); + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); part_stat_inc(cpu, part, ios[rw]); part_stat_add(cpu, part, ticks[rw], duration); @@ -1719,38 +1702,226 @@ static void blk_account_io_done(struct request *req) } /** - * __end_that_request_first - end I/O on a request + * blk_peek_request - peek at the top of a request queue + * @q: request queue to peek at + * + * Description: + * Return the request at the top of @q. The returned request + * should be started using blk_start_request() before LLD starts + * processing it. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. + */ +struct request *blk_peek_request(struct request_queue *q) +{ + struct request *rq; + int ret; + + while ((rq = __elv_next_request(q)) != NULL) { + if (!(rq->cmd_flags & REQ_STARTED)) { + /* + * This is the first time the device driver + * sees this request (possibly after + * requeueing). Notify IO scheduler. + */ + if (blk_sorted_rq(rq)) + elv_activate_rq(q, rq); + + /* + * just mark as started even if we don't start + * it, a request that has been delayed should + * not be passed by new incoming requests + */ + rq->cmd_flags |= REQ_STARTED; + trace_block_rq_issue(q, rq); + } + + if (!q->boundary_rq || q->boundary_rq == rq) { + q->end_sector = rq_end_sector(rq); + q->boundary_rq = NULL; + } + + if (rq->cmd_flags & REQ_DONTPREP) + break; + + if (q->dma_drain_size && blk_rq_bytes(rq)) { + /* + * make sure space for the drain appears we + * know we can do this because max_hw_segments + * has been adjusted to be one fewer than the + * device can handle + */ + rq->nr_phys_segments++; + } + + if (!q->prep_rq_fn) + break; + + ret = q->prep_rq_fn(q, rq); + if (ret == BLKPREP_OK) { + break; + } else if (ret == BLKPREP_DEFER) { + /* + * the request may have been (partially) prepped. + * we need to keep this request in the front to + * avoid resource deadlock. REQ_STARTED will + * prevent other fs requests from passing this one. + */ + if (q->dma_drain_size && blk_rq_bytes(rq) && + !(rq->cmd_flags & REQ_DONTPREP)) { + /* + * remove the space for the drain we added + * so that we don't add it again + */ + --rq->nr_phys_segments; + } + + rq = NULL; + break; + } else if (ret == BLKPREP_KILL) { + rq->cmd_flags |= REQ_QUIET; + /* + * Mark this request as started so we don't trigger + * any debug logic in the end I/O path. + */ + blk_start_request(rq); + __blk_end_request_all(rq, -EIO); + } else { + printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); + break; + } + } + + return rq; +} +EXPORT_SYMBOL(blk_peek_request); + +void blk_dequeue_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + BUG_ON(list_empty(&rq->queuelist)); + BUG_ON(ELV_ON_HASH(rq)); + + list_del_init(&rq->queuelist); + + /* + * the time frame between a request being removed from the lists + * and to it is freed is accounted as io that is in progress at + * the driver side. 
+ */ + if (blk_account_rq(rq)) + q->in_flight[rq_is_sync(rq)]++; +} + +/** + * blk_start_request - start request processing on the driver + * @req: request to dequeue + * + * Description: + * Dequeue @req and start timeout timer on it. This hands off the + * request to the driver. + * + * Block internal functions which don't want to start timer should + * call blk_dequeue_request(). + * + * Context: + * queue_lock must be held. + */ +void blk_start_request(struct request *req) +{ + blk_dequeue_request(req); + + /* + * We are now handing the request to the hardware, initialize + * resid_len to full count and add the timeout handler. + */ + req->resid_len = blk_rq_bytes(req); + if (unlikely(blk_bidi_rq(req))) + req->next_rq->resid_len = blk_rq_bytes(req->next_rq); + + blk_add_timer(req); +} +EXPORT_SYMBOL(blk_start_request); + +/** + * blk_fetch_request - fetch a request from a request queue + * @q: request queue to fetch a request from + * + * Description: + * Return the request at the top of @q. The request is started on + * return and LLD can start processing it immediately. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. + */ +struct request *blk_fetch_request(struct request_queue *q) +{ + struct request *rq; + + rq = blk_peek_request(q); + if (rq) + blk_start_request(rq); + return rq; +} +EXPORT_SYMBOL(blk_fetch_request); + +/** + * blk_update_request - Special helper function for request stacking drivers * @req: the request being processed * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete + * @nr_bytes: number of bytes to complete @req * * Description: - * Ends I/O on a number of bytes attached to @req, and sets it up - * for the next range of segments (if any) in the cluster. + * Ends I/O on a number of bytes attached to @req, but doesn't complete + * the request structure even if @req doesn't have leftover. + * If @req has leftover, sets it up for the next range of segments. + * + * This special helper function is only for request stacking drivers + * (e.g. request-based dm) so that they can handle partial completion. + * Actual device drivers should use blk_end_request instead. + * + * Passing the result of blk_rq_bytes() as @nr_bytes guarantees + * %false return from this function. * * Return: - * %0 - we are done with this request, call end_that_request_last() - * %1 - still buffers pending for this request + * %false - this request doesn't have any more data + * %true - this request has more data **/ -static int __end_that_request_first(struct request *req, int error, - int nr_bytes) +bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) { int total_bytes, bio_nbytes, next_idx = 0; struct bio *bio; + if (!req->bio) + return false; + trace_block_rq_complete(req->q, req); /* - * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual - * sense key with us all the way through + * For fs requests, rq is just carrier of independent bio's + * and each partial completion should be handled separately. + * Reset per-request error on each partial completion. + * + * TODO: tj: This is too subtle. It would be better to let + * low level drivers do what they see fit. */ - if (!blk_pc_request(req)) + if (blk_fs_request(req)) req->errors = 0; if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", req->rq_disk ? 
req->rq_disk->disk_name : "?", - (unsigned long long)req->sector); + (unsigned long long)blk_rq_pos(req)); } blk_account_io_completion(req, nr_bytes); @@ -1810,8 +1981,15 @@ static int __end_that_request_first(struct request *req, int error, /* * completely done */ - if (!req->bio) - return 0; + if (!req->bio) { + /* + * Reset counters so that the request stacking driver + * can find how many bytes remain in the request + * later. + */ + req->__data_len = 0; + return false; + } /* * if the request wasn't completed, update state @@ -1823,21 +2001,55 @@ static int __end_that_request_first(struct request *req, int error, bio_iovec(bio)->bv_len -= nr_bytes; } - blk_recalc_rq_sectors(req, total_bytes >> 9); + req->__data_len -= total_bytes; + req->buffer = bio_data(req->bio); + + /* update sector only for requests with clear definition of sector */ + if (blk_fs_request(req) || blk_discard_rq(req)) + req->__sector += total_bytes >> 9; + + /* + * If total number of sectors is less than the first segment + * size, something has gone terribly wrong. + */ + if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { + printk(KERN_ERR "blk: request botched\n"); + req->__data_len = blk_rq_cur_bytes(req); + } + + /* recalculate the number of segments */ blk_recalc_rq_segments(req); - return 1; + + return true; +} +EXPORT_SYMBOL_GPL(blk_update_request); + +static bool blk_update_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, + unsigned int bidi_bytes) +{ + if (blk_update_request(rq, error, nr_bytes)) + return true; + + /* Bidi request must be completed as a whole */ + if (unlikely(blk_bidi_rq(rq)) && + blk_update_request(rq->next_rq, error, bidi_bytes)) + return true; + + add_disk_randomness(rq->rq_disk); + + return false; } /* * queue lock must be held */ -static void end_that_request_last(struct request *req, int error) +static void blk_finish_request(struct request *req, int error) { if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); - if (blk_queued_rq(req)) - elv_dequeue_request(req->q, req); + BUG_ON(blk_queued_rq(req)); if (unlikely(laptop_mode) && blk_fs_request(req)) laptop_io_completion(); @@ -1857,117 +2069,62 @@ static void end_that_request_last(struct request *req, int error) } /** - * blk_rq_bytes - Returns bytes left to complete in the entire request - * @rq: the request being processed - **/ -unsigned int blk_rq_bytes(struct request *rq) -{ - if (blk_fs_request(rq)) - return rq->hard_nr_sectors << 9; - - return rq->data_len; -} -EXPORT_SYMBOL_GPL(blk_rq_bytes); - -/** - * blk_rq_cur_bytes - Returns bytes left to complete in the current segment - * @rq: the request being processed - **/ -unsigned int blk_rq_cur_bytes(struct request *rq) -{ - if (blk_fs_request(rq)) - return rq->current_nr_sectors << 9; - - if (rq->bio) - return rq->bio->bi_size; - - return rq->data_len; -} -EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); - -/** - * end_request - end I/O on the current segment of the request - * @req: the request being processed - * @uptodate: error value or %0/%1 uptodate flag + * blk_end_bidi_request - Complete a bidi request + * @rq: the request to complete + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @rq + * @bidi_bytes: number of bytes to complete @rq->next_rq * * Description: - * Ends I/O on the current segment of a request. If that is the only - * remaining segment, the request is also completed and freed. - * - * This is a remnant of how older block drivers handled I/O completions. 
- * Modern drivers typically end I/O on the full request in one go, unless - * they have a residual value to account for. For that case this function - * isn't really useful, unless the residual just happens to be the - * full current segment. In other words, don't use this function in new - * code. Use blk_end_request() or __blk_end_request() to end a request. + * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. + * Drivers that supports bidi can safely call this member for any + * type of request, bidi or uni. In the later case @bidi_bytes is + * just ignored. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request **/ -void end_request(struct request *req, int uptodate) -{ - int error = 0; - - if (uptodate <= 0) - error = uptodate ? uptodate : -EIO; - - __blk_end_request(req, error, req->hard_cur_sectors << 9); -} -EXPORT_SYMBOL(end_request); - -static int end_that_request_data(struct request *rq, int error, +static bool blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes) { - if (rq->bio) { - if (__end_that_request_first(rq, error, nr_bytes)) - return 1; + struct request_queue *q = rq->q; + unsigned long flags; - /* Bidi request must be completed as a whole */ - if (blk_bidi_rq(rq) && - __end_that_request_first(rq->next_rq, error, bidi_bytes)) - return 1; - } + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) + return true; - return 0; + spin_lock_irqsave(q->queue_lock, flags); + blk_finish_request(rq, error); + spin_unlock_irqrestore(q->queue_lock, flags); + + return false; } /** - * blk_end_io - Generic end_io function to complete a request. - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete @rq - * @bidi_bytes: number of bytes to complete @rq->next_rq - * @drv_callback: function called between completion of bios in the request - * and completion of the request. - * If the callback returns non %0, this helper returns without - * completion of the request. + * __blk_end_bidi_request - Complete a bidi request with queue lock held + * @rq: the request to complete + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @rq + * @bidi_bytes: number of bytes to complete @rq->next_rq * * Description: - * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. - * If @rq has leftover, sets it up for the next range of segments. + * Identical to blk_end_bidi_request() except that queue lock is + * assumed to be locked on entry and remains so on return. * * Return: - * %0 - we are done with this request - * %1 - this request is not freed yet, it still has pending buffers. 
+ * %false - we are done with this request + * %true - still buffers pending for this request **/ -static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, - unsigned int bidi_bytes, - int (drv_callback)(struct request *)) +static bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) { - struct request_queue *q = rq->q; - unsigned long flags = 0UL; - - if (end_that_request_data(rq, error, nr_bytes, bidi_bytes)) - return 1; - - /* Special feature for tricky drivers */ - if (drv_callback && drv_callback(rq)) - return 1; + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) + return true; - add_disk_randomness(rq->rq_disk); + blk_finish_request(rq, error); - spin_lock_irqsave(q->queue_lock, flags); - end_that_request_last(rq, error); - spin_unlock_irqrestore(q->queue_lock, flags); - - return 0; + return false; } /** @@ -1981,124 +2138,112 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, * If @rq has leftover, sets it up for the next range of segments. * * Return: - * %0 - we are done with this request - * %1 - still buffers pending for this request + * %false - we are done with this request + * %true - still buffers pending for this request **/ -int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) +bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) { - return blk_end_io(rq, error, nr_bytes, 0, NULL); + return blk_end_bidi_request(rq, error, nr_bytes, 0); } EXPORT_SYMBOL_GPL(blk_end_request); /** - * __blk_end_request - Helper function for drivers to complete the request. - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete + * blk_end_request_all - Helper function for drives to finish the request. + * @rq: the request to finish + * @error: %0 for success, < %0 for error * * Description: - * Must be called with queue lock held unlike blk_end_request(). - * - * Return: - * %0 - we are done with this request - * %1 - still buffers pending for this request - **/ -int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) + * Completely finish @rq. + */ +void blk_end_request_all(struct request *rq, int error) { - if (rq->bio && __end_that_request_first(rq, error, nr_bytes)) - return 1; + bool pending; + unsigned int bidi_bytes = 0; - add_disk_randomness(rq->rq_disk); + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); - end_that_request_last(rq, error); + pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); + BUG_ON(pending); +} +EXPORT_SYMBOL_GPL(blk_end_request_all); - return 0; +/** + * blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @error: %0 for success, < %0 for error + * + * Description: + * Complete the current consecutively mapped chunk from @rq. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool blk_end_request_cur(struct request *rq, int error) +{ + return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); } -EXPORT_SYMBOL_GPL(__blk_end_request); +EXPORT_SYMBOL_GPL(blk_end_request_cur); /** - * blk_end_bidi_request - Helper function for drivers to complete bidi request. 
- * @rq: the bidi request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete @rq - * @bidi_bytes: number of bytes to complete @rq->next_rq + * __blk_end_request - Helper function for drivers to complete the request. + * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete * * Description: - * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. + * Must be called with queue lock held unlike blk_end_request(). * * Return: - * %0 - we are done with this request - * %1 - still buffers pending for this request + * %false - we are done with this request + * %true - still buffers pending for this request **/ -int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, - unsigned int bidi_bytes) +bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) { - return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); + return __blk_end_bidi_request(rq, error, nr_bytes, 0); } -EXPORT_SYMBOL_GPL(blk_end_bidi_request); +EXPORT_SYMBOL_GPL(__blk_end_request); /** - * blk_update_request - Special helper function for request stacking drivers - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete @rq + * __blk_end_request_all - Helper function for drives to finish the request. + * @rq: the request to finish + * @error: %0 for success, < %0 for error * * Description: - * Ends I/O on a number of bytes attached to @rq, but doesn't complete - * the request structure even if @rq doesn't have leftover. - * If @rq has leftover, sets it up for the next range of segments. - * - * This special helper function is only for request stacking drivers - * (e.g. request-based dm) so that they can handle partial completion. - * Actual device drivers should use blk_end_request instead. + * Completely finish @rq. Must be called with queue lock held. */ -void blk_update_request(struct request *rq, int error, unsigned int nr_bytes) +void __blk_end_request_all(struct request *rq, int error) { - if (!end_that_request_data(rq, error, nr_bytes, 0)) { - /* - * These members are not updated in end_that_request_data() - * when all bios are completed. - * Update them so that the request stacking driver can find - * how many bytes remain in the request later. - */ - rq->nr_sectors = rq->hard_nr_sectors = 0; - rq->current_nr_sectors = rq->hard_cur_sectors = 0; - } + bool pending; + unsigned int bidi_bytes = 0; + + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); + + pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); + BUG_ON(pending); } -EXPORT_SYMBOL_GPL(blk_update_request); +EXPORT_SYMBOL_GPL(__blk_end_request_all); /** - * blk_end_request_callback - Special helper function for tricky drivers - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete - * @drv_callback: function called between completion of bios in the request - * and completion of the request. - * If the callback returns non %0, this helper returns without - * completion of the request. + * __blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @error: %0 for success, < %0 for error * * Description: - * Ends I/O on a number of bytes attached to @rq. - * If @rq has leftover, sets it up for the next range of segments. 
- * - * This special helper function is used only for existing tricky drivers. - * (e.g. cdrom_newpc_intr() of ide-cd) - * This interface will be removed when such drivers are rewritten. - * Don't use this interface in other places anymore. + * Complete the current consecutively mapped chunk from @rq. Must + * be called with queue lock held. * * Return: - * %0 - we are done with this request - * %1 - this request is not freed yet. - * this request still has pending buffers or - * the driver doesn't want to finish this request yet. - **/ -int blk_end_request_callback(struct request *rq, int error, - unsigned int nr_bytes, - int (drv_callback)(struct request *)) + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool __blk_end_request_cur(struct request *rq, int error) { - return blk_end_io(rq, error, nr_bytes, 0, drv_callback); + return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); } -EXPORT_SYMBOL_GPL(blk_end_request_callback); +EXPORT_SYMBOL_GPL(__blk_end_request_cur); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio) @@ -2111,11 +2256,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, rq->nr_phys_segments = bio_phys_segments(q, bio); rq->buffer = bio_data(bio); } - rq->current_nr_sectors = bio_cur_sectors(bio); - rq->hard_cur_sectors = rq->current_nr_sectors; - rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); - rq->data_len = bio->bi_size; - + rq->__data_len = bio->bi_size; rq->bio = rq->biotail = bio; if (bio->bi_bdev) @@ -2150,6 +2291,106 @@ int blk_lld_busy(struct request_queue *q) } EXPORT_SYMBOL_GPL(blk_lld_busy); +/** + * blk_rq_unprep_clone - Helper function to free all bios in a cloned request + * @rq: the clone request to be cleaned up + * + * Description: + * Free all bios in @rq for a cloned request. + */ +void blk_rq_unprep_clone(struct request *rq) +{ + struct bio *bio; + + while ((bio = rq->bio) != NULL) { + rq->bio = bio->bi_next; + + bio_put(bio); + } +} +EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); + +/* + * Copy attributes of the original request to the clone request. + * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. + */ +static void __blk_rq_prep_clone(struct request *dst, struct request *src) +{ + dst->cpu = src->cpu; + dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); + dst->cmd_type = src->cmd_type; + dst->__sector = blk_rq_pos(src); + dst->__data_len = blk_rq_bytes(src); + dst->nr_phys_segments = src->nr_phys_segments; + dst->ioprio = src->ioprio; + dst->extra_len = src->extra_len; +} + +/** + * blk_rq_prep_clone - Helper function to setup clone request + * @rq: the request to be setup + * @rq_src: original request to be cloned + * @bs: bio_set that bios for clone are allocated from + * @gfp_mask: memory allocation mask for bio + * @bio_ctr: setup function to be called for each clone bio. + * Returns %0 for success, non %0 for failure. + * @data: private data to be passed to @bio_ctr + * + * Description: + * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. + * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) + * are not copied, and copying such parts is the caller's responsibility. + * Also, pages which the original bios are pointing to are not copied + * and the cloned bios just point same pages. + * So cloned bios must be completed before original bios, which means + * the caller must complete @rq before @rq_src. 
+ */ +int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), + void *data) +{ + struct bio *bio, *bio_src; + + if (!bs) + bs = fs_bio_set; + + blk_rq_init(NULL, rq); + + __rq_for_each_bio(bio_src, rq_src) { + bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); + if (!bio) + goto free_and_out; + + __bio_clone(bio, bio_src); + + if (bio_integrity(bio_src) && + bio_integrity_clone(bio, bio_src, gfp_mask)) + goto free_and_out; + + if (bio_ctr && bio_ctr(bio, bio_src, data)) + goto free_and_out; + + if (rq->bio) { + rq->biotail->bi_next = bio; + rq->biotail = bio; + } else + rq->bio = rq->biotail = bio; + } + + __blk_rq_prep_clone(rq, rq_src); + + return 0; + +free_and_out: + if (bio) + bio_free(bio, bs); + blk_rq_unprep_clone(rq); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(blk_rq_prep_clone); + int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) { return queue_work(kblockd_workqueue, work); @@ -2158,6 +2399,9 @@ EXPORT_SYMBOL(kblockd_schedule_work); int __init blk_dev_init(void) { + BUILD_BUG_ON(__REQ_NR_BITS > 8 * + sizeof(((struct request *)0)->cmd_flags)); + kblockd_workqueue = create_workqueue("kblockd"); if (!kblockd_workqueue) panic("Failed to create kblockd\n"); diff --git a/block/blk-exec.c b/block/blk-exec.c index 6af716d1e54e..49557e91f0da 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; rq->rq_disk = bd_disk; - rq->cmd_flags |= REQ_NOMERGE; rq->end_io = done; WARN_ON(irqs_disabled()); spin_lock_irq(q->queue_lock); diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 91fa8e06b6a5..73e28d355688 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -340,7 +340,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) kobject_uevent(&bi->kobj, KOBJ_ADD); bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE; - bi->sector_size = disk->queue->hardsect_size; + bi->sector_size = queue_logical_block_size(disk->queue); disk->integrity = bi; } else bi = disk->integrity; diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 012f065ac8e2..d4ed6000147d 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -35,9 +35,9 @@ int put_io_context(struct io_context *ioc) if (ioc == NULL) return 1; - BUG_ON(atomic_read(&ioc->refcount) == 0); + BUG_ON(atomic_long_read(&ioc->refcount) == 0); - if (atomic_dec_and_test(&ioc->refcount)) { + if (atomic_long_dec_and_test(&ioc->refcount)) { rcu_read_lock(); if (ioc->aic && ioc->aic->dtor) ioc->aic->dtor(ioc->aic); @@ -90,7 +90,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node) ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); if (ret) { - atomic_set(&ret->refcount, 1); + atomic_long_set(&ret->refcount, 1); atomic_set(&ret->nr_tasks, 1); spin_lock_init(&ret->lock); ret->ioprio_changed = 0; @@ -151,7 +151,7 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node) ret = current_io_context(gfp_flags, node); if (unlikely(!ret)) break; - } while (!atomic_inc_not_zero(&ret->refcount)); + } while (!atomic_long_inc_not_zero(&ret->refcount)); return ret; } @@ -163,8 +163,8 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc) struct io_context *dst = *pdst; if (src) { - BUG_ON(atomic_read(&src->refcount) == 0); - atomic_inc(&src->refcount); + 
BUG_ON(atomic_long_read(&src->refcount) == 0); + atomic_long_inc(&src->refcount); put_io_context(dst); *pdst = src; } diff --git a/block/blk-map.c b/block/blk-map.c index f103729b462f..9083cf0180cc 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, rq->biotail->bi_next = bio; rq->biotail = bio; - rq->data_len += bio->bi_size; + rq->__data_len += bio->bi_size; } return 0; } -EXPORT_SYMBOL(blk_rq_append_bio); static int __blk_rq_unmap_user(struct bio *bio) { @@ -116,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, struct bio *bio = NULL; int ret; - if (len > (q->max_hw_sectors << 9)) + if (len > (queue_max_hw_sectors(q) << 9)) return -EINVAL; if (!len) return -EINVAL; @@ -156,7 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, if (!bio_flagged(bio, BIO_USER_MAPPED)) rq->cmd_flags |= REQ_COPY_USER; - rq->buffer = rq->data = NULL; + rq->buffer = NULL; return 0; unmap_rq: blk_rq_unmap_user(bio); @@ -235,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, blk_queue_bounce(q, &bio); bio_get(bio); blk_rq_bio_prep(q, rq, bio); - rq->buffer = rq->data = NULL; + rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_user_iov); @@ -282,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user); * * Description: * Data will be mapped directly if possible. Otherwise a bounce - * buffer is used. + * buffer is used. Can be called multple times to append multple + * buffers. */ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask) @@ -290,8 +290,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, int reading = rq_data_dir(rq) == READ; int do_copy = 0; struct bio *bio; + int ret; - if (len > (q->max_hw_sectors << 9)) + if (len > (queue_max_hw_sectors(q) << 9)) return -EINVAL; if (!len || !kbuf) return -EINVAL; @@ -311,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, if (do_copy) rq->cmd_flags |= REQ_COPY_USER; - blk_rq_bio_prep(q, rq, bio); + ret = blk_rq_append_bio(q, rq, bio); + if (unlikely(ret)) { + /* request is too big */ + bio_put(bio); + return ret; + } + blk_queue_bounce(q, &rq->bio); - rq->buffer = rq->data = NULL; + rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_kern); diff --git a/block/blk-merge.c b/block/blk-merge.c index 23d2a6fe34a3..39ce64432ba6 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -9,35 +9,6 @@ #include "blk.h" -void blk_recalc_rq_sectors(struct request *rq, int nsect) -{ - if (blk_fs_request(rq) || blk_discard_rq(rq)) { - rq->hard_sector += nsect; - rq->hard_nr_sectors -= nsect; - - /* - * Move the I/O submission pointers ahead if required. 
- */ - if ((rq->nr_sectors >= rq->hard_nr_sectors) && - (rq->sector <= rq->hard_sector)) { - rq->sector = rq->hard_sector; - rq->nr_sectors = rq->hard_nr_sectors; - rq->hard_cur_sectors = bio_cur_sectors(rq->bio); - rq->current_nr_sectors = rq->hard_cur_sectors; - rq->buffer = bio_data(rq->bio); - } - - /* - * if total number of sectors is less than the first segment - * size, something has gone terribly wrong - */ - if (rq->nr_sectors < rq->current_nr_sectors) { - printk(KERN_ERR "blk: request botched\n"); - rq->nr_sectors = rq->current_nr_sectors; - } - } -} - static unsigned int __blk_recalc_rq_segments(struct request_queue *q, struct bio *bio) { @@ -61,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, * never considered part of another segment, since that * might change with the bounce page. */ - high = page_to_pfn(bv->bv_page) > q->bounce_pfn; + high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q); if (high || highprv) goto new_segment; if (cluster) { - if (seg_size + bv->bv_len > q->max_segment_size) + if (seg_size + bv->bv_len + > queue_max_segment_size(q)) goto new_segment; if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) goto new_segment; @@ -120,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, return 0; if (bio->bi_seg_back_size + nxt->bi_seg_front_size > - q->max_segment_size) + queue_max_segment_size(q)) return 0; if (!bio_has_data(bio)) @@ -163,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, int nbytes = bvec->bv_len; if (bvprv && cluster) { - if (sg->length + nbytes > q->max_segment_size) + if (sg->length + nbytes > queue_max_segment_size(q)) goto new_segment; if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) @@ -199,8 +171,9 @@ new_segment: if (unlikely(rq->cmd_flags & REQ_COPY_USER) && - (rq->data_len & q->dma_pad_mask)) { - unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1; + (blk_rq_bytes(rq) & q->dma_pad_mask)) { + unsigned int pad_len = + (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; sg->length += pad_len; rq->extra_len += pad_len; @@ -233,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q, { int nr_phys_segs = bio_phys_segments(q, bio); - if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments - || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { + if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) || + req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) { req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; @@ -255,11 +228,11 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, unsigned short max_sectors; if (unlikely(blk_pc_request(req))) - max_sectors = q->max_hw_sectors; + max_sectors = queue_max_hw_sectors(q); else - max_sectors = q->max_sectors; + max_sectors = queue_max_sectors(q); - if (req->nr_sectors + bio_sectors(bio) > max_sectors) { + if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; @@ -279,12 +252,12 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, unsigned short max_sectors; if (unlikely(blk_pc_request(req))) - max_sectors = q->max_hw_sectors; + max_sectors = queue_max_hw_sectors(q); else - max_sectors = q->max_sectors; + max_sectors = queue_max_sectors(q); - if (req->nr_sectors + bio_sectors(bio) > max_sectors) { + if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) 
q->last_merge = NULL; @@ -315,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, /* * Will it become too large? */ - if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) + if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q)) return 0; total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; @@ -327,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, total_phys_segments--; } - if (total_phys_segments > q->max_phys_segments) + if (total_phys_segments > queue_max_phys_segments(q)) return 0; - if (total_phys_segments > q->max_hw_segments) + if (total_phys_segments > queue_max_hw_segments(q)) return 0; /* Merge is OK... */ @@ -345,7 +318,7 @@ static void blk_account_io_merge(struct request *req) int cpu; cpu = part_stat_lock(); - part = disk_map_sector_rcu(req->rq_disk, req->sector); + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); part_round_stats(cpu, part); part_dec_in_flight(part); @@ -366,7 +339,7 @@ static int attempt_merge(struct request_queue *q, struct request *req, /* * not contiguous */ - if (req->sector + req->nr_sectors != next->sector) + if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) return 0; if (rq_data_dir(req) != rq_data_dir(next) @@ -398,7 +371,7 @@ static int attempt_merge(struct request_queue *q, struct request *req, req->biotail->bi_next = next->bio; req->biotail = next->biotail; - req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; + req->__data_len += blk_rq_bytes(next); elv_merge_requests(q, req, next); diff --git a/block/blk-settings.c b/block/blk-settings.c index 57af728d94bb..bd582a7f5310 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -96,6 +96,31 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) EXPORT_SYMBOL_GPL(blk_queue_lld_busy); /** + * blk_set_default_limits - reset limits to default values + * @lim: the queue_limits structure to reset + * + * Description: + * Returns a queue_limit struct to its default state. Can be used by + * stacking drivers like DM that stage table swaps and reuse an + * existing device queue. 
+ */ +void blk_set_default_limits(struct queue_limits *lim) +{ + lim->max_phys_segments = MAX_PHYS_SEGMENTS; + lim->max_hw_segments = MAX_HW_SEGMENTS; + lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; + lim->max_segment_size = MAX_SEGMENT_SIZE; + lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS; + lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; + lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); + lim->alignment_offset = 0; + lim->io_opt = 0; + lim->misaligned = 0; + lim->no_cluster = 0; +} +EXPORT_SYMBOL(blk_set_default_limits); + +/** * blk_queue_make_request - define an alternate make_request function for a device * @q: the request queue for the device to be affected * @mfn: the alternate make_request function @@ -123,18 +148,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) * set defaults */ q->nr_requests = BLKDEV_MAX_RQ; - blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); - blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); - blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK); - blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE); q->make_request_fn = mfn; - q->backing_dev_info.ra_pages = - (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; - q->backing_dev_info.state = 0; - q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; - blk_queue_max_sectors(q, SAFE_MAX_SECTORS); - blk_queue_hardsect_size(q, 512); blk_queue_dma_alignment(q, 511); blk_queue_congestion_threshold(q); q->nr_batching = BLK_BATCH_REQ; @@ -147,6 +162,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) q->unplug_timer.function = blk_unplug_timeout; q->unplug_timer.data = (unsigned long)q; + blk_set_default_limits(&q->limits); + /* * by default assume old behaviour and bounce for any highmem page */ @@ -179,16 +196,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) */ if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) dma = 1; - q->bounce_pfn = max_low_pfn; + q->limits.bounce_pfn = max_low_pfn; #else if (b_pfn < blk_max_low_pfn) dma = 1; - q->bounce_pfn = b_pfn; + q->limits.bounce_pfn = b_pfn; #endif if (dma) { init_emergency_isa_pool(); q->bounce_gfp = GFP_NOIO | GFP_DMA; - q->bounce_pfn = b_pfn; + q->limits.bounce_pfn = b_pfn; } } EXPORT_SYMBOL(blk_queue_bounce_limit); @@ -211,14 +228,23 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) } if (BLK_DEF_MAX_SECTORS > max_sectors) - q->max_hw_sectors = q->max_sectors = max_sectors; + q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors; else { - q->max_sectors = BLK_DEF_MAX_SECTORS; - q->max_hw_sectors = max_sectors; + q->limits.max_sectors = BLK_DEF_MAX_SECTORS; + q->limits.max_hw_sectors = max_sectors; } } EXPORT_SYMBOL(blk_queue_max_sectors); +void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) +{ + if (BLK_DEF_MAX_SECTORS > max_sectors) + q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS; + else + q->limits.max_hw_sectors = max_sectors; +} +EXPORT_SYMBOL(blk_queue_max_hw_sectors); + /** * blk_queue_max_phys_segments - set max phys segments for a request for this queue * @q: the request queue for the device @@ -238,7 +264,7 @@ void blk_queue_max_phys_segments(struct request_queue *q, __func__, max_segments); } - q->max_phys_segments = max_segments; + q->limits.max_phys_segments = max_segments; } EXPORT_SYMBOL(blk_queue_max_phys_segments); @@ -262,7 +288,7 @@ void blk_queue_max_hw_segments(struct request_queue *q, __func__, max_segments); } - q->max_hw_segments = 
max_segments; + q->limits.max_hw_segments = max_segments; } EXPORT_SYMBOL(blk_queue_max_hw_segments); @@ -283,26 +309,110 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) __func__, max_size); } - q->max_segment_size = max_size; + q->limits.max_segment_size = max_size; } EXPORT_SYMBOL(blk_queue_max_segment_size); /** - * blk_queue_hardsect_size - set hardware sector size for the queue + * blk_queue_logical_block_size - set logical block size for the queue * @q: the request queue for the device - * @size: the hardware sector size, in bytes + * @size: the logical block size, in bytes * * Description: - * This should typically be set to the lowest possible sector size - * that the hardware can operate on (possible without reverting to - * even internal read-modify-write operations). Usually the default - * of 512 covers most hardware. + * This should be set to the lowest possible block size that the + * storage device can address. The default of 512 covers most + * hardware. **/ -void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) +void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) +{ + q->limits.logical_block_size = size; + + if (q->limits.physical_block_size < size) + q->limits.physical_block_size = size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_logical_block_size); + +/** + * blk_queue_physical_block_size - set physical block size for the queue + * @q: the request queue for the device + * @size: the physical block size, in bytes + * + * Description: + * This should be set to the lowest possible sector size that the + * hardware can operate on without reverting to read-modify-write + * operations. + */ +void blk_queue_physical_block_size(struct request_queue *q, unsigned short size) +{ + q->limits.physical_block_size = size; + + if (q->limits.physical_block_size < q->limits.logical_block_size) + q->limits.physical_block_size = q->limits.logical_block_size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_physical_block_size); + +/** + * blk_queue_alignment_offset - set physical block alignment offset + * @q: the request queue for the device + * @offset: alignment offset in bytes + * + * Description: + * Some devices are naturally misaligned to compensate for things like + * the legacy DOS partition table 63-sector offset. Low-level drivers + * should call this function for devices whose first sector is not + * naturally aligned. + */ +void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) { - q->hardsect_size = size; + q->limits.alignment_offset = + offset & (q->limits.physical_block_size - 1); + q->limits.misaligned = 0; } -EXPORT_SYMBOL(blk_queue_hardsect_size); +EXPORT_SYMBOL(blk_queue_alignment_offset); + +/** + * blk_queue_io_min - set minimum request size for the queue + * @q: the request queue for the device + * @min: smallest I/O size in bytes + * + * Description: + * Some devices have an internal block size bigger than the reported + * hardware sector size. This function can be used to signal the + * smallest I/O the device can perform without incurring a performance + * penalty. 
+ */ +void blk_queue_io_min(struct request_queue *q, unsigned int min) +{ + q->limits.io_min = min; + + if (q->limits.io_min < q->limits.logical_block_size) + q->limits.io_min = q->limits.logical_block_size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_io_min); + +/** + * blk_queue_io_opt - set optimal request size for the queue + * @q: the request queue for the device + * @opt: optimal request size in bytes + * + * Description: + * Drivers can call this function to set the preferred I/O request + * size for devices that report such a value. + */ +void blk_queue_io_opt(struct request_queue *q, unsigned int opt) +{ + q->limits.io_opt = opt; +} +EXPORT_SYMBOL(blk_queue_io_opt); /* * Returns the minimum that is _not_ zero, unless both are zero. @@ -317,14 +427,27 @@ EXPORT_SYMBOL(blk_queue_hardsect_size); void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { /* zero is "infinity" */ - t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); - t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); - t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); + t->limits.max_sectors = min_not_zero(queue_max_sectors(t), + queue_max_sectors(b)); + + t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t), + queue_max_hw_sectors(b)); + + t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t), + queue_segment_boundary(b)); + + t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t), + queue_max_phys_segments(b)); + + t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t), + queue_max_hw_segments(b)); + + t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t), + queue_max_segment_size(b)); + + t->limits.logical_block_size = max(queue_logical_block_size(t), + queue_logical_block_size(b)); - t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments); - t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments); - t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); - t->hardsect_size = max(t->hardsect_size, b->hardsect_size); if (!t->queue_lock) WARN_ON_ONCE(1); else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { @@ -337,6 +460,109 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) EXPORT_SYMBOL(blk_queue_stack_limits); /** + * blk_stack_limits - adjust queue_limits for stacked devices + * @t: the stacking driver limits (top) + * @b: the underlying queue limits (bottom) + * @offset: offset to beginning of data within component device + * + * Description: + * Merges two queue_limit structs. Returns 0 if alignment didn't + * change. Returns -1 if adding the bottom device caused + * misalignment. 
+ */
+int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+		     sector_t offset)
+{
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
+
+	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
+					    b->seg_boundary_mask);
+
+	t->max_phys_segments = min_not_zero(t->max_phys_segments,
+					    b->max_phys_segments);
+
+	t->max_hw_segments = min_not_zero(t->max_hw_segments,
+					  b->max_hw_segments);
+
+	t->max_segment_size = min_not_zero(t->max_segment_size,
+					   b->max_segment_size);
+
+	t->logical_block_size = max(t->logical_block_size,
+				    b->logical_block_size);
+
+	t->physical_block_size = max(t->physical_block_size,
+				     b->physical_block_size);
+
+	t->io_min = max(t->io_min, b->io_min);
+	t->no_cluster |= b->no_cluster;
+
+	/* Bottom device offset aligned? */
+	if (offset &&
+	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
+		t->misaligned = 1;
+		return -1;
+	}
+
+	/* If top has no alignment offset, inherit from bottom */
+	if (!t->alignment_offset)
+		t->alignment_offset =
+			b->alignment_offset & (b->physical_block_size - 1);
+
+	/* Top device aligned on logical block boundary? */
+	if (t->alignment_offset & (t->logical_block_size - 1)) {
+		t->misaligned = 1;
+		return -1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(blk_stack_limits);
+
+/**
+ * disk_stack_limits - adjust queue limits for stacked drivers
+ * @disk: MD/DM gendisk (top)
+ * @bdev: the underlying block device (bottom)
+ * @offset: offset to beginning of data within component device
+
 * + * Description: + * Merges the limits for two queues and logs a warning if the + * top device ends up misaligned relative to the bottom device.
+ */ +void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset) +{ + struct request_queue *t = disk->queue; + struct request_queue *b = bdev_get_queue(bdev); + + offset += get_start_sect(bdev) << 9; + + if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) { + char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; + + disk_name(disk, 0, top); + bdevname(bdev, bottom); + + printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", + top, bottom); + } + + if (!t->queue_lock) + WARN_ON_ONCE(1); + else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { + unsigned long flags; + + spin_lock_irqsave(t->queue_lock, flags); + if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) + queue_flag_clear(QUEUE_FLAG_CLUSTER, t); + spin_unlock_irqrestore(t->queue_lock, flags); + } +} +EXPORT_SYMBOL(disk_stack_limits); + +/** * blk_queue_dma_pad - set pad mask * @q: the request queue for the device * @mask: pad mask @@ -396,11 +622,11 @@ int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size) { - if (q->max_hw_segments < 2 || q->max_phys_segments < 2) + if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2) return -EINVAL; /* make room for appending the drain */ - --q->max_hw_segments; - --q->max_phys_segments; + blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1); + blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1); q->dma_drain_needed = dma_drain_needed; q->dma_drain_buffer = buf; q->dma_drain_size = size; @@ -422,7 +648,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) __func__, mask); } - q->seg_boundary_mask = mask; + q->limits.seg_boundary_mask = mask; } EXPORT_SYMBOL(blk_queue_segment_boundary); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3ff9bba3379a..b1cd04087d6a 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -95,21 +95,36 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count) static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) { - int max_sectors_kb = q->max_sectors >> 1; + int max_sectors_kb = queue_max_sectors(q) >> 1; return queue_var_show(max_sectors_kb, (page)); } -static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page) +static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) { - return queue_var_show(q->hardsect_size, page); + return queue_var_show(queue_logical_block_size(q), page); +} + +static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_physical_block_size(q), page); +} + +static ssize_t queue_io_min_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_io_min(q), page); +} + +static ssize_t queue_io_opt_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_io_opt(q), page); } static ssize_t queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) { unsigned long max_sectors_kb, - max_hw_sectors_kb = q->max_hw_sectors >> 1, + max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, page_kb = 1 << (PAGE_CACHE_SHIFT - 10); ssize_t ret = queue_var_store(&max_sectors_kb, page, count); @@ -117,7 +132,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) return -EINVAL; spin_lock_irq(q->queue_lock); - q->max_sectors = max_sectors_kb << 1; + blk_queue_max_sectors(q, max_sectors_kb << 1); spin_unlock_irq(q->queue_lock); return ret; @@ -125,7 +140,7 @@ 
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) { - int max_hw_sectors_kb = q->max_hw_sectors >> 1; + int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; return queue_var_show(max_hw_sectors_kb, (page)); } @@ -249,7 +264,27 @@ static struct queue_sysfs_entry queue_iosched_entry = { static struct queue_sysfs_entry queue_hw_sector_size_entry = { .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, - .show = queue_hw_sector_size_show, + .show = queue_logical_block_size_show, +}; + +static struct queue_sysfs_entry queue_logical_block_size_entry = { + .attr = {.name = "logical_block_size", .mode = S_IRUGO }, + .show = queue_logical_block_size_show, +}; + +static struct queue_sysfs_entry queue_physical_block_size_entry = { + .attr = {.name = "physical_block_size", .mode = S_IRUGO }, + .show = queue_physical_block_size_show, +}; + +static struct queue_sysfs_entry queue_io_min_entry = { + .attr = {.name = "minimum_io_size", .mode = S_IRUGO }, + .show = queue_io_min_show, +}; + +static struct queue_sysfs_entry queue_io_opt_entry = { + .attr = {.name = "optimal_io_size", .mode = S_IRUGO }, + .show = queue_io_opt_show, }; static struct queue_sysfs_entry queue_nonrot_entry = { @@ -283,6 +318,10 @@ static struct attribute *default_attrs[] = { &queue_max_sectors_entry.attr, &queue_iosched_entry.attr, &queue_hw_sector_size_entry.attr, + &queue_logical_block_size_entry.attr, + &queue_physical_block_size_entry.attr, + &queue_io_min_entry.attr, + &queue_io_opt_entry.attr, &queue_nonrot_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, @@ -383,22 +422,26 @@ struct kobj_type blk_queue_ktype = { int blk_register_queue(struct gendisk *disk) { int ret; + struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; if (WARN_ON(!q)) return -ENXIO; - if (!q->request_fn) - return 0; + ret = blk_trace_init_sysfs(dev); + if (ret) + return ret; - ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj), - "%s", "queue"); + ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); if (ret < 0) return ret; kobject_uevent(&q->kobj, KOBJ_ADD); + if (!q->request_fn) + return 0; + ret = elv_register_queue(q); if (ret) { kobject_uevent(&q->kobj, KOBJ_REMOVE); diff --git a/block/blk-tag.c b/block/blk-tag.c index 3c518e3303ae..2e5cfeb59333 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag); int blk_queue_start_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; - unsigned max_depth, offset; + unsigned max_depth; int tag; if (unlikely((rq->cmd_flags & REQ_QUEUED))) { @@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) * to starve sync IO on behalf of flooding async IO. 
*/ max_depth = bqt->max_depth; - if (rq_is_sync(rq)) - offset = 0; - else - offset = max_depth >> 2; + if (!rq_is_sync(rq) && max_depth > 1) { + max_depth -= 2; + if (!max_depth) + max_depth = 1; + if (q->in_flight[0] > max_depth) + return 1; + } do { - tag = find_next_zero_bit(bqt->tag_map, max_depth, offset); + tag = find_first_zero_bit(bqt->tag_map, max_depth); if (tag >= max_depth) return 1; @@ -374,7 +377,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) rq->cmd_flags |= REQ_QUEUED; rq->tag = tag; bqt->tag_index[tag] = rq; - blkdev_dequeue_request(rq); + blk_start_request(rq); list_add(&rq->queuelist, &q->tag_busy_list); return 0; } diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 1ec0d503cacd..1ba7e0aca878 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -122,10 +122,8 @@ void blk_rq_timed_out_timer(unsigned long data) if (blk_mark_rq_complete(rq)) continue; blk_rq_timed_out(rq); - } else { - if (!next || time_after(next, rq->deadline)) - next = rq->deadline; - } + } else if (!next || time_after(next, rq->deadline)) + next = rq->deadline; } /* @@ -176,16 +174,14 @@ void blk_add_timer(struct request *req) BUG_ON(!list_empty(&req->timeout_list)); BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); - if (req->timeout) - req->deadline = jiffies + req->timeout; - else { - req->deadline = jiffies + q->rq_timeout; - /* - * Some LLDs, like scsi, peek at the timeout to prevent - * a command from being retried forever. - */ + /* + * Some LLDs, like scsi, peek at the timeout to prevent a + * command from being retried forever. + */ + if (!req->timeout) req->timeout = q->rq_timeout; - } + + req->deadline = jiffies + req->timeout; list_add_tail(&req->timeout_list, &q->timeout_list); /* diff --git a/block/blk.h b/block/blk.h index 79c85f7c9ff5..3fae6add5430 100644 --- a/block/blk.h +++ b/block/blk.h @@ -13,6 +13,9 @@ extern struct kobj_type blk_queue_ktype; void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); +int blk_rq_append_bio(struct request_queue *q, struct request *rq, + struct bio *bio); +void blk_dequeue_request(struct request *rq); void __blk_queue_free_tags(struct request_queue *q); void blk_unplug_work(struct work_struct *work); @@ -43,6 +46,43 @@ static inline void blk_clear_rq_complete(struct request *rq) clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); } +/* + * Internal elevator interface + */ +#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) + +static inline struct request *__elv_next_request(struct request_queue *q) +{ + struct request *rq; + + while (1) { + while (!list_empty(&q->queue_head)) { + rq = list_entry_rq(q->queue_head.next); + if (blk_do_ordered(q, &rq)) + return rq; + } + + if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) + return NULL; + } +} + +static inline void elv_activate_rq(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_activate_req_fn) + e->ops->elevator_activate_req_fn(q, rq); +} + +static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_deactivate_req_fn) + e->ops->elevator_deactivate_req_fn(q, rq); +} + #ifdef CONFIG_FAIL_IO_TIMEOUT int blk_should_fake_timeout(struct request_queue *); ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); @@ -64,7 +104,6 @@ int ll_front_merge_fn(struct request_queue *q, struct 
request *req, int attempt_back_merge(struct request_queue *q, struct request *rq); int attempt_front_merge(struct request_queue *q, struct request *rq); void blk_recalc_rq_segments(struct request *rq); -void blk_recalc_rq_sectors(struct request *rq, int nsect); void blk_queue_congestion_threshold(struct request_queue *q); @@ -112,9 +151,17 @@ static inline int blk_cpu_to_group(int cpu) #endif } +/* + * Contribute to IO statistics IFF: + * + * a) it's attached to a gendisk, and + * b) the queue had IO stats enabled when this request was started, and + * c) it's a file system request or a discard request + */ static inline int blk_do_io_stat(struct request *rq) { - return rq->rq_disk && blk_rq_io_stat(rq); + return rq->rq_disk && blk_rq_io_stat(rq) && + (blk_fs_request(rq) || blk_discard_rq(rq)); } #endif diff --git a/block/bsg.c b/block/bsg.c index dd81be455e00..e7d475254248 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -315,7 +315,6 @@ out: blk_put_request(rq); if (next_rq) { blk_rq_unmap_user(next_rq->bio); - next_rq->bio = NULL; blk_put_request(next_rq); } return ERR_PTR(ret); @@ -446,15 +445,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, } if (rq->next_rq) { - hdr->dout_resid = rq->data_len; - hdr->din_resid = rq->next_rq->data_len; + hdr->dout_resid = rq->resid_len; + hdr->din_resid = rq->next_rq->resid_len; blk_rq_unmap_user(bidi_bio); - rq->next_rq->bio = NULL; blk_put_request(rq->next_rq); } else if (rq_data_dir(rq) == READ) - hdr->din_resid = rq->data_len; + hdr->din_resid = rq->resid_len; else - hdr->dout_resid = rq->data_len; + hdr->dout_resid = rq->resid_len; /* * If the request generated a negative error number, return it @@ -468,7 +466,6 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, blk_rq_unmap_user(bio); if (rq->cmd != rq->__cmd) kfree(rq->cmd); - rq->bio = NULL; blk_put_request(rq); return ret; @@ -1065,6 +1062,11 @@ EXPORT_SYMBOL_GPL(bsg_register_queue); static struct cdev bsg_cdev; +static char *bsg_nodename(struct device *dev) +{ + return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); +} + static int __init bsg_init(void) { int ret, i; @@ -1085,6 +1087,7 @@ static int __init bsg_init(void) ret = PTR_ERR(bsg_class); goto destroy_kmemcache; } + bsg_class->nodename = bsg_nodename; ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); if (ret) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index a55a9bd75bd1..833ec18eaa63 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -122,7 +122,6 @@ struct cfq_data { struct cfq_queue *async_idle_cfqq; sector_t last_position; - unsigned long last_end_request; /* * tunables, see top of file @@ -349,8 +348,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2) else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) return rq2; - s1 = rq1->sector; - s2 = rq2->sector; + s1 = blk_rq_pos(rq1); + s2 = blk_rq_pos(rq2); last = cfqd->last_position; @@ -579,9 +578,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, * Sort strictly based on sector. Smallest to the left, * largest to the right. 
*/ - if (sector > cfqq->next_rq->sector) + if (sector > blk_rq_pos(cfqq->next_rq)) n = &(*p)->rb_right; - else if (sector < cfqq->next_rq->sector) + else if (sector < blk_rq_pos(cfqq->next_rq)) n = &(*p)->rb_left; else break; @@ -611,8 +610,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) return; cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; - __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector, - &parent, &p); + __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, + blk_rq_pos(cfqq->next_rq), &parent, &p); if (!__cfqq) { rb_link_node(&cfqq->p_node, parent, p); rb_insert_color(&cfqq->p_node, cfqq->p_root); @@ -760,7 +759,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq) cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", cfqd->rq_in_driver); - cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; + cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); } static void cfq_deactivate_request(struct request_queue *q, struct request *rq) @@ -949,10 +948,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, struct request *rq) { - if (rq->sector >= cfqd->last_position) - return rq->sector - cfqd->last_position; + if (blk_rq_pos(rq) >= cfqd->last_position) + return blk_rq_pos(rq) - cfqd->last_position; else - return cfqd->last_position - rq->sector; + return cfqd->last_position - blk_rq_pos(rq); } #define CIC_SEEK_THR 8 * 1024 @@ -996,7 +995,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, if (cfq_rq_close(cfqd, __cfqq->next_rq)) return __cfqq; - if (__cfqq->next_rq->sector < sector) + if (blk_rq_pos(__cfqq->next_rq) < sector) node = rb_next(&__cfqq->p_node); else node = rb_prev(&__cfqq->p_node); @@ -1253,7 +1252,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) BUG_ON(cfqd->busy_queues); - cfq_log(cfqd, "forced_dispatch=%d\n", dispatched); + cfq_log(cfqd, "forced_dispatch=%d", dispatched); return dispatched; } @@ -1282,7 +1281,7 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) if (!cfqd->active_cic) { struct cfq_io_context *cic = RQ_CIC(rq); - atomic_inc(&cic->ioc->refcount); + atomic_long_inc(&cic->ioc->refcount); cfqd->active_cic = cic; } } @@ -1918,10 +1917,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic, if (!cic->last_request_pos) sdist = 0; - else if (cic->last_request_pos < rq->sector) - sdist = rq->sector - cic->last_request_pos; + else if (cic->last_request_pos < blk_rq_pos(rq)) + sdist = blk_rq_pos(rq) - cic->last_request_pos; else - sdist = cic->last_request_pos - rq->sector; + sdist = cic->last_request_pos - blk_rq_pos(rq); /* * Don't allow the seek distance to get too large from the @@ -2071,7 +2070,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, cfq_update_io_seektime(cfqd, cic, rq); cfq_update_idle_window(cfqd, cfqq, cic); - cic->last_request_pos = rq->sector + rq->nr_sectors; + cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); if (cfqq == cfqd->active_queue) { /* @@ -2088,7 +2087,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || cfqd->busy_queues > 1) { del_timer(&cfqd->idle_slice_timer); - blk_start_queueing(cfqd->queue); + __blk_run_queue(cfqd->queue); } cfq_mark_cfqq_must_dispatch(cfqq); } @@ -2100,7 +2099,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * this new queue is RT and the current one 
is BE */ cfq_preempt_queue(cfqd, cfqq); - blk_start_queueing(cfqd->queue); + __blk_run_queue(cfqd->queue); } } @@ -2164,9 +2163,6 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) if (cfq_cfqq_sync(cfqq)) cfqd->sync_flight--; - if (!cfq_class_idle(cfqq)) - cfqd->last_end_request = now; - if (sync) RQ_CIC(rq)->last_end_request = now; @@ -2345,7 +2341,7 @@ static void cfq_kick_queue(struct work_struct *work) struct request_queue *q = cfqd->queue; spin_lock_irq(q->queue_lock); - blk_start_queueing(q); + __blk_run_queue(cfqd->queue); spin_unlock_irq(q->queue_lock); } @@ -2479,7 +2475,6 @@ static void *cfq_init_queue(struct request_queue *q) INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); - cfqd->last_end_request = jiffies; cfqd->cfq_quantum = cfq_quantum; cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index f87615dea46b..7865a34e0faa 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -568,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg) memcpy(&buts.name, &cbuts.name, 32); mutex_lock(&bdev->bd_mutex); - ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts); + ret = do_blk_trace_setup(q, b, bdev->bd_dev, bdev, &buts); mutex_unlock(&bdev->bd_mutex); if (ret) return ret; @@ -763,10 +763,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ return compat_put_int(arg, block_size(bdev)); case BLKSSZGET: /* get block device hardware sector size */ - return compat_put_int(arg, bdev_hardsect_size(bdev)); + return compat_put_int(arg, bdev_logical_block_size(bdev)); case BLKSECTGET: return compat_put_ushort(arg, - bdev_get_queue(bdev)->max_sectors); + queue_max_sectors(bdev_get_queue(bdev))); case BLKRASET: /* compatible, but no compat_ptr (!) */ case BLKFRASET: if (!capable(CAP_SYS_ADMIN)) diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index c4d991d4adef..b547cbca7b23 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); if (__rq) { - BUG_ON(sector != __rq->sector); + BUG_ON(sector != blk_rq_pos(__rq)); if (elv_rq_merge_ok(__rq, bio)) { ret = ELEVATOR_FRONT_MERGE; diff --git a/block/elevator.c b/block/elevator.c index 7073a9072577..ca861927ba41 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -33,17 +33,16 @@ #include <linux/compiler.h> #include <linux/delay.h> #include <linux/blktrace_api.h> -#include <trace/block.h> #include <linux/hash.h> #include <linux/uaccess.h> +#include <trace/events/block.h> + #include "blk.h" static DEFINE_SPINLOCK(elv_list_lock); static LIST_HEAD(elv_list); -DEFINE_TRACE(block_rq_abort); - /* * Merge hash stuff. 
*/ @@ -52,11 +51,7 @@ static const int elv_hash_shift = 6; #define ELV_HASH_FN(sec) \ (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) #define ELV_HASH_ENTRIES (1 << elv_hash_shift) -#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) -#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) - -DEFINE_TRACE(block_rq_insert); -DEFINE_TRACE(block_rq_issue); +#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) /* * Query io scheduler to see if the current process issuing bio may be @@ -120,9 +115,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio) * we can merge and sequence is ok, check if it's possible */ if (elv_rq_merge_ok(__rq, bio)) { - if (__rq->sector + __rq->nr_sectors == bio->bi_sector) + if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector) ret = ELEVATOR_BACK_MERGE; - else if (__rq->sector - bio_sectors(bio) == bio->bi_sector) + else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector) ret = ELEVATOR_FRONT_MERGE; } @@ -310,22 +305,6 @@ void elevator_exit(struct elevator_queue *e) } EXPORT_SYMBOL(elevator_exit); -static void elv_activate_rq(struct request_queue *q, struct request *rq) -{ - struct elevator_queue *e = q->elevator; - - if (e->ops->elevator_activate_req_fn) - e->ops->elevator_activate_req_fn(q, rq); -} - -static void elv_deactivate_rq(struct request_queue *q, struct request *rq) -{ - struct elevator_queue *e = q->elevator; - - if (e->ops->elevator_deactivate_req_fn) - e->ops->elevator_deactivate_req_fn(q, rq); -} - static inline void __elv_rqhash_del(struct request *rq) { hlist_del_init(&rq->hash); @@ -387,9 +366,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq) parent = *p; __rq = rb_entry(parent, struct request, rb_node); - if (rq->sector < __rq->sector) + if (blk_rq_pos(rq) < blk_rq_pos(__rq)) p = &(*p)->rb_left; - else if (rq->sector > __rq->sector) + else if (blk_rq_pos(rq) > blk_rq_pos(__rq)) p = &(*p)->rb_right; else return __rq; @@ -417,9 +396,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector) while (n) { rq = rb_entry(n, struct request, rb_node); - if (sector < rq->sector) + if (sector < blk_rq_pos(rq)) n = n->rb_left; - else if (sector > rq->sector) + else if (sector > blk_rq_pos(rq)) n = n->rb_right; else return rq; @@ -458,14 +437,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) break; if (pos->cmd_flags & stop_flags) break; - if (rq->sector >= boundary) { - if (pos->sector < boundary) + if (blk_rq_pos(rq) >= boundary) { + if (blk_rq_pos(pos) < boundary) continue; } else { - if (pos->sector >= boundary) + if (blk_rq_pos(pos) >= boundary) break; } - if (rq->sector >= pos->sector) + if (blk_rq_pos(rq) >= blk_rq_pos(pos)) break; } @@ -563,7 +542,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) * in_flight count again */ if (blk_account_rq(rq)) { - q->in_flight--; + q->in_flight[rq_is_sync(rq)]--; if (blk_sorted_rq(rq)) elv_deactivate_rq(q, rq); } @@ -592,6 +571,9 @@ void elv_drain_elevator(struct request_queue *q) */ void elv_quiesce_start(struct request_queue *q) { + if (!q->elevator) + return; + queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); /* @@ -599,7 +581,7 @@ void elv_quiesce_start(struct request_queue *q) */ elv_drain_elevator(q); while (q->rq.elvpriv) { - blk_start_queueing(q); + __blk_run_queue(q); spin_unlock_irq(q->queue_lock); msleep(10); spin_lock_irq(q->queue_lock); @@ -643,8 +625,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) * with anything. 
There's no point in delaying queue * processing. */ - blk_remove_plug(q); - blk_start_queueing(q); + __blk_run_queue(q); break; case ELEVATOR_INSERT_SORT: @@ -703,7 +684,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) if (unplug_it && blk_queue_plugged(q)) { int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] - - q->in_flight; + - queue_in_flight(q); if (nrq >= q->unplug_thresh) __generic_unplug_device(q); @@ -759,117 +740,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where, } EXPORT_SYMBOL(elv_add_request); -static inline struct request *__elv_next_request(struct request_queue *q) -{ - struct request *rq; - - while (1) { - while (!list_empty(&q->queue_head)) { - rq = list_entry_rq(q->queue_head.next); - if (blk_do_ordered(q, &rq)) - return rq; - } - - if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) - return NULL; - } -} - -struct request *elv_next_request(struct request_queue *q) -{ - struct request *rq; - int ret; - - while ((rq = __elv_next_request(q)) != NULL) { - if (!(rq->cmd_flags & REQ_STARTED)) { - /* - * This is the first time the device driver - * sees this request (possibly after - * requeueing). Notify IO scheduler. - */ - if (blk_sorted_rq(rq)) - elv_activate_rq(q, rq); - - /* - * just mark as started even if we don't start - * it, a request that has been delayed should - * not be passed by new incoming requests - */ - rq->cmd_flags |= REQ_STARTED; - trace_block_rq_issue(q, rq); - } - - if (!q->boundary_rq || q->boundary_rq == rq) { - q->end_sector = rq_end_sector(rq); - q->boundary_rq = NULL; - } - - if (rq->cmd_flags & REQ_DONTPREP) - break; - - if (q->dma_drain_size && rq->data_len) { - /* - * make sure space for the drain appears we - * know we can do this because max_hw_segments - * has been adjusted to be one fewer than the - * device can handle - */ - rq->nr_phys_segments++; - } - - if (!q->prep_rq_fn) - break; - - ret = q->prep_rq_fn(q, rq); - if (ret == BLKPREP_OK) { - break; - } else if (ret == BLKPREP_DEFER) { - /* - * the request may have been (partially) prepped. - * we need to keep this request in the front to - * avoid resource deadlock. REQ_STARTED will - * prevent other fs requests from passing this one. - */ - if (q->dma_drain_size && rq->data_len && - !(rq->cmd_flags & REQ_DONTPREP)) { - /* - * remove the space for the drain we added - * so that we don't add it again - */ - --rq->nr_phys_segments; - } - - rq = NULL; - break; - } else if (ret == BLKPREP_KILL) { - rq->cmd_flags |= REQ_QUIET; - __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); - } else { - printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); - break; - } - } - - return rq; -} -EXPORT_SYMBOL(elv_next_request); - -void elv_dequeue_request(struct request_queue *q, struct request *rq) -{ - BUG_ON(list_empty(&rq->queuelist)); - BUG_ON(ELV_ON_HASH(rq)); - - list_del_init(&rq->queuelist); - - /* - * the time frame between a request being removed from the lists - * and to it is freed is accounted as io that is in progress at - * the driver side. - */ - if (blk_account_rq(rq)) - q->in_flight++; -} - int elv_queue_empty(struct request_queue *q) { struct elevator_queue *e = q->elevator; @@ -939,7 +809,12 @@ void elv_abort_queue(struct request_queue *q) rq = list_entry_rq(q->queue_head.next); rq->cmd_flags |= REQ_QUIET; trace_block_rq_abort(q, rq); - __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); + /* + * Mark this request as started so we don't trigger + * any debug logic in the end I/O path. 
+ */ + blk_start_request(rq); + __blk_end_request_all(rq, -EIO); } } EXPORT_SYMBOL(elv_abort_queue); @@ -952,7 +827,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq) * request is released from the driver, io must be done */ if (blk_account_rq(rq)) { - q->in_flight--; + q->in_flight[rq_is_sync(rq)]--; if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) e->ops->elevator_completed_req_fn(q, rq); } @@ -967,11 +842,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq) if (!list_empty(&q->queue_head)) next = list_entry_rq(q->queue_head.next); - if (!q->in_flight && + if (!queue_in_flight(q) && blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) { blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); - blk_start_queueing(q); + __blk_run_queue(q); } } } @@ -1179,6 +1054,9 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, char elevator_name[ELV_NAME_MAX]; struct elevator_type *e; + if (!q->elevator) + return count; + strlcpy(elevator_name, name, sizeof(elevator_name)); strstrip(elevator_name); @@ -1202,10 +1080,15 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, ssize_t elv_iosched_show(struct request_queue *q, char *name) { struct elevator_queue *e = q->elevator; - struct elevator_type *elv = e->elevator_type; + struct elevator_type *elv; struct elevator_type *__e; int len = 0; + if (!q->elevator) + return sprintf(name, "none\n"); + + elv = e->elevator_type; + spin_lock(&elv_list_lock); list_for_each_entry(__e, &elv_list, list) { if (!strcmp(elv->elevator_name, __e->elevator_name)) diff --git a/block/genhd.c b/block/genhd.c index 1a4916e01732..f4c64c2b303a 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -852,11 +852,21 @@ static ssize_t disk_capability_show(struct device *dev, return sprintf(buf, "%x\n", disk->flags); } +static ssize_t disk_alignment_offset_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue)); +} + static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); +static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST @@ -875,6 +885,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_removable.attr, &dev_attr_ro.attr, &dev_attr_size.attr, + &dev_attr_alignment_offset.attr, &dev_attr_capability.attr, &dev_attr_stat.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST @@ -985,10 +996,20 @@ struct class block_class = { .name = "block", }; +static char *block_nodename(struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + + if (disk->nodename) + return disk->nodename(disk); + return NULL; +} + static struct device_type disk_type = { .name = "disk", .groups = disk_attr_groups, .release = disk_release, + .nodename = block_nodename, }; #ifdef CONFIG_PROC_FS diff --git a/block/ioctl.c b/block/ioctl.c index ad474d4bbcce..500e4c73cc52 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, bio->bi_private 
= &wait; bio->bi_sector = start; - if (len > q->max_hw_sectors) { - bio->bi_size = q->max_hw_sectors << 9; - len -= q->max_hw_sectors; - start += q->max_hw_sectors; + if (len > queue_max_hw_sectors(q)) { + bio->bi_size = queue_max_hw_sectors(q) << 9; + len -= queue_max_hw_sectors(q); + start += queue_max_hw_sectors(q); } else { bio->bi_size = len << 9; len = 0; @@ -311,9 +311,9 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ return put_int(arg, block_size(bdev)); case BLKSSZGET: /* get block device hardware sector size */ - return put_int(arg, bdev_hardsect_size(bdev)); + return put_int(arg, bdev_logical_block_size(bdev)); case BLKSECTGET: - return put_ushort(arg, bdev_get_queue(bdev)->max_sectors); + return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); case BLKRASET: case BLKFRASET: if(!capable(CAP_SYS_ADMIN)) diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 82a0ca2f6729..5f8e798ede4e 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p) static int sg_get_reserved_size(struct request_queue *q, int __user *p) { - unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); + unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9); return put_user(val, p); } @@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p) if (size < 0) return -EINVAL; - if (size > (q->max_sectors << 9)) - size = q->max_sectors << 9; + if (size > (queue_max_sectors(q) << 9)) + size = queue_max_sectors(q) << 9; q->sg_reserved_size = size; return 0; @@ -230,7 +230,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, hdr->info = 0; if (hdr->masked_status || hdr->host_status || hdr->driver_status) hdr->info |= SG_INFO_CHECK; - hdr->resid = rq->data_len; + hdr->resid = rq->resid_len; hdr->sb_len_wr = 0; if (rq->sense_len && hdr->sbp) { @@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, if (hdr->cmd_len > BLK_MAX_CDB) return -EINVAL; - if (hdr->dxfer_len > (q->max_hw_sectors << 9)) + if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) return -EIO; if (hdr->dxfer_len) @@ -500,9 +500,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, rq = blk_get_request(q, WRITE, __GFP_WAIT); rq->cmd_type = REQ_TYPE_BLOCK_PC; - rq->data = NULL; - rq->data_len = 0; - rq->extra_len = 0; rq->timeout = BLK_DEFAULT_SG_TIMEOUT; rq->cmd[0] = cmd; rq->cmd[4] = data; |
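
The blk-settings.c hunks above replace the single hardsect_size knob with a set of topology limits. As a rough usage sketch (not part of the patch), a low-level driver for a drive with 4 KiB physical and 512 byte logical sectors might program them at probe time roughly like this; foo_set_topology() and all of the constants are hypothetical, only the blk_queue_*() setters come from the code above:

#include <linux/blkdev.h>

/* Hedged sketch; values are illustrative, not taken from the patch. */
static void foo_set_topology(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);    /* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);  /* internal sector size */
	blk_queue_io_min(q, 4096);               /* avoid read-modify-write */
	blk_queue_io_opt(q, 64 * 1024);          /* preferred I/O granularity */
	/* 63-sector legacy offset: (63 * 512) % 4096 = 3584 bytes */
	blk_queue_alignment_offset(q, 3584);
	blk_queue_max_sectors(q, 1024);          /* cap requests at 512 KiB */
}

The ordering follows the setters' own dependencies: the physical block size is set before the alignment offset because blk_queue_alignment_offset() masks the offset with physical_block_size - 1.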
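
blk_set_default_limits(), blk_stack_limits() and disk_stack_limits() are aimed at stacking drivers. A hedged sketch of how an MD/DM-style driver might stage and merge component limits; stacked_add_components() and its arguments are invented for illustration and error handling is omitted:

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

static void stacked_add_components(struct gendisk *top,
				   struct block_device **parts, int count)
{
	struct queue_limits lim;
	int i;

	/* Stage the merged limits in a scratch struct first. */
	blk_set_default_limits(&lim);

	for (i = 0; i < count; i++) {
		struct request_queue *bq = bdev_get_queue(parts[i]);
		sector_t off = get_start_sect(parts[i]) << 9;  /* partition start, bytes */

		if (blk_stack_limits(&lim, &bq->limits, off) < 0)
			printk(KERN_WARNING "component %d is misaligned\n", i);
	}

	/* Commit the merged result to the top-level queue. */
	top->queue->limits = lim;
}

When the top device maps a single component one-to-one, disk_stack_limits() above performs the same merge directly against disk->queue->limits and prints the misalignment warning itself.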
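
blk-sysfs.c and genhd.c export the same topology to userspace. A small hedged userspace sketch (again not part of the patch) that reads the new attributes; "sda" is only an example device name:

#include <stdio.h>

static long read_attr(const char *disk, const char *attr)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/%s", disk, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	const char *disk = "sda";  /* hypothetical */

	printf("logical_block_size:  %ld\n", read_attr(disk, "queue/logical_block_size"));
	printf("physical_block_size: %ld\n", read_attr(disk, "queue/physical_block_size"));
	printf("minimum_io_size:     %ld\n", read_attr(disk, "queue/minimum_io_size"));
	printf("optimal_io_size:     %ld\n", read_attr(disk, "queue/optimal_io_size"));
	printf("alignment_offset:    %ld\n", read_attr(disk, "alignment_offset"));
	return 0;
}

The existing hw_sector_size attribute is kept and now reports the logical block size; the four new queue attributes and the per-disk alignment_offset attribute come from the hunks above.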
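
Much of the rest of the patch is a mechanical conversion from poking struct request fields (rq->sector, rq->nr_sectors, rq->hard_nr_sectors, rq->data_len) to the new accessors. A hedged sketch of what the same conversion looks like from a driver's point of view; foo_show_request() is hypothetical:

#include <linux/kernel.h>
#include <linux/blkdev.h>

static void foo_show_request(struct request *rq)
{
	sector_t pos     = blk_rq_pos(rq);      /* was rq->sector     */
	unsigned int nr  = blk_rq_sectors(rq);  /* was rq->nr_sectors */
	unsigned int len = blk_rq_bytes(rq);    /* was rq->data_len   */

	printk(KERN_DEBUG "request: %u sectors (%u bytes) at sector %llu\n",
	       nr, len, (unsigned long long)pos);
}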
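
elv_next_request() and elv_dequeue_request() are removed above, and callers such as blk-tag.c and elv_abort_queue() switch to blk_start_request() and __blk_end_request_all(). A hedged sketch of the resulting driver pattern; blk_fetch_request() (peek plus start in one call) is assumed to be provided by the blk-core.c part of this series, which is not shown in these hunks, and foo_request_fn() is hypothetical:

#include <linux/blkdev.h>

/* Called with the queue lock held, as request_fn always is. */
static void foo_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			/* Unsupported request type: fail it whole. */
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		/* ... service blk_rq_pos(rq)/blk_rq_sectors(rq) here ... */
		__blk_end_request_all(rq, 0);
	}
}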
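
Finally, the blk-tag.c hunk stops offsetting async tag allocation and instead caps the number of async requests in flight at the tag depth minus two (never below one), keeping a couple of tags free for sync I/O. A small hedged userspace illustration of that arithmetic, nothing more:

#include <stdio.h>

static unsigned int async_limit(unsigned int max_depth)
{
	if (max_depth <= 1)
		return max_depth;        /* too shallow to reserve anything */
	return (max_depth - 2) ? max_depth - 2 : 1;
}

int main(void)
{
	unsigned int depths[] = { 1, 2, 3, 4, 32, 256 };
	unsigned int i;

	for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("tag depth %3u -> async in-flight limit %3u\n",
		       depths[i], async_limit(depths[i]));
	return 0;
}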