author     Tony Luck <tony.luck@intel.com>   2006-06-23 13:46:23 -0700
committer  Tony Luck <tony.luck@intel.com>   2006-06-23 13:46:23 -0700
commit     8cf60e04a131310199d5776e2f9e915f0c468899
tree       373a68e88e6737713a0a5723d552cdeefffff929
parent     1323523f505606cfd24af6122369afddefc3b09d
parent     95eaa5fa8eb2c345244acd5f65b200b115ae8c65
Auto-update from upstream
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig.iosched     |   2
-rw-r--r--  block/as-iosched.c        |  64
-rw-r--r--  block/cfq-iosched.c       | 199
-rw-r--r--  block/deadline-iosched.c  |  52
-rw-r--r--  block/elevator.c          |   3
-rw-r--r--  block/genhd.c             |   7
-rw-r--r--  block/ll_rw_blk.c         |  15
7 files changed, 153 insertions(+), 189 deletions(-)
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index f3b7753aac99..48d090e266fc 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -40,7 +40,7 @@ config IOSCHED_CFQ
choice
prompt "Default I/O scheduler"
- default DEFAULT_AS
+ default DEFAULT_CFQ
help
Select the I/O scheduler which will be used by default for all
block devices.
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 0c750393be4a..1ec5df466708 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -96,7 +96,7 @@ struct as_data {
struct as_rq *next_arq[2]; /* next in sort order */
sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
- struct list_head *hash; /* request hash */
+ struct hlist_head *hash; /* request hash */
unsigned long exit_prob; /* probability a task will exit while
being waited on */
@@ -165,8 +165,7 @@ struct as_rq {
/*
* request hash, key is the ending offset (for back merge lookup)
*/
- struct list_head hash;
- unsigned int on_hash;
+ struct hlist_node hash;
/*
* expire fifo
@@ -282,17 +281,15 @@ static const int as_hash_shift = 6;
#define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
#define AS_HASH_ENTRIES (1 << as_hash_shift)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash)
static inline void __as_del_arq_hash(struct as_rq *arq)
{
- arq->on_hash = 0;
- list_del_init(&arq->hash);
+ hlist_del_init(&arq->hash);
}
static inline void as_del_arq_hash(struct as_rq *arq)
{
- if (arq->on_hash)
+ if (!hlist_unhashed(&arq->hash))
__as_del_arq_hash(arq);
}
@@ -300,10 +297,9 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
{
struct request *rq = arq->request;
- BUG_ON(arq->on_hash);
+ BUG_ON(!hlist_unhashed(&arq->hash));
- arq->on_hash = 1;
- list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
+ hlist_add_head(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
}
/*
@@ -312,31 +308,29 @@ static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
{
struct request *rq = arq->request;
- struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
+ struct hlist_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
- if (!arq->on_hash) {
+ if (hlist_unhashed(&arq->hash)) {
WARN_ON(1);
return;
}
- if (arq->hash.prev != head) {
- list_del(&arq->hash);
- list_add(&arq->hash, head);
+ if (&arq->hash != head->first) {
+ hlist_del(&arq->hash);
+ hlist_add_head(&arq->hash, head);
}
}
static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
{
- struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
- struct list_head *entry, *next = hash_list->next;
+ struct hlist_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
+ struct hlist_node *entry, *next;
+ struct as_rq *arq;
- while ((entry = next) != hash_list) {
- struct as_rq *arq = list_entry_hash(entry);
+ hlist_for_each_entry_safe(arq, entry, next, hash_list, hash) {
struct request *__rq = arq->request;
- next = entry->next;
-
- BUG_ON(!arq->on_hash);
+ BUG_ON(hlist_unhashed(&arq->hash));
if (!rq_mergeable(__rq)) {
as_del_arq_hash(arq);
@@ -353,9 +347,6 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
/*
* rb tree support functions
*/
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node) (rb_parent(node) != node)
-#define RB_CLEAR(node) (rb_set_parent(node, node))
#define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node)
#define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync])
#define rq_rb_key(rq) (rq)->sector
@@ -424,13 +415,13 @@ static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
- if (!ON_RB(&arq->rb_node)) {
+ if (!RB_EMPTY_NODE(&arq->rb_node)) {
WARN_ON(1);
return;
}
rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
- RB_CLEAR(&arq->rb_node);
+ RB_CLEAR_NODE(&arq->rb_node);
}
static struct request *
@@ -551,7 +542,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
struct rb_node *rbprev = rb_prev(&last->rb_node);
struct as_rq *arq_next, *arq_prev;
- BUG_ON(!ON_RB(&last->rb_node));
+ BUG_ON(!RB_EMPTY_NODE(&last->rb_node));
if (rbprev)
arq_prev = rb_entry_arq(rbprev);
@@ -1128,7 +1119,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
struct request *rq = arq->request;
const int data_dir = arq->is_sync;
- BUG_ON(!ON_RB(&arq->rb_node));
+ BUG_ON(!RB_EMPTY_NODE(&arq->rb_node));
as_antic_stop(ad);
ad->antic_status = ANTIC_OFF;
@@ -1253,7 +1244,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
*/
if (reads) {
- BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
+ BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));
if (writes && ad->batch_data_dir == REQ_SYNC)
/*
@@ -1277,7 +1268,7 @@ static int as_dispatch_request(request_queue_t *q, int force)
if (writes) {
dispatch_writes:
- BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
+ BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));
if (ad->batch_data_dir == REQ_SYNC) {
ad->changed_batch = 1;
@@ -1345,7 +1336,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
arq->state = AS_RQ_NEW;
if (rq_data_dir(arq->request) == READ
- || current->flags&PF_SYNCWRITE)
+ || (arq->request->flags & REQ_RW_SYNC))
arq->is_sync = 1;
else
arq->is_sync = 0;
@@ -1597,12 +1588,11 @@ static int as_set_request(request_queue_t *q, struct request *rq,
if (arq) {
memset(arq, 0, sizeof(*arq));
- RB_CLEAR(&arq->rb_node);
+ RB_CLEAR_NODE(&arq->rb_node);
arq->request = rq;
arq->state = AS_RQ_PRESCHED;
arq->io_context = NULL;
- INIT_LIST_HEAD(&arq->hash);
- arq->on_hash = 0;
+ INIT_HLIST_NODE(&arq->hash);
INIT_LIST_HEAD(&arq->fifo);
rq->elevator_private = arq;
return 0;
@@ -1662,7 +1652,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
ad->q = q; /* Identify what queue the data belongs to */
- ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
+ ad->hash = kmalloc_node(sizeof(struct hlist_head)*AS_HASH_ENTRIES,
GFP_KERNEL, q->node);
if (!ad->hash) {
kfree(ad);
@@ -1684,7 +1674,7 @@ static void *as_init_queue(request_queue_t *q, elevator_t *e)
INIT_WORK(&ad->antic_work, as_work_handler, q);
for (i = 0; i < AS_HASH_ENTRIES; i++)
- INIT_LIST_HEAD(&ad->hash[i]);
+ INIT_HLIST_HEAD(&ad->hash[i]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
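
The as-iosched.c hunks above (and the matching deadline-iosched.c hunks further down) convert the request hash from struct list_head buckets plus a separate on_hash flag to struct hlist_head buckets, where hlist_unhashed() answers the "is it on the hash?" question directly and each bucket head is a single pointer instead of two. A minimal userspace sketch of that pattern follows; the hlist helpers are re-created here only so the example compiles on its own, and demo_rq is a hypothetical stand-in for struct as_rq:

#include <stdio.h>
#include <stddef.h>

/* Simplified re-creation of the kernel's hlist primitives, for illustration only. */
struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

static void INIT_HLIST_HEAD(struct hlist_head *h) { h->first = NULL; }
static void INIT_HLIST_NODE(struct hlist_node *n) { n->next = NULL; n->pprev = NULL; }

/* A node that is not on any list has a NULL pprev: no separate flag needed. */
static int hlist_unhashed(const struct hlist_node *n) { return !n->pprev; }

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		struct hlist_node *next = n->next;
		struct hlist_node **pprev = n->pprev;

		*pprev = next;
		if (next)
			next->pprev = pprev;
		INIT_HLIST_NODE(n);
	}
}

/* Hypothetical stand-in for struct as_rq / struct deadline_rq. */
struct demo_rq {
	struct hlist_node hash;
	unsigned long end_sector;	/* hash key: the request's end offset */
};

int main(void)
{
	struct hlist_head bucket;
	struct demo_rq rq = { .end_sector = 4096 };

	INIT_HLIST_HEAD(&bucket);
	INIT_HLIST_NODE(&rq.hash);

	printf("on hash before add: %d\n", !hlist_unhashed(&rq.hash));	/* 0 */
	hlist_add_head(&rq.hash, &bucket);	/* cf. as_add_arq_hash() */
	printf("on hash after add:  %d\n", !hlist_unhashed(&rq.hash));	/* 1 */
	hlist_del_init(&rq.hash);		/* cf. __as_del_arq_hash() */
	printf("on hash after del:  %d\n", !hlist_unhashed(&rq.hash));	/* 0 */
	return 0;
}

This is why the patch can delete arq->on_hash and drq->on_hash outright: the unhashed state is encoded in the node itself, and each hash bucket shrinks from two pointers to one.
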
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e2e6ad0a158e..e25223e147a2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -26,7 +26,7 @@ static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 70;
+static int cfq_slice_idle = HZ / 125;
#define CFQ_IDLE_GRACE (HZ / 10)
#define CFQ_SLICE_SCALE (5)
@@ -60,11 +60,6 @@ static DEFINE_SPINLOCK(cfq_exit_lock);
/*
* rb-tree defines
*/
-#define RB_EMPTY(node) ((node)->rb_node == NULL)
-#define RB_CLEAR(node) do { \
- memset(node, 0, sizeof(*node)); \
-} while (0)
-#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq) (rq)->sector
@@ -123,8 +118,6 @@ struct cfq_data {
*/
struct hlist_head *crq_hash;
- unsigned int max_queued;
-
mempool_t *crq_pool;
int rq_in_driver;
@@ -279,8 +272,6 @@ static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsi
static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
-#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
-
/*
* lots of deadline iosched dupes, can be abstracted later...
*/
@@ -336,7 +327,7 @@ static int cfq_queue_empty(request_queue_t *q)
static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
{
- if (rw == READ || process_sync(task))
+ if (rw == READ || rw == WRITE_SYNC)
return task->pid;
return CFQ_KEY_ASYNC;
@@ -563,7 +554,7 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
rb_erase(&crq->rb_node, &cfqq->sort_list);
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
cfq_del_cfqq_rr(cfqd, cfqq);
}
@@ -910,13 +901,15 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
return cfqq;
}
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+
static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct cfq_io_context *cic;
unsigned long sl;
- WARN_ON(!RB_EMPTY(&cfqq->sort_list));
+ WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
WARN_ON(cfqq != cfqd->active_queue);
/*
@@ -943,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* fair distribution of slice time for a process doing back-to-back
* seeks. so allow a little bit of time for him to submit a new rq
*/
- if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+ if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
sl = 2;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
@@ -954,11 +947,15 @@ static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = crq->cfq_queue;
+ struct request *rq;
cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
cfq_remove_request(crq->request);
cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
elv_dispatch_sort(q, crq->request);
+
+ rq = list_entry(q->queue_head.prev, struct request, queuelist);
+ cfqd->last_sector = rq->sector + rq->nr_sectors;
}
/*
@@ -1040,10 +1037,12 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
* if queue has requests, dispatch one. if not, check if
* enough slice is left to wait for one
*/
- if (!RB_EMPTY(&cfqq->sort_list))
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
goto keep_queue;
- else if (cfq_cfqq_class_sync(cfqq) &&
- time_before(now, cfqq->slice_end)) {
+ else if (cfq_cfqq_dispatched(cfqq)) {
+ cfqq = NULL;
+ goto keep_queue;
+ } else if (cfq_cfqq_class_sync(cfqq)) {
if (cfq_arm_slice_timer(cfqd, cfqq))
return NULL;
}
@@ -1062,7 +1061,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
int dispatched = 0;
- BUG_ON(RB_EMPTY(&cfqq->sort_list));
+ BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
do {
struct cfq_rq *crq;
@@ -1086,14 +1085,13 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->active_cic = crq->io_context;
}
- if (RB_EMPTY(&cfqq->sort_list))
+ if (RB_EMPTY_ROOT(&cfqq->sort_list))
break;
} while (dispatched < max_dispatch);
/*
- * if slice end isn't set yet, set it. if at least one request was
- * sync, use the sync time slice value
+ * if slice end isn't set yet, set it.
*/
if (!cfqq->slice_end)
cfq_set_prio_slice(cfqd, cfqq);
@@ -1104,7 +1102,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
*/
if ((!cfq_cfqq_sync(cfqq) &&
cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
- cfq_class_idle(cfqq))
+ cfq_class_idle(cfqq) ||
+ !cfq_cfqq_idle_window(cfqq))
cfq_slice_expired(cfqd, 0);
return dispatched;
@@ -1113,10 +1112,11 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
static int
cfq_forced_dispatch_cfqqs(struct list_head *list)
{
- int dispatched = 0;
struct cfq_queue *cfqq, *next;
struct cfq_rq *crq;
+ int dispatched;
+ dispatched = 0;
list_for_each_entry_safe(cfqq, next, list, cfq_list) {
while ((crq = cfqq->next_crq)) {
cfq_dispatch_insert(cfqq->cfqd->queue, crq);
@@ -1124,6 +1124,7 @@ cfq_forced_dispatch_cfqqs(struct list_head *list)
}
BUG_ON(!list_empty(&cfqq->fifo));
}
+
return dispatched;
}
@@ -1150,7 +1151,8 @@ static int
cfq_dispatch_requests(request_queue_t *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq;
+ struct cfq_queue *cfqq, *prev_cfqq;
+ int dispatched;
if (!cfqd->busy_queues)
return 0;
@@ -1158,10 +1160,17 @@ cfq_dispatch_requests(request_queue_t *q, int force)
if (unlikely(force))
return cfq_forced_dispatch(cfqd);
- cfqq = cfq_select_queue(cfqd);
- if (cfqq) {
+ dispatched = 0;
+ prev_cfqq = NULL;
+ while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
int max_dispatch;
+ /*
+ * Don't repeat dispatch from the previous queue.
+ */
+ if (prev_cfqq == cfqq)
+ break;
+
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
del_timer(&cfqd->idle_slice_timer);
@@ -1170,10 +1179,19 @@ cfq_dispatch_requests(request_queue_t *q, int force)
if (cfq_class_idle(cfqq))
max_dispatch = 1;
- return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+ dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+
+ /*
+ * If the dispatch cfqq has idling enabled and is still
+ * the active queue, break out.
+ */
+ if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
+ break;
+
+ prev_cfqq = cfqq;
}
- return 0;
+ return dispatched;
}
/*
@@ -1379,25 +1397,28 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
{
struct cfq_data *cfqd = cic->key;
struct cfq_queue *cfqq;
- if (cfqd) {
- spin_lock(cfqd->queue->queue_lock);
- cfqq = cic->cfqq[ASYNC];
- if (cfqq) {
- struct cfq_queue *new_cfqq;
- new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
- cic->ioc->task, GFP_ATOMIC);
- if (new_cfqq) {
- cic->cfqq[ASYNC] = new_cfqq;
- cfq_put_queue(cfqq);
- }
- }
- cfqq = cic->cfqq[SYNC];
- if (cfqq) {
- cfq_mark_cfqq_prio_changed(cfqq);
- cfq_init_prio_data(cfqq);
+
+ if (unlikely(!cfqd))
+ return;
+
+ spin_lock(cfqd->queue->queue_lock);
+
+ cfqq = cic->cfqq[ASYNC];
+ if (cfqq) {
+ struct cfq_queue *new_cfqq;
+ new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+ GFP_ATOMIC);
+ if (new_cfqq) {
+ cic->cfqq[ASYNC] = new_cfqq;
+ cfq_put_queue(cfqq);
}
- spin_unlock(cfqd->queue->queue_lock);
}
+
+ cfqq = cic->cfqq[SYNC];
+ if (cfqq)
+ cfq_mark_cfqq_prio_changed(cfqq);
+
+ spin_unlock(cfqd->queue->queue_lock);
}
/*
@@ -1454,7 +1475,6 @@ retry:
INIT_HLIST_NODE(&cfqq->cfq_hash);
INIT_LIST_HEAD(&cfqq->cfq_list);
- RB_CLEAR_ROOT(&cfqq->sort_list);
INIT_LIST_HEAD(&cfqq->fifo);
cfqq->key = key;
@@ -1466,8 +1486,7 @@ retry:
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
- if (!cfqd->hw_tag)
- cfq_mark_cfqq_idle_window(cfqq);
+ cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
cfq_init_prio_data(cfqq);
}
@@ -1658,7 +1677,8 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
int enable_idle = cfq_cfqq_idle_window(cfqq);
- if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
+ if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
+ (cfqd->hw_tag && CIC_SEEKY(cic)))
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -1688,7 +1708,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
return 0;
if (!cfqq)
- return 1;
+ return 0;
if (cfq_class_idle(cfqq))
return 1;
@@ -1720,7 +1740,7 @@ static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
cfqq->slice_end = cfqq->slice_left + jiffies;
- __cfq_slice_expired(cfqd, cfqq, 1);
+ cfq_slice_expired(cfqd, 1);
__cfq_set_active_queue(cfqd, cfqq);
}
@@ -1745,11 +1765,7 @@ static void
cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rq *crq)
{
- struct cfq_io_context *cic;
-
- cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
-
- cic = crq->io_context;
+ struct cfq_io_context *cic = crq->io_context;
/*
* we never wait for an async request and we don't allow preemption
@@ -1839,11 +1855,23 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
cfqq->service_last = now;
cfq_resort_rr_list(cfqq, 0);
}
- cfq_schedule_dispatch(cfqd);
}
- if (cfq_crq_is_sync(crq))
+ if (sync)
crq->io_context->last_end_request = now;
+
+ /*
+ * If this is the active queue, check if it needs to be expired,
+ * or if we want to idle in case it has no pending requests.
+ */
+ if (cfqd->active_queue == cfqq) {
+ if (time_after(now, cfqq->slice_end))
+ cfq_slice_expired(cfqd, 0);
+ else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
+ if (!cfq_arm_slice_timer(cfqd, cfqq))
+ cfq_schedule_dispatch(cfqd);
+ }
+ }
}
static struct request *
@@ -1910,7 +1938,6 @@ static inline int
__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct task_struct *task, int rw)
{
-#if 1
if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
!cfq_cfqq_must_alloc_slice(cfqq)) {
cfq_mark_cfqq_must_alloc_slice(cfqq);
@@ -1918,39 +1945,6 @@ __cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
return ELV_MQUEUE_MAY;
-#else
- if (!cfqq || task->flags & PF_MEMALLOC)
- return ELV_MQUEUE_MAY;
- if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
- if (cfq_cfqq_wait_request(cfqq))
- return ELV_MQUEUE_MUST;
-
- /*
- * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
- * can quickly flood the queue with writes from a single task
- */
- if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
- cfq_mark_cfqq_must_alloc_slice(cfqq);
- return ELV_MQUEUE_MUST;
- }
-
- return ELV_MQUEUE_MAY;
- }
- if (cfq_class_idle(cfqq))
- return ELV_MQUEUE_NO;
- if (cfqq->allocated[rw] >= cfqd->max_queued) {
- struct io_context *ioc = get_io_context(GFP_ATOMIC);
- int ret = ELV_MQUEUE_NO;
-
- if (ioc && ioc->nr_batch_requests)
- ret = ELV_MQUEUE_MAY;
-
- put_io_context(ioc);
- return ret;
- }
-
- return ELV_MQUEUE_MAY;
-#endif
}
static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
@@ -1979,16 +1973,13 @@ static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- struct request_list *rl = &q->rq;
- if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+ if (unlikely(cfqd->rq_starved)) {
+ struct request_list *rl = &q->rq;
+
smp_mb();
if (waitqueue_active(&rl->wait[READ]))
wake_up(&rl->wait[READ]);
- }
-
- if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
- smp_mb();
if (waitqueue_active(&rl->wait[WRITE]))
wake_up(&rl->wait[WRITE]);
}
@@ -2062,7 +2053,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
if (crq) {
- RB_CLEAR(&crq->rb_node);
+ RB_CLEAR_NODE(&crq->rb_node);
crq->rb_key = 0;
crq->request = rq;
INIT_HLIST_NODE(&crq->hash);
@@ -2148,16 +2139,13 @@ static void cfq_idle_slice_timer(unsigned long data)
* only expire and reinvoke request handler, if there are
* other queues with pending requests
*/
- if (!cfqd->busy_queues) {
- cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
- add_timer(&cfqd->idle_slice_timer);
+ if (!cfqd->busy_queues)
goto out_cont;
- }
/*
* not expired and it has a request pending, let it dispatch
*/
- if (!RB_EMPTY(&cfqq->sort_list)) {
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
cfq_mark_cfqq_must_dispatch(cfqq);
goto out_kick;
}
@@ -2278,9 +2266,6 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->queue = q;
- cfqd->max_queued = q->nr_requests / 4;
- q->nr_batching = cfq_queued;
-
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
cfqd->idle_slice_timer.data = (unsigned long) cfqd;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c94de8e12fbf..4469dd84623c 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -30,8 +30,7 @@ static const int deadline_hash_shift = 5;
#define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES (1 << deadline_hash_shift)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define list_entry_hash(ptr) list_entry((ptr), struct deadline_rq, hash)
-#define ON_HASH(drq) (drq)->on_hash
+#define ON_HASH(drq) (!hlist_unhashed(&(drq)->hash))
struct deadline_data {
/*
@@ -48,7 +47,7 @@ struct deadline_data {
* next in sort order. read, write or both are NULL
*/
struct deadline_rq *next_drq[2];
- struct list_head *hash; /* request hash */
+ struct hlist_head *hash; /* request hash */
unsigned int batching; /* number of sequential requests made */
sector_t last_sector; /* head position */
unsigned int starved; /* times reads have starved writes */
@@ -79,8 +78,7 @@ struct deadline_rq {
/*
* request hash, key is the ending offset (for back merge lookup)
*/
- struct list_head hash;
- char on_hash;
+ struct hlist_node hash;
/*
* expire fifo
@@ -100,8 +98,7 @@ static kmem_cache_t *drq_pool;
*/
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
- drq->on_hash = 0;
- list_del_init(&drq->hash);
+ hlist_del_init(&drq->hash);
}
static inline void deadline_del_drq_hash(struct deadline_rq *drq)
@@ -117,8 +114,7 @@ deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
BUG_ON(ON_HASH(drq));
- drq->on_hash = 1;
- list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
+ hlist_add_head(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}
/*
@@ -128,26 +124,24 @@ static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
struct request *rq = drq->request;
- struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
+ struct hlist_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
- if (ON_HASH(drq) && drq->hash.prev != head) {
- list_del(&drq->hash);
- list_add(&drq->hash, head);
+ if (ON_HASH(drq) && &drq->hash != head->first) {
+ hlist_del(&drq->hash);
+ hlist_add_head(&drq->hash, head);
}
}
static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
- struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
- struct list_head *entry, *next = hash_list->next;
+ struct hlist_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+ struct hlist_node *entry, *next;
+ struct deadline_rq *drq;
- while ((entry = next) != hash_list) {
- struct deadline_rq *drq = list_entry_hash(entry);
+ hlist_for_each_entry_safe(drq, entry, next, hash_list, hash) {
struct request *__rq = drq->request;
- next = entry->next;
-
BUG_ON(!ON_HASH(drq));
if (!rq_mergeable(__rq)) {
@@ -165,9 +159,6 @@ deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
/*
* rb tree support functions
*/
-#define RB_EMPTY(root) ((root)->rb_node == NULL)
-#define ON_RB(node) (rb_parent(node) != node)
-#define RB_CLEAR(node) (rb_set_parent(node, node))
#define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq) (rq)->sector
@@ -226,9 +217,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
dd->next_drq[data_dir] = rb_entry_drq(rbnext);
}
- BUG_ON(!ON_RB(&drq->rb_node));
+ BUG_ON(!RB_EMPTY_NODE(&drq->rb_node));
rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
- RB_CLEAR(&drq->rb_node);
+ RB_CLEAR_NODE(&drq->rb_node);
}
static struct request *
@@ -502,7 +493,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
*/
if (reads) {
- BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
+ BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
if (writes && (dd->starved++ >= dd->writes_starved))
goto dispatch_writes;
@@ -518,7 +509,7 @@ static int deadline_dispatch_requests(request_queue_t *q, int force)
if (writes) {
dispatch_writes:
- BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
+ BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
dd->starved = 0;
@@ -625,7 +616,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
return NULL;
memset(dd, 0, sizeof(*dd));
- dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+ dd->hash = kmalloc_node(sizeof(struct hlist_head)*DL_HASH_ENTRIES,
GFP_KERNEL, q->node);
if (!dd->hash) {
kfree(dd);
@@ -641,7 +632,7 @@ static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
}
for (i = 0; i < DL_HASH_ENTRIES; i++)
- INIT_LIST_HEAD(&dd->hash[i]);
+ INIT_HLIST_HEAD(&dd->hash[i]);
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -674,11 +665,10 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
drq = mempool_alloc(dd->drq_pool, gfp_mask);
if (drq) {
memset(drq, 0, sizeof(*drq));
- RB_CLEAR(&drq->rb_node);
+ RB_CLEAR_NODE(&drq->rb_node);
drq->request = rq;
- INIT_LIST_HEAD(&drq->hash);
- drq->on_hash = 0;
+ INIT_HLIST_NODE(&drq->hash);
INIT_LIST_HEAD(&drq->fifo);
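
deadline-iosched.c gets the same list_head-to-hlist_head conversion, and both schedulers keep the same hashing rule: rq_hash_key(rq) is (rq)->sector + (rq)->nr_sectors, i.e. the request's end offset, because the hash exists to find back-merge candidates. A toy sketch of that lookup idea, with hypothetical structures and a plain array standing in for the real hashed buckets:

#include <stdio.h>

/* Hypothetical, simplified request descriptor. */
struct toy_rq {
	unsigned long sector;		/* start sector */
	unsigned long nr_sectors;	/* length in sectors */
};

/* Mirrors rq_hash_key() above: a request is keyed by its end offset. */
static unsigned long rq_hash_key(const struct toy_rq *rq)
{
	return rq->sector + rq->nr_sectors;
}

int main(void)
{
	struct toy_rq queued[] = { { 100, 8 }, { 200, 16 }, { 300, 4 } };
	unsigned long new_start = 216;	/* new request begins at sector 216 */
	unsigned int i;

	/*
	 * deadline_find_drq_hash()/as_find_arq_hash() do this with hashed
	 * hlist buckets instead of a linear scan: a queued request whose
	 * end offset equals the new request's start can be back-merged.
	 */
	for (i = 0; i < sizeof(queued) / sizeof(queued[0]); i++) {
		if (rq_hash_key(&queued[i]) == new_start)
			printf("back-merge candidate: sectors %lu..%lu\n",
			       queued[i].sector,
			       queued[i].sector + queued[i].nr_sectors - 1);
	}
	return 0;
}
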
diff --git a/block/elevator.c b/block/elevator.c
index a0afdd317cef..d00b283f31d2 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -850,12 +850,9 @@ fail_register:
* one again (along with re-adding the sysfs dir)
*/
elevator_exit(e);
- e = NULL;
q->elevator = old_elevator;
elv_register_queue(q);
clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
- if (e)
- kobject_put(&e->kobj);
return 0;
}
diff --git a/block/genhd.c b/block/genhd.c
index 5a8d3bf02f17..8d7339511e5e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,8 +17,7 @@
#include <linux/buffer_head.h>
#include <linux/mutex.h>
-static struct subsystem block_subsys;
-
+struct subsystem block_subsys;
static DEFINE_MUTEX(block_subsys_lock);
/*
@@ -511,9 +510,7 @@ static struct kset_uevent_ops block_uevent_ops = {
.uevent = block_uevent,
};
-/* declare block_subsys. */
-static decl_subsys(block, &ktype_block, &block_uevent_ops);
-
+decl_subsys(block, &ktype_block, &block_uevent_ops);
/*
* aggregate disk stat collector. Uses the same stats that the sysfs
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4b7..0603ab2f3692 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -638,7 +638,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
/* Assume anything <= 4GB can be handled by IOMMU.
Actually some IOMMUs can handle everything, but I don't
know of a way to test this here. */
- if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+ if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->bounce_pfn = max_low_pfn;
#else
@@ -1663,6 +1663,8 @@ static void blk_unplug_timeout(unsigned long data)
**/
void blk_start_queue(request_queue_t *q)
{
+ WARN_ON(!irqs_disabled());
+
clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
/*
@@ -1878,7 +1880,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
* get dealt with eventually.
*
* The queue spin lock must be held while manipulating the requests on the
- * request queue.
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
*
* Function returns a pointer to the initialized request queue, or NULL if
* it didn't succeed.
@@ -2824,6 +2827,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
if (unlikely(bio_barrier(bio)))
req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+ if (bio_sync(bio))
+ req->flags |= REQ_RW_SYNC;
+
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
@@ -3359,12 +3365,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
*/
static void blk_done_softirq(struct softirq_action *h)
{
- struct list_head *cpu_list;
- LIST_HEAD(local_list);
+ struct list_head *cpu_list, local_list;
local_irq_disable();
cpu_list = &__get_cpu_var(blk_cpu_done);
- list_splice_init(cpu_list, &local_list);
+ list_replace_init(cpu_list, &local_list);
local_irq_enable();
while (!list_empty(&local_list)) {