author | Christoph Lameter <christoph@lameter.com> | 2005-06-23 00:08:19 -0700
---|---|---
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-23 09:45:09 -0700
commit | 1946089a109251655c5438d92c539bd2930e71ea (patch) |
tree | 819a492d5a7c4e6e695b150a86abeb99d5ac46eb /drivers/block |
parent | 8c5a09082f4e61a176382e96a831a0636b918602 (diff) |
download | linux-stable-1946089a109251655c5438d92c539bd2930e71ea.tar.gz linux-stable-1946089a109251655c5438d92c539bd2930e71ea.tar.bz2 linux-stable-1946089a109251655c5438d92c539bd2930e71ea.zip |
[PATCH] NUMA aware block device control structure allocation
Patch to allocate the control structures for ide devices on the node of
the device itself (for NUMA systems). The patch depends on the Slab API
change patch by Manfred and me (in mm) and the pcidev_to_node patch that I
posted today.
Does some realignment too.
Signed-off-by: Justin M. Forbes <jmforbes@linuxtx.org>
Signed-off-by: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Pravin Shelar <pravin@calsoftinc.com>
Signed-off-by: Shobhit Dayal <shobhit@calsoftinc.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
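
The pattern the patch applies throughout drivers/block is to switch control-structure allocations from kmalloc()/mempool_create() to their node-aware variants, keyed off the queue's home node. The sketch below is illustrative only: the struct and function names are made up for the example, and it assumes kmalloc_node() and mempool_create_node() from the Slab API change mentioned above plus the q->node field this series introduces.

```c
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative per-queue control structure (not a real kernel type). */
struct example_data {
	struct list_head *hash;
	mempool_t *pool;
};

static struct example_data *example_init_queue(request_queue_t *q,
					       kmem_cache_t *cachep,
					       int hash_entries)
{
	struct example_data *ed;

	/* Put the control structure itself on the queue's node. */
	ed = kmalloc_node(sizeof(*ed), GFP_KERNEL, q->node);
	if (!ed)
		return NULL;
	memset(ed, 0, sizeof(*ed));

	/* The hash table gets the same node-local placement. */
	ed->hash = kmalloc_node(hash_entries * sizeof(struct list_head),
				GFP_KERNEL, q->node);
	if (!ed->hash)
		goto out_free;

	/* mempool_create_node() keeps pool allocations node-local too. */
	ed->pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				       mempool_free_slab, cachep, q->node);
	if (!ed->pool)
		goto out_free_hash;

	return ed;

out_free_hash:
	kfree(ed->hash);
out_free:
	kfree(ed);
	return NULL;
}
```

This mirrors what the as-iosched.c and deadline-iosched.c hunks below do for their per-queue data, hash tables, and request pools.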
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/as-iosched.c | 8
-rw-r--r-- | drivers/block/deadline-iosched.c | 8
-rw-r--r-- | drivers/block/genhd.c | 13
-rw-r--r-- | drivers/block/ll_rw_blk.c | 30
4 files changed, 43 insertions, 16 deletions
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index 638db06de2be..3410b4d294b9 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -1871,20 +1871,22 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	if (!arq_pool)
 		return -ENOMEM;
 
-	ad = kmalloc(sizeof(*ad), GFP_KERNEL);
+	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
 	if (!ad)
 		return -ENOMEM;
 	memset(ad, 0, sizeof(*ad));
 
 	ad->q = q; /* Identify what queue the data belongs to */
 
-	ad->hash = kmalloc(sizeof(struct list_head)*AS_HASH_ENTRIES,GFP_KERNEL);
+	ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
+				GFP_KERNEL, q->node);
 	if (!ad->hash) {
 		kfree(ad);
 		return -ENOMEM;
 	}
 
-	ad->arq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, arq_pool);
+	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+					   mempool_free_slab, arq_pool, q->node);
 	if (!ad->arq_pool) {
 		kfree(ad->hash);
 		kfree(ad);
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index 7f79f3dd0165..4bc2fea73273 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -711,18 +711,20 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	if (!drq_pool)
 		return -ENOMEM;
 
-	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
 	if (!dd)
 		return -ENOMEM;
 	memset(dd, 0, sizeof(*dd));
 
-	dd->hash = kmalloc(sizeof(struct list_head)*DL_HASH_ENTRIES,GFP_KERNEL);
+	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+				GFP_KERNEL, q->node);
 	if (!dd->hash) {
 		kfree(dd);
 		return -ENOMEM;
 	}
 
-	dd->drq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, drq_pool);
+	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+					   mempool_free_slab, drq_pool, q->node);
 	if (!dd->drq_pool) {
 		kfree(dd->hash);
 		kfree(dd);
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 53f7d846b747..43805e4d31e9 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -582,10 +582,16 @@ struct seq_operations diskstats_op = {
 	.show	= diskstats_show
 };
 
-
 struct gendisk *alloc_disk(int minors)
 {
-	struct gendisk *disk = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
+	return alloc_disk_node(minors, -1);
+}
+
+struct gendisk *alloc_disk_node(int minors, int node_id)
+{
+	struct gendisk *disk;
+
+	disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
 	if (disk) {
 		memset(disk, 0, sizeof(struct gendisk));
 		if (!init_disk_stats(disk)) {
@@ -594,7 +600,7 @@ struct gendisk *alloc_disk(int minors)
 		}
 		if (minors > 1) {
 			int size = (minors - 1) * sizeof(struct hd_struct *);
-			disk->part = kmalloc(size, GFP_KERNEL);
+			disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
 			if (!disk->part) {
 				kfree(disk);
 				return NULL;
@@ -610,6 +616,7 @@ struct gendisk *alloc_disk(int minors)
 }
 
 EXPORT_SYMBOL(alloc_disk);
+EXPORT_SYMBOL(alloc_disk_node);
 
 struct kobject *get_disk(struct gendisk *disk)
 {
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 81fe3a0c1fe7..cd8cf302068c 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
+#include <linux/blkdev.h>
 
 /*
  * for max sense size
@@ -1645,7 +1646,8 @@ static int blk_init_free_list(request_queue_t *q)
 	init_waitqueue_head(&rl->wait[WRITE]);
 	init_waitqueue_head(&rl->drain);
 
-	rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);
+	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+				mempool_free_slab, request_cachep, q->node);
 
 	if (!rl->rq_pool)
 		return -ENOMEM;
@@ -1657,8 +1659,15 @@ static int __make_request(request_queue_t *, struct bio *);
 
 request_queue_t *blk_alloc_queue(int gfp_mask)
 {
-	request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask);
+	return blk_alloc_queue_node(gfp_mask, -1);
+}
+EXPORT_SYMBOL(blk_alloc_queue);
+
+request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
+{
+	request_queue_t *q;
 
+	q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
 	if (!q)
 		return NULL;
 
@@ -1671,8 +1680,7 @@ request_queue_t *blk_alloc_queue(int gfp_mask)
 
 	return q;
 }
-
-EXPORT_SYMBOL(blk_alloc_queue);
+EXPORT_SYMBOL(blk_alloc_queue_node);
 
 /**
  * blk_init_queue - prepare a request queue for use with a block device
@@ -1705,13 +1713,22 @@ EXPORT_SYMBOL(blk_alloc_queue);
  * blk_init_queue() must be paired with a blk_cleanup_queue() call
  * when the block device is deactivated (such as at module unload).
  **/
+
 request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
-	request_queue_t *q = blk_alloc_queue(GFP_KERNEL);
+	return blk_init_queue_node(rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_queue);
+
+request_queue_t *
+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+{
+	request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 
 	if (!q)
 		return NULL;
 
+	q->node = node_id;
 	if (blk_init_free_list(q))
 		goto out_init;
 
@@ -1754,8 +1771,7 @@ out_init:
 	kmem_cache_free(requestq_cachep, q);
 	return NULL;
 }
-
-EXPORT_SYMBOL(blk_init_queue);
+EXPORT_SYMBOL(blk_init_queue_node);
 
 int blk_get_queue(request_queue_t *q)
 {
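
On the caller side, the intent is to pass the device's node to the new *_node entry points, for example via the pcidev_to_node() helper from the companion patch mentioned in the changelog, while existing callers are unaffected because blk_alloc_queue(), blk_init_queue() and alloc_disk() now simply forward -1 ("no node preference"). A hypothetical driver probe path might look like the sketch below; all names are illustrative, and pcidev_to_node() is assumed from that companion patch rather than shown here.

```c
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

/* Illustrative request handler stub for the example driver. */
static void example_request_fn(request_queue_t *q)
{
}

/* Hypothetical probe path placing the queue and gendisk on the device's node. */
static struct gendisk *example_setup(struct pci_dev *pdev)
{
	int node = pcidev_to_node(pdev);	/* assumed helper from the companion patch */
	request_queue_t *q;
	struct gendisk *disk;

	q = blk_init_queue_node(example_request_fn, &example_lock, node);
	if (!q)
		return NULL;

	disk = alloc_disk_node(1, node);
	if (!disk) {
		blk_cleanup_queue(q);
		return NULL;
	}

	disk->queue = q;
	return disk;
}
```

Because the old entry points delegate with -1, every existing caller keeps exactly its previous behavior.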