| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-09 18:07:15 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-09 18:07:15 -0700 |
| commit | 26b0332e30c7f93e780aaa054bd84e3437f84354 | |
| tree | e9cf240b67bf7eebae9fabbdba4e6a0fdfd359d7 /drivers/dma/dmaengine.c | |
| parent | 640414171818c6293c23e74a28d1c69b2a1a7fe5 | |
| parent | 4a43f394a08214eaf92cdd8ce3eae75e555323d8 | |
Merge tag 'dmaengine-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine update from Dan Williams:
"Collection of random updates to the core and some end-driver fixups
for ioatdma and mv_xor:
- NUMA aware channel allocation
- Cleanup dmatest debugfs interface
- ioat: make raid-support Atom only
- mv_xor: big endian
Aside from the top three commits these have all had some soak time in
-next. The top commit fixes a recent build breakage.
It has been a long while since my last pull request, hopefully it does
not show. Thanks to Vinod for keeping an eye on drivers/dma/ this
past year"
* tag 'dmaengine-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
dmaengine: dma_sync_wait and dma_find_channel undefined
MAINTAINERS: update email for Dan Williams
dma: mv_xor: Fix incorrect error path
ioatdma: silence GCC warnings
dmaengine: make dma_channel_rebalance() NUMA aware
dmaengine: make dma_submit_error() return an error code
ioatdma: disable RAID on non-Atom platforms and reenable unaligned copies
mv_xor: support big endian systems using descriptor swap feature
mv_xor: use {readl, writel}_relaxed instead of __raw_{readl, writel}
dmatest: print message on debug level in case of no error
dmatest: remove IS_ERR_OR_NULL checks of debugfs calls
dmatest: make module parameters writable
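
One commit in the list above, "dmaengine: make dma_submit_error() return an error code", changes that helper so a failed submission reports the cookie's negative value rather than a bare true/false. As a hedged illustration of what that buys a client, here is a minimal memcpy-offload sketch using the generic dmaengine client helpers as found in current kernels; issue_memcpy() and its parameters are hypothetical, and it assumes the channel was already requested and the buffers are DMA-mapped:

```c
#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Hypothetical helper: queue one memcpy on an already-acquired channel. */
static int issue_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
			size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;	/* now a negative errno, not just "it failed" */

	dma_async_issue_pending(chan);
	return 0;
}
```

The point of the change is visible in the middle of the sketch: the caller can forward the reason the submission failed instead of inventing an error code of its own.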
Diffstat (limited to 'drivers/dma/dmaengine.c')
| -rw-r--r-- | drivers/dma/dmaengine.c | 55 |

1 file changed, 27 insertions, 28 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 99af4db5948b..eee16b01fa89 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -382,20 +382,30 @@ void dma_issue_pending_all(void)
 EXPORT_SYMBOL(dma_issue_pending_all);
 
 /**
- * nth_chan - returns the nth channel of the given capability
+ * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
+ */
+static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
+{
+	int node = dev_to_node(chan->device->dev);
+	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
+}
+
+/**
+ * min_chan - returns the channel with min count and in the same numa-node as the cpu
  * @cap: capability to match
- * @n: nth channel desired
+ * @cpu: cpu index which the channel should be close to
  *
- * Defaults to returning the channel with the desired capability and the
- * lowest reference count when 'n' cannot be satisfied. Must be called
- * under dma_list_mutex.
+ * If some channels are close to the given cpu, the one with the lowest
+ * reference count is returned. Otherwise, cpu is ignored and only the
+ * reference count is taken into account.
+ * Must be called under dma_list_mutex.
  */
-static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
+static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 {
 	struct dma_device *device;
 	struct dma_chan *chan;
-	struct dma_chan *ret = NULL;
 	struct dma_chan *min = NULL;
+	struct dma_chan *localmin = NULL;
 
 	list_for_each_entry(device, &dma_device_list, global_node) {
 		if (!dma_has_cap(cap, device->cap_mask) ||
@@ -404,27 +414,22 @@ static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 		list_for_each_entry(chan, &device->channels, device_node) {
 			if (!chan->client_count)
 				continue;
-			if (!min)
-				min = chan;
-			else if (chan->table_count < min->table_count)
+			if (!min || chan->table_count < min->table_count)
 				min = chan;
 
-			if (n-- == 0) {
-				ret = chan;
-				break; /* done */
-			}
+			if (dma_chan_is_local(chan, cpu))
+				if (!localmin ||
+				    chan->table_count < localmin->table_count)
+					localmin = chan;
 		}
-		if (ret)
-			break; /* done */
 	}
 
-	if (!ret)
-		ret = min;
+	chan = localmin ? localmin : min;
 
-	if (ret)
-		ret->table_count++;
+	if (chan)
+		chan->table_count++;
 
-	return ret;
+	return chan;
 }
 
 /**
@@ -441,7 +446,6 @@ static void dma_channel_rebalance(void)
 	struct dma_device *device;
 	int cpu;
 	int cap;
-	int n;
 
 	/* undo the last distribution */
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
@@ -460,14 +464,9 @@ static void dma_channel_rebalance(void)
 		return;
 
 	/* redistribute available channels */
-	n = 0;
 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 		for_each_online_cpu(cpu) {
-			if (num_possible_cpus() > 1)
-				chan = nth_chan(cap, n++);
-			else
-				chan = nth_chan(cap, -1);
-
+			chan = min_chan(cap, cpu);
 			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 		}
 }
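
To see the channel-selection policy from the min_chan() hunks above in isolation: prefer the least-referenced channel whose device sits on the caller's NUMA node, and fall back to the globally least-referenced one when no local channel exists. The sketch below is a minimal user-space model of that policy only; struct toy_chan, is_local() and pick_chan() are illustrative stand-ins, not kernel API, and a plain node comparison replaces the kernel's dev_to_node()/cpumask_of_node() test.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_chan {
	const char *name;
	int node;		/* NUMA node of the channel's device, -1 = unknown */
	int table_count;	/* per-CPU table slots already pointing at it */
};

/* Mirrors dma_chan_is_local(): an unknown node counts as local to everyone. */
static bool is_local(const struct toy_chan *chan, int cpu_node)
{
	return chan->node == -1 || chan->node == cpu_node;
}

/* Same fallback structure as min_chan(): localmin if one exists, else min. */
static struct toy_chan *pick_chan(struct toy_chan *chans, size_t n, int cpu_node)
{
	struct toy_chan *min = NULL, *localmin = NULL;

	for (size_t i = 0; i < n; i++) {
		struct toy_chan *chan = &chans[i];

		if (!min || chan->table_count < min->table_count)
			min = chan;
		if (is_local(chan, cpu_node) &&
		    (!localmin || chan->table_count < localmin->table_count))
			localmin = chan;
	}

	struct toy_chan *chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;
	return chan;
}

int main(void)
{
	struct toy_chan chans[] = {
		{ "dma0chan0", 0, 2 },
		{ "dma1chan0", 1, 0 },
		{ "dma1chan1", 1, 1 },
	};

	/* Locality wins over raw load: node 0 keeps its busier local channel. */
	printf("node 0 -> %s\n", pick_chan(chans, 3, 0)->name);
	printf("node 1 -> %s\n", pick_chan(chans, 3, 1)->name);
	return 0;
}
```

This prints dma0chan0 for the node-0 caller and dma1chan0 for the node-1 caller, which is the trade the patch makes: keep DMA traffic on the local node even when a remote channel currently has a lower reference count.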