Diffstat (limited to 'drivers/net/ethernet/ti/davinci_cpdma.c')
-rw-r--r--	drivers/net/ethernet/ti/davinci_cpdma.c | 258
1 file changed, 65 insertions(+), 193 deletions(-)
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 18bf3a8fdc50..73638f7a55d4 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -21,7 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/delay.h>
-
+#include <linux/genalloc.h>
 #include "davinci_cpdma.h"
 
 /* DMA Registers */
@@ -87,9 +87,8 @@ struct cpdma_desc_pool {
 	void			*cpumap;	/* dma_alloc map */
 	int			desc_size, mem_size;
 	int			num_desc, used_desc;
-	unsigned long		*bitmap;
 	struct device		*dev;
-	spinlock_t		lock;
+	struct gen_pool		*gen_pool;
 };
 
 enum cpdma_state {
@@ -98,8 +97,6 @@ enum cpdma_state {
 	CPDMA_STATE_TEARDOWN,
 };
 
-static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
-
 struct cpdma_ctlr {
 	enum cpdma_state state;
 	struct cpdma_params params;
@@ -117,6 +114,7 @@ struct cpdma_chan {
 	int				chan_num;
 	spinlock_t			lock;
 	int				count;
+	u32				desc_num;
 	u32				mask;
 	cpdma_handler_fn		handler;
 	enum dma_data_direction		dir;
@@ -145,6 +143,19 @@ struct cpdma_chan {
 				 (directed << CPDMA_TO_PORT_SHIFT));	\
 	} while (0)
 
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+	if (!pool)
+		return;
+
+	WARN_ON(pool->used_desc);
+	if (pool->cpumap)
+		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+				  pool->phys);
+	else
+		iounmap(pool->iomap);
+}
+
 /*
  * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
  * emac) have dedicated on-chip memory for these descriptors. Some other
@@ -155,24 +166,25 @@ static struct cpdma_desc_pool *
 cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
 		       int size, int align)
 {
-	int bitmap_size;
 	struct cpdma_desc_pool *pool;
+	int ret;
 
 	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
 	if (!pool)
-		goto fail;
-
-	spin_lock_init(&pool->lock);
+		goto gen_pool_create_fail;
 
 	pool->dev = dev;
 	pool->mem_size = size;
 	pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
 	pool->num_desc = size / pool->desc_size;
 
-	bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
-	pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
-	if (!pool->bitmap)
-		goto fail;
+	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
+					      "cpdma");
+	if (IS_ERR(pool->gen_pool)) {
+		dev_err(dev, "pool create failed %ld\n",
+			PTR_ERR(pool->gen_pool));
+		goto gen_pool_create_fail;
+	}
 
 	if (phys) {
 		pool->phys = phys;
@@ -185,24 +197,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
 		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
 	}
 
-	if (pool->iomap)
-		return pool;
-fail:
-	return NULL;
-}
-
-static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
-{
-	if (!pool)
-		return;
+	if (!pool->iomap)
+		goto gen_pool_create_fail;
 
-	WARN_ON(pool->used_desc);
-	if (pool->cpumap) {
-		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
-				  pool->phys);
-	} else {
-		iounmap(pool->iomap);
+	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
+				pool->phys, pool->mem_size, -1);
+	if (ret < 0) {
+		dev_err(dev, "pool add failed %d\n", ret);
+		goto gen_pool_add_virt_fail;
 	}
+
+	return pool;
+
+gen_pool_add_virt_fail:
+	cpdma_desc_pool_destroy(pool);
+gen_pool_create_fail:
+	return NULL;
 }
 
 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -220,47 +230,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
 }
 
 static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool)
 {
-	unsigned long flags;
-	int index;
-	int desc_start;
-	int desc_end;
 	struct cpdma_desc __iomem *desc = NULL;
 
-	spin_lock_irqsave(&pool->lock, flags);
-
-	if (is_rx) {
-		desc_start = 0;
-		desc_end = pool->num_desc/2;
-	} else {
-		desc_start = pool->num_desc/2;
-		desc_end = pool->num_desc;
-	}
-
-	index = bitmap_find_next_zero_area(pool->bitmap,
-					   desc_end, desc_start, num_desc, 0);
-	if (index < desc_end) {
-		bitmap_set(pool->bitmap, index, num_desc);
-		desc = pool->iomap + pool->desc_size * index;
+	desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
+							   pool->desc_size);
+	if (desc)
 		pool->used_desc++;
-	}
 
-	spin_unlock_irqrestore(&pool->lock, flags);
 	return desc;
 }
 
 static void cpdma_desc_free(struct cpdma_desc_pool *pool,
 			    struct cpdma_desc __iomem *desc, int num_desc)
 {
-	unsigned long flags, index;
-
-	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
-		pool->desc_size;
-	spin_lock_irqsave(&pool->lock, flags);
-	bitmap_clear(pool->bitmap, index, num_desc);
+	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
 	pool->used_desc--;
-	spin_unlock_irqrestore(&pool->lock, flags);
 }
 
 struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
@@ -369,77 +355,6 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
 
-int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
-{
-	struct device *dev = ctlr->dev;
-	unsigned long flags;
-	int i;
-
-	spin_lock_irqsave(&ctlr->lock, flags);
-
-	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
-
-	dev_info(dev, "CPDMA: txidver: %x",
-		 dma_reg_read(ctlr, CPDMA_TXIDVER));
-	dev_info(dev, "CPDMA: txcontrol: %x",
-		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
-	dev_info(dev, "CPDMA: txteardown: %x",
-		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
-	dev_info(dev, "CPDMA: rxidver: %x",
-		 dma_reg_read(ctlr, CPDMA_RXIDVER));
-	dev_info(dev, "CPDMA: rxcontrol: %x",
-		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
-	dev_info(dev, "CPDMA: softreset: %x",
-		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
-	dev_info(dev, "CPDMA: rxteardown: %x",
-		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
-	dev_info(dev, "CPDMA: txintstatraw: %x",
-		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
-	dev_info(dev, "CPDMA: txintstatmasked: %x",
-		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
-	dev_info(dev, "CPDMA: txintmaskset: %x",
-		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
-	dev_info(dev, "CPDMA: txintmaskclear: %x",
-		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
-	dev_info(dev, "CPDMA: macinvector: %x",
-		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
-	dev_info(dev, "CPDMA: maceoivector: %x",
-		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
-	dev_info(dev, "CPDMA: rxintstatraw: %x",
-		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
-	dev_info(dev, "CPDMA: rxintstatmasked: %x",
-		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
-	dev_info(dev, "CPDMA: rxintmaskset: %x",
-		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
-	dev_info(dev, "CPDMA: rxintmaskclear: %x",
-		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
-	dev_info(dev, "CPDMA: dmaintstatraw: %x",
-		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
-	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
-		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
-	dev_info(dev, "CPDMA: dmaintmaskset: %x",
-		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
-	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
-		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
-
-	if (!ctlr->params.has_ext_regs) {
-		dev_info(dev, "CPDMA: dmacontrol: %x",
-			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
-		dev_info(dev, "CPDMA: dmastatus: %x",
-			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
-		dev_info(dev, "CPDMA: rxbuffofs: %x",
-			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
-	}
-
-	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
-		if (ctlr->channels[i])
-			cpdma_chan_dump(ctlr->channels[i]);
-
-	spin_unlock_irqrestore(&ctlr->lock, flags);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
-
 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 {
 	unsigned long flags;
@@ -516,6 +431,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	chan->state = CPDMA_STATE_IDLE;
 	chan->chan_num = chan_num;
 	chan->handler = handler;
+	chan->desc_num = ctlr->pool->num_desc / 2;
 
 	if (is_rx_chan(chan)) {
 		chan->hdp = ctlr->params.rxhdp + offset;
@@ -543,6 +459,12 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+{
+	return ctlr->pool->num_desc / 2;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
+
 int cpdma_chan_destroy(struct cpdma_chan *chan)
 {
 	struct cpdma_ctlr *ctlr;
@@ -574,54 +496,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
 
-int cpdma_chan_dump(struct cpdma_chan *chan)
-{
-	unsigned long flags;
-	struct device *dev = chan->ctlr->dev;
-
-	spin_lock_irqsave(&chan->lock, flags);
-
-	dev_info(dev, "channel %d (%s %d) state %s",
-		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
-		 chan_linear(chan), cpdma_state_str[chan->state]);
-	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
-	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
-	if (chan->rxfree) {
-		dev_info(dev, "\trxfree: %x\n",
-			 chan_read(chan, rxfree));
-	}
-
-	dev_info(dev, "\tstats head_enqueue: %d\n",
-		 chan->stats.head_enqueue);
-	dev_info(dev, "\tstats tail_enqueue: %d\n",
-		 chan->stats.tail_enqueue);
-	dev_info(dev, "\tstats pad_enqueue: %d\n",
-		 chan->stats.pad_enqueue);
-	dev_info(dev, "\tstats misqueued: %d\n",
-		 chan->stats.misqueued);
-	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
-		 chan->stats.desc_alloc_fail);
-	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
-		 chan->stats.pad_alloc_fail);
-	dev_info(dev, "\tstats runt_receive_buff: %d\n",
-		 chan->stats.runt_receive_buff);
-	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
-		 chan->stats.runt_transmit_buff);
-	dev_info(dev, "\tstats empty_dequeue: %d\n",
-		 chan->stats.empty_dequeue);
-	dev_info(dev, "\tstats busy_dequeue: %d\n",
-		 chan->stats.busy_dequeue);
-	dev_info(dev, "\tstats good_dequeue: %d\n",
-		 chan->stats.good_dequeue);
-	dev_info(dev, "\tstats requeue: %d\n",
-		 chan->stats.requeue);
-	dev_info(dev, "\tstats teardown_dequeue: %d\n",
-		 chan->stats.teardown_dequeue);
-
-	spin_unlock_irqrestore(&chan->lock, flags);
-	return 0;
-}
-
 static void __cpdma_chan_submit(struct cpdma_chan *chan,
 				struct cpdma_desc __iomem *desc)
 {
@@ -675,7 +549,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 		goto unlock_ret;
 	}
 
-	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
+	if (chan->count >= chan->desc_num) {
+		chan->stats.desc_alloc_fail++;
+		ret = -ENOMEM;
+		goto unlock_ret;
+	}
+
+	desc = cpdma_desc_alloc(ctlr->pool);
 	if (!desc) {
 		chan->stats.desc_alloc_fail++;
 		ret = -ENOMEM;
@@ -721,24 +601,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit);
 
 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
 {
-	unsigned long flags;
-	int index;
-	bool ret;
 	struct cpdma_ctlr *ctlr = chan->ctlr;
 	struct cpdma_desc_pool *pool = ctlr->pool;
+	bool free_tx_desc;
+	unsigned long flags;
 
-	spin_lock_irqsave(&pool->lock, flags);
-
-	index = bitmap_find_next_zero_area(pool->bitmap,
-					   pool->num_desc, pool->num_desc/2, 1, 0);
-
-	if (index < pool->num_desc)
-		ret = true;
-	else
-		ret = false;
-
-	spin_unlock_irqrestore(&pool->lock, flags);
-	return ret;
+	spin_lock_irqsave(&chan->lock, flags);
+	free_tx_desc = (chan->count < chan->desc_num) &&
+		       gen_pool_avail(pool->gen_pool);
+	spin_unlock_irqrestore(&chan->lock, flags);
+	return free_tx_desc;
 }
 EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
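
Illustrative note (not part of the patch): the new pool code above relies on the kernel's genalloc allocator instead of a hand-rolled bitmap. The sketch below, using hypothetical names (my_pool_init, MY_DESC_SIZE, "my-descs"), shows the same devm_gen_pool_create / gen_pool_add_virt / gen_pool_alloc / gen_pool_free / gen_pool_avail pattern in isolation, assuming a DMA-coherent backing buffer rather than dedicated on-chip descriptor RAM.

/* Illustrative only -- not davinci_cpdma.c. A hypothetical driver carving
 * fixed-size descriptors out of a DMA-coherent region with genalloc.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/log2.h>

#define MY_DESC_SIZE	64	/* must be a power of two for ilog2() */

static int my_pool_init(struct device *dev, size_t mem_size)
{
	struct gen_pool *pool;
	dma_addr_t hw_addr;
	unsigned long desc;
	void *cpumap;
	int ret;

	/* Device-managed pool; minimum allocation granularity is one
	 * descriptor (order = ilog2(MY_DESC_SIZE)).
	 */
	pool = devm_gen_pool_create(dev, ilog2(MY_DESC_SIZE), -1, "my-descs");
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	cpumap = dma_alloc_coherent(dev, mem_size, &hw_addr, GFP_KERNEL);
	if (!cpumap)
		return -ENOMEM;

	/* Register the CPU-virtual range and its DMA address with the pool */
	ret = gen_pool_add_virt(pool, (unsigned long)cpumap, hw_addr,
				mem_size, -1);
	if (ret < 0)
		goto err_free;

	/* Allocate and free one descriptor-sized block, mirroring what the
	 * new cpdma_desc_alloc()/cpdma_desc_free() do.
	 */
	desc = gen_pool_alloc(pool, MY_DESC_SIZE);
	if (desc)
		gen_pool_free(pool, desc, MY_DESC_SIZE);

	dev_info(dev, "descriptor bytes still available: %zu\n",
		 gen_pool_avail(pool));
	return 0;

err_free:
	dma_free_coherent(dev, mem_size, cpumap, hw_addr);
	return ret;
}

Because gen_pool_alloc()/gen_pool_free() are internally locked, the per-pool spinlock and the RX/TX split of the old bitmap scheme disappear; per-channel limits are instead enforced with chan->desc_num, as the hunks in cpdma_chan_submit() and cpdma_check_free_tx_desc() show.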