path: root/drivers/net/wireless/mediatek/mt76/dma.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2019-05-07 22:03:58 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-05-07 22:03:58 -0700
commit    80f232121b69cc69a31ccb2b38c1665d770b0710
tree      106263eac4ff03b899df695e00dd11e593e74fe2  /drivers/net/wireless/mediatek/mt76/dma.c
parent    82efe439599439a5e1e225ce5740e6cfb777a7dd
parent    a9e41a529681b38087c91ebc0bb91e12f510ca2d
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

  1) Support AES128-CCM ciphers in kTLS, from Vakul Garg.
  2) Add fib_sync_mem to control the amount of dirty memory we allow to queue up between synchronize RCU calls, from David Ahern.
  3) Make flow classifier more lockless, from Vlad Buslov.
  4) Add PHY downshift support to aquantia driver, from Heiner Kallweit.
  5) Add SKB cache for TCP rx and tx, from Eric Dumazet. This reduces contention on SLAB spinlocks in heavy RPC workloads.
  6) Partial GSO offload support in XFRM, from Boris Pismenny.
  7) Add fast link down support to ethtool, from Heiner Kallweit.
  8) Use siphash for IP ID generator, from Eric Dumazet.
  9) Pull nexthops even further out from ipv4/ipv6 routes and FIB entries, from David Ahern.
 10) Move skb->xmit_more into a per-cpu variable, from Florian Westphal.
 11) Improve eBPF verifier speed and increase maximum program size, from Alexei Starovoitov.
 12) Eliminate per-bucket spinlocks in rhashtable, and instead use bit spinlocks. From Neil Brown.
 13) Allow tunneling with GUE encap in ipvs, from Jacky Hu.
 14) Improve link partner cap detection in generic PHY code, from Heiner Kallweit.
 15) Add layer 2 encap support to bpf_skb_adjust_room(), from Alan Maguire.
 16) Remove SKB list implementation assumptions in SCTP, yours truly.
 17) Various cleanups, optimizations, and simplifications in r8169 driver. From Heiner Kallweit.
 18) Add memory accounting on TX and RX path of SCTP, from Xin Long.
 19) Switch PHY drivers over to use dynamic feature detection, from Heiner Kallweit.
 20) Support flow steering without masking in dpaa2-eth, from Ioana Ciocoi.
 21) Implement ndo_get_devlink_port in netdevsim driver, from Jiri Pirko.
 22) Stricter parsing of current and future netlink attributes; also export such policies to userspace. From Johannes Berg.
 23) Allow DSA tag drivers to be modular, from Andrew Lunn.
 24) Remove legacy DSA probing support, also from Andrew Lunn.
 25) Allow ll_temac driver to be used on non-x86 platforms, from Esben Haabendal.
 26) Add a generic tracepoint for TX queue timeouts to ease debugging, from Cong Wang.
 27) More indirect call optimizations, from Paolo Abeni"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1763 commits)
  cxgb4: Fix error path in cxgb4_init_module
  net: phy: improve pause mode reporting in phy_print_status
  dt-bindings: net: Fix a typo in the phy-mode list for ethernet bindings
  net: macb: Change interrupt and napi enable order in open
  net: ll_temac: Improve error message on error IRQ
  net/sched: remove block pointer from common offload structure
  net: ethernet: support of_get_mac_address new ERR_PTR error
  net: usb: smsc: fix warning reported by kbuild test robot
  staging: octeon-ethernet: Fix of_get_mac_address ERR_PTR check
  net: dsa: support of_get_mac_address new ERR_PTR error
  net: dsa: sja1105: Fix status initialization in sja1105_get_ethtool_stats
  vrf: sit mtu should not be updated when vrf netdev is the link
  net: dsa: Fix error cleanup path in dsa_init_module
  l2tp: Fix possible NULL pointer dereference
  taprio: add null check on sched_nest to avoid potential null pointer dereference
  net: mvpp2: cls: fix less than zero check on a u32 variable
  net_sched: sch_fq: handle non connected flows
  net_sched: sch_fq: do not assume EDT packets are ordered
  net: hns3: use devm_kcalloc when allocating desc_cb
  net: hns3: some cleanup for struct hns3_enet_ring
  ...
Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/dma.c')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/dma.c  164
1 file changed, 92 insertions(+), 72 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 76629b98c78d..4381155375e1 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -18,16 +18,20 @@
#include "mt76.h"
#include "dma.h"
-#define DMA_DUMMY_TXWI ((void *) ~0)
-
static int
-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
{
int size;
int i;
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
+
+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
size = q->ndesc * sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
@@ -43,10 +47,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
- iowrite32(q->desc_dma, &q->regs->desc_base);
- iowrite32(0, &q->regs->cpu_idx);
- iowrite32(0, &q->regs->dma_idx);
- iowrite32(q->ndesc, &q->regs->ring_size);
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(0, &q->regs->cpu_idx);
+ writel(0, &q->regs->dma_idx);
+ writel(q->ndesc, &q->regs->ring_size);
return 0;
}
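
Note on the hunk above: iowrite32()/ioread32() become writel()/readl(). iowrite32() also copes with ioport_map()-style tokens, while writel() assumes a plain ioremap()'d MMIO pointer, which is all mt76 uses. A minimal sketch of the same ring-programming sequence, assuming <linux/io.h>; example_ring_setup() and the register offsets are hypothetical stand-ins for the driver's struct mt76_queue_regs layout:

/* Sketch only: bring up a DMA descriptor ring over memory-mapped registers.
 * The 0x0/0x4/0x8/0xc offsets are hypothetical stand-ins for the
 * desc_base/ring_size/cpu_idx/dma_idx registers written above. */
static void example_ring_setup(void __iomem *ring_regs,
			       dma_addr_t desc_dma, int ndesc)
{
	writel(lower_32_bits(desc_dma), ring_regs + 0x0); /* descriptor base */
	writel(ndesc, ring_regs + 0x4);                   /* ring size */
	writel(0, ring_regs + 0x8);                       /* CPU (write) index */
	writel(0, ring_regs + 0xc);                       /* DMA (read) index */
}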
@@ -61,7 +65,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
int i, idx = -1;
if (txwi)
- q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+ q->entry[q->head].txwi = DMA_DUMMY_DATA;
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
@@ -120,9 +124,12 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
DMA_TO_DEVICE);
}
- if (e->txwi == DMA_DUMMY_TXWI)
+ if (e->txwi == DMA_DUMMY_DATA)
e->txwi = NULL;
+ if (e->skb == DMA_DUMMY_DATA)
+ e->skb = NULL;
+
*prev_e = *e;
memset(e, 0, sizeof(*e));
}
@@ -130,56 +137,64 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
- iowrite32(q->desc_dma, &q->regs->desc_base);
- iowrite32(q->ndesc, &q->regs->ring_size);
- q->head = ioread32(&q->regs->dma_idx);
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(q->ndesc, &q->regs->ring_size);
+ q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
- iowrite32(q->head, &q->regs->cpu_idx);
+ writel(q->head, &q->regs->cpu_idx);
}
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
+ unsigned int n_swq_queued[4] = {};
+ unsigned int n_queued = 0;
bool wake = false;
- int last;
+ int i, last;
- if (!q->ndesc)
+ if (!q)
return;
- spin_lock_bh(&q->lock);
if (flush)
last = -1;
else
- last = ioread32(&q->regs->dma_idx);
+ last = readl(&q->regs->dma_idx);
- while (q->queued && q->tail != last) {
+ while ((q->queued > n_queued) && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule)
- q->swq_queued--;
+ n_swq_queued[entry.qid]++;
q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
+ n_queued++;
- if (entry.skb) {
- spin_unlock_bh(&q->lock);
- dev->drv->tx_complete_skb(dev, q, &entry, flush);
- spin_lock_bh(&q->lock);
- }
+ if (entry.skb)
+ dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) {
- mt76_put_txwi(dev, entry.txwi);
+ if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
+ mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
if (!flush && q->tail == last)
- last = ioread32(&q->regs->dma_idx);
+ last = readl(&q->regs->dma_idx);
}
- if (!flush)
- mt76_txq_schedule(dev, q);
- else
+ spin_lock_bh(&q->lock);
+
+ q->queued -= n_queued;
+ for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+ if (!n_swq_queued[i])
+ continue;
+
+ dev->q_tx[i].swq_queued -= n_swq_queued[i];
+ }
+
+ if (flush)
mt76_dma_sync_idx(dev, q);
wake = wake && q->stopped &&
@@ -244,20 +259,20 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- iowrite32(q->head, &q->regs->cpu_idx);
+ writel(q->head, &q->regs->cpu_idx);
}
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_queue_buf buf;
dma_addr_t addr;
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
return -ENOMEM;
buf.addr = addr;
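
The mapping-error checks in this hunk gain unlikely(), which is only a branch-prediction hint; the error path is unchanged and still returns -ENOMEM. A self-contained sketch of the idiom, assuming <linux/dma-mapping.h>; example_map_for_tx() is a hypothetical helper, not mt76 code:

/* Sketch: map a buffer for device-bound DMA and report mapping failure. */
static int example_map_for_tx(struct device *dev, void *data, size_t len,
			      dma_addr_t *addr)
{
	*addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, *addr)))
		return -ENOMEM;		/* same error the mt76 paths return */

	return 0;
}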
@@ -271,80 +286,85 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
return 0;
}
-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+static int
+mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
{
+ struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
+ int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
- struct mt76_queue_buf buf[32];
struct sk_buff *iter;
dma_addr_t addr;
- int len;
- u32 tx_info = 0;
- int n, ret;
+ u8 *txwi;
t = mt76_get_txwi(dev);
if (!t) {
ieee80211_free_txskb(dev->hw, skb);
return -ENOMEM;
}
+ txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
- &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- if (ret < 0)
- goto free;
+ if (dev->drv->tx_aligned4_skbs)
+ mt76_insert_hdr_pad(skb);
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = -ENOMEM;
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
goto free;
- }
- n = 0;
- buf[n].addr = t->dma_addr;
- buf[n++].len = dev->drv->txwi_size;
- buf[n].addr = addr;
- buf[n++].len = len;
+ tx_info.buf[n].addr = t->dma_addr;
+ tx_info.buf[n++].len = dev->drv->txwi_size;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = len;
skb_walk_frags(skb, iter) {
- if (n == ARRAY_SIZE(buf))
+ if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
addr = dma_map_single(dev->dev, iter->data, iter->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
goto unmap;
- buf[n].addr = addr;
- buf[n++].len = iter->len;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = iter->len;
}
+ tx_info.nbuf = n;
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto unmap;
- if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+ ret = -ENOMEM;
goto unmap;
+ }
- return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
+ return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+ tx_info.info, tx_info.skb, t);
unmap:
- ret = -ENOMEM;
for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
- DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+ tx_info.buf[n].len, DMA_TO_DEVICE);
free:
- e.skb = skb;
+ e.skb = tx_info.skb;
e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
+ dev->drv->tx_complete_skb(dev, qid, &e);
mt76_put_txwi(dev, t);
return ret;
}
-EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
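
In the rewritten mt76_dma_tx_queue_skb() above, skb->len - skb->data_len becomes skb_headlen() (the same quantity, the length of the linear head), and the frag_list skbs are still walked with skb_walk_frags(). A rough sketch of that map-the-head-then-walk-the-fragments pattern, assuming <linux/skbuff.h> and <linux/dma-mapping.h>; example_map_skb_chain() is hypothetical and omits the unmap-on-error cleanup the real function performs:

/* Sketch: DMA-map an skb's linear head, then each skb hanging off its
 * frag_list, mirroring the buffer list the TX path above builds. */
static int example_map_skb_chain(struct device *dev, struct sk_buff *skb)
{
	struct sk_buff *iter;
	dma_addr_t addr;

	addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	skb_walk_frags(skb, iter) {
		addr = dma_map_single(dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, addr)))
			return -ENOMEM;
	}

	return 0;
}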
@@ -366,7 +386,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
break;
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
+ if (unlikely(dma_mapping_error(dev->dev, addr))) {
skb_free_frag(buf);
break;
}