Diffstat (limited to 'drivers/net/ethernet/xilinx')
-rw-r--r--   drivers/net/ethernet/xilinx/Kconfig                 |   1
-rw-r--r--   drivers/net/ethernet/xilinx/xilinx_axienet.h        |  29
-rw-r--r--   drivers/net/ethernet/xilinx/xilinx_axienet_main.c   | 315
3 files changed, 267 insertions, 78 deletions
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 35d96c633a33..7502214cc7d5 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -28,6 +28,7 @@ config XILINX_AXI_EMAC
 	depends on HAS_IOMEM
 	depends on XILINX_DMA
 	select PHYLINK
+	select DIMLIB
 	help
 	  This driver supports the 10/100/1000 Ethernet from Xilinx for the AXI
 	  bus interface used in Xilinx Virtex FPGAs and Soc's.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index a3f4f3e42587..5ff742103beb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -9,6 +9,7 @@
 #ifndef XILINX_AXIENET_H
 #define XILINX_AXIENET_H
 
+#include <linux/dim.h>
 #include <linux/netdevice.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
@@ -112,9 +113,6 @@
 #define XAXIDMA_DELAY_MASK	0xFF000000 /* Delay timeout counter */
 #define XAXIDMA_COALESCE_MASK	0x00FF0000 /* Coalesce counter */
 
-#define XAXIDMA_DELAY_SHIFT	24
-#define XAXIDMA_COALESCE_SHIFT	16
-
 #define XAXIDMA_IRQ_IOC_MASK	0x00001000 /* Completion intr */
 #define XAXIDMA_IRQ_DELAY_MASK	0x00002000 /* Delay interrupt */
 #define XAXIDMA_IRQ_ERROR_MASK	0x00004000 /* Error interrupt */
@@ -126,8 +124,7 @@
 /* Default TX/RX Threshold and delay timer values for SGDMA mode */
 #define XAXIDMA_DFT_TX_THRESHOLD	24
 #define XAXIDMA_DFT_TX_USEC	50
-#define XAXIDMA_DFT_RX_THRESHOLD	1
-#define XAXIDMA_DFT_RX_USEC	50
+#define XAXIDMA_DFT_RX_USEC	16
 
 #define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
 #define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
@@ -487,7 +484,12 @@ struct skbuf_dma_descriptor {
  * @regs: Base address for the axienet_local device address space
  * @dma_regs: Base address for the axidma device address space
  * @napi_rx: NAPI RX control structure
+ * @rx_dim: DIM state for the receive queue
+ * @rx_dim_enabled: Whether DIM is enabled or not
+ * @rx_irqs: Number of interrupts
+ * @rx_cr_lock: Lock protecting @rx_dma_cr, its register, and @rx_dma_started
 * @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_dma_started: Set when RX DMA is started
 * @rx_bd_v: Virtual address of the RX buffer descriptor ring
 * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
 * @rx_bd_num: Size of RX buffer descriptor ring
@@ -497,7 +499,9 @@ struct skbuf_dma_descriptor {
 * @rx_bytes: RX byte count for statistics
 * @rx_stat_sync: Synchronization object for RX stats
 * @napi_tx: NAPI TX control structure
+ * @tx_cr_lock: Lock protecting @tx_dma_cr, its register, and @tx_dma_started
 * @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_dma_started: Set when TX DMA is started
 * @tx_bd_v: Virtual address of the TX buffer descriptor ring
 * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
 * @tx_bd_num: Size of TX buffer descriptor ring
@@ -532,10 +536,6 @@ struct skbuf_dma_descriptor {
 *	  supported, the maximum frame size would be 9k. Else it is
 *	  1522 bytes (assuming support for basic VLAN)
 * @rxmem: Stores rx memory size for jumbo frame handling.
- * @coalesce_count_rx: Store the irq coalesce on RX side.
- * @coalesce_usec_rx: IRQ coalesce delay for RX
- * @coalesce_count_tx: Store the irq coalesce on TX side.
- * @coalesce_usec_tx: IRQ coalesce delay for TX
 * @use_dmaengine: flag to check dmaengine framework usage.
 * @tx_chan: TX DMA channel.
 * @rx_chan: RX DMA channel.
@@ -569,7 +569,12 @@ struct axienet_local {
 	void __iomem *dma_regs;
 
 	struct napi_struct napi_rx;
+	struct dim rx_dim;
+	bool rx_dim_enabled;
+	u16 rx_irqs;
+	spinlock_t rx_cr_lock;
 	u32 rx_dma_cr;
+	bool rx_dma_started;
 	struct axidma_bd *rx_bd_v;
 	dma_addr_t rx_bd_p;
 	u32 rx_bd_num;
@@ -579,7 +584,9 @@ struct axienet_local {
 	struct u64_stats_sync rx_stat_sync;
 
 	struct napi_struct napi_tx;
+	spinlock_t tx_cr_lock;
 	u32 tx_dma_cr;
+	bool tx_dma_started;
 	struct axidma_bd *tx_bd_v;
 	dma_addr_t tx_bd_p;
 	u32 tx_bd_num;
@@ -610,10 +617,6 @@ struct axienet_local {
 	u32 max_frm_size;
 	u32 rxmem;
 
-	u32 coalesce_count_rx;
-	u32 coalesce_usec_rx;
-	u32 coalesce_count_tx;
-	u32 coalesce_usec_tx;
 	u8 use_dmaengine;
 	struct dma_chan *tx_chan;
 	struct dma_chan *rx_chan;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f33178f90c42..054abf283ab3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -223,23 +223,62 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 			  lp->rx_bd_p);
 }
 
+static u64 axienet_dma_rate(struct axienet_local *lp)
+{
+	if (lp->axi_clk)
+		return clk_get_rate(lp->axi_clk);
+	return 125000000; /* arbitrary guess if no clock rate set */
+}
+
 /**
- * axienet_usec_to_timer - Calculate IRQ delay timer value
- * @lp: Pointer to the axienet_local structure
- * @coalesce_usec: Microseconds to convert into timer value
+ * axienet_calc_cr() - Calculate control register value
+ * @lp: Device private data
+ * @count: Number of completions before an interrupt
+ * @usec: Microseconds after the last completion before an interrupt
+ *
+ * Calculate a control register value based on the coalescing settings. The
+ * run/stop bit is not set.
  */
-static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
 {
-	u32 result;
-	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+	u32 cr;
 
-	if (lp->axi_clk)
-		clk_rate = clk_get_rate(lp->axi_clk);
+	cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
+	     XAXIDMA_IRQ_ERROR_MASK;
+	/* Only set interrupt delay timer if not generating an interrupt on
+	 * the first packet. Otherwise leave at 0 to disable delay interrupt.
+	 */
+	if (count > 1) {
+		u64 clk_rate = axienet_dma_rate(lp);
+		u32 timer;
+
+		/* 1 Timeout Interval = 125 * (clock period of SG clock) */
+		timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
+						XAXIDMA_DELAY_SCALE);
 
-	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
-	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
-					 XAXIDMA_DELAY_SCALE);
-	return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
+		timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
+		cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
+		      XAXIDMA_IRQ_DELAY_MASK;
+	}
+
+	return cr;
+}
+
+/**
+ * axienet_coalesce_params() - Extract coalesce parameters from the CR
+ * @lp: Device private data
+ * @cr: The control register to parse
+ * @count: Number of packets before an interrupt
+ * @usec: Idle time (in usec) before an interrupt
+ */
+static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
+				    u32 *count, u32 *usec)
+{
+	u64 clk_rate = axienet_dma_rate(lp);
+	u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
+
+	*count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
+	*usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
 }
 
 /**
@@ -248,30 +287,12 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
  */
 static void axienet_dma_start(struct axienet_local *lp)
 {
+	spin_lock_irq(&lp->rx_cr_lock);
+
 	/* Start updating the Rx channel control register */
-	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
-			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
-	/* Only set interrupt delay timer if not generating an interrupt on
-	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
-	 */
-	if (lp->coalesce_count_rx > 1)
-		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
-					<< XAXIDMA_DELAY_SHIFT) |
-				 XAXIDMA_IRQ_DELAY_MASK;
+	lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 
-	/* Start updating the Tx channel control register */
-	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
-			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
-	/* Only set interrupt delay timer if not generating an interrupt on
-	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
-	 */
-	if (lp->coalesce_count_tx > 1)
-		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
-					<< XAXIDMA_DELAY_SHIFT) |
-				 XAXIDMA_IRQ_DELAY_MASK;
-	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
-
 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
 	 * halted state. This will make the Rx side ready for reception.
 	 */
@@ -280,6 +301,14 @@ static void axienet_dma_start(struct axienet_local *lp)
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
 	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
 			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+	lp->rx_dma_started = true;
+
+	spin_unlock_irq(&lp->rx_cr_lock);
+	spin_lock_irq(&lp->tx_cr_lock);
+
+	/* Start updating the Tx channel control register */
+	lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
+	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
 
 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
 	 * Tx channel is now ready to run. But only after we write to the
@@ -288,6 +317,9 @@ static void axienet_dma_start(struct axienet_local *lp)
 	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
 	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+	lp->tx_dma_started = true;
+
+	spin_unlock_irq(&lp->tx_cr_lock);
 }
 
 /**
@@ -623,14 +655,22 @@ static void axienet_dma_stop(struct axienet_local *lp)
 	int count;
 	u32 cr, sr;
 
-	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
-	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+	spin_lock_irq(&lp->rx_cr_lock);
+
+	cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+	lp->rx_dma_started = false;
+
+	spin_unlock_irq(&lp->rx_cr_lock);
 	synchronize_irq(lp->rx_irq);
 
-	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
-	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+	spin_lock_irq(&lp->tx_cr_lock);
+
+	cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+	lp->tx_dma_started = false;
+
+	spin_unlock_irq(&lp->tx_cr_lock);
 	synchronize_irq(lp->tx_irq);
 
 	/* Give DMAs a chance to halt gracefully */
@@ -962,6 +1002,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
 					&size, budget);
 
 	if (packets) {
+		netdev_completed_queue(ndev, packets, size);
 		u64_stats_update_begin(&lp->tx_stat_sync);
 		u64_stats_add(&lp->tx_packets, packets);
 		u64_stats_add(&lp->tx_bytes, size);
@@ -979,7 +1020,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
 		 * cause an immediate interrupt if any TX packets are
 		 * already pending.
 		 */
+		spin_lock_irq(&lp->tx_cr_lock);
 		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+		spin_unlock_irq(&lp->tx_cr_lock);
 	}
 	return packets;
 }
@@ -1083,6 +1126,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (++new_tail_ptr >= lp->tx_bd_num)
 		new_tail_ptr = 0;
 	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+	netdev_sent_queue(ndev, skb->len);
 
 	/* Start the transfer */
 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
@@ -1241,11 +1285,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
 		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
 
 	if (packets < budget && napi_complete_done(napi, packets)) {
+		if (READ_ONCE(lp->rx_dim_enabled)) {
+			struct dim_sample sample = {
+				.time = ktime_get(),
+				/* Safe because we are the only writer */
+				.pkt_ctr = u64_stats_read(&lp->rx_packets),
+				.byte_ctr = u64_stats_read(&lp->rx_bytes),
+				.event_ctr = READ_ONCE(lp->rx_irqs),
+			};
+
+			net_dim(&lp->rx_dim, &sample);
+		}
+
 		/* Re-enable RX completion interrupts. This should
 		 * cause an immediate interrupt if any RX packets are
 		 * already pending.
 		 */
+		spin_lock_irq(&lp->rx_cr_lock);
 		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+		spin_unlock_irq(&lp->rx_cr_lock);
 	}
 	return packets;
 }
@@ -1283,11 +1341,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 		/* Disable further TX completion interrupts and schedule
 		 * NAPI to handle the completions.
 		 */
-		u32 cr = lp->tx_dma_cr;
-
-		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
 		if (napi_schedule_prep(&lp->napi_tx)) {
+			u32 cr;
+
+			spin_lock(&lp->tx_cr_lock);
+			cr = lp->tx_dma_cr;
+			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
 			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+			spin_unlock(&lp->tx_cr_lock);
 			__napi_schedule(&lp->napi_tx);
 		}
 	}
@@ -1328,11 +1389,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 		/* Disable further RX completion interrupts and schedule
 		 * NAPI receive.
 		 */
-		u32 cr = lp->rx_dma_cr;
-
-		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+		WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
 		if (napi_schedule_prep(&lp->napi_rx)) {
+			u32 cr;
+
+			spin_lock(&lp->rx_cr_lock);
+			cr = lp->rx_dma_cr;
+			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
 			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+			spin_unlock(&lp->rx_cr_lock);
+
 			__napi_schedule(&lp->napi_rx);
 		}
 	}
@@ -1625,6 +1691,7 @@ err_free_eth_irq:
 	if (lp->eth_irq > 0)
 		free_irq(lp->eth_irq, ndev);
 err_phy:
+	cancel_work_sync(&lp->rx_dim.work);
 	cancel_delayed_work_sync(&lp->stats_work);
 	phylink_stop(lp->phylink);
 	phylink_disconnect_phy(lp->phylink);
@@ -1654,6 +1721,7 @@ static int axienet_stop(struct net_device *ndev)
 		napi_disable(&lp->napi_rx);
 	}
 
+	cancel_work_sync(&lp->rx_dim.work);
 	cancel_delayed_work_sync(&lp->stats_work);
 
 	phylink_stop(lp->phylink);
@@ -1685,6 +1753,7 @@ static int axienet_stop(struct net_device *ndev)
 		dma_release_channel(lp->tx_chan);
 	}
 
+	netdev_reset_queue(ndev);
 	axienet_iow(lp, XAE_IE_OFFSET, 0);
 
 	if (lp->eth_irq > 0)
@@ -1999,6 +2068,87 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
 }
 
 /**
+ * axienet_update_coalesce_rx() - Set RX CR
+ * @lp: Device private data
+ * @cr: Value to write to the RX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
+				       u32 mask)
+{
+	spin_lock_irq(&lp->rx_cr_lock);
+	lp->rx_dma_cr &= ~mask;
+	lp->rx_dma_cr |= cr;
+	/* If DMA isn't started, then the settings will be applied the next
+	 * time dma_start() is called.
+	 */
+	if (lp->rx_dma_started) {
+		u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+
+		/* Don't enable IRQs if they are disabled by NAPI */
+		if (reg & XAXIDMA_IRQ_ALL_MASK)
+			cr = lp->rx_dma_cr;
+		else
+			cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+	}
+	spin_unlock_irq(&lp->rx_cr_lock);
+}
+
+/**
+ * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
+ * @lp: Device private data
+ */
+static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
+{
+	return min(1 << (lp->rx_dim.profile_ix << 1), 255);
+}
+
+/**
+ * axienet_rx_dim_work() - Adjust RX DIM settings
+ * @work: The work struct
+ */
+static void axienet_rx_dim_work(struct work_struct *work)
+{
+	struct axienet_local *lp =
+		container_of(work, struct axienet_local, rx_dim.work);
+	u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
+	u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
+		   XAXIDMA_IRQ_ERROR_MASK;
+
+	axienet_update_coalesce_rx(lp, cr, mask);
+	lp->rx_dim.state = DIM_START_MEASURE;
+}
+
+/**
+ * axienet_update_coalesce_tx() - Set TX CR
+ * @lp: Device private data
+ * @cr: Value to write to the TX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
+				       u32 mask)
+{
+	spin_lock_irq(&lp->tx_cr_lock);
+	lp->tx_dma_cr &= ~mask;
+	lp->tx_dma_cr |= cr;
+	/* If DMA isn't started, then the settings will be applied the next
+	 * time dma_start() is called.
+	 */
+	if (lp->tx_dma_started) {
+		u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+
+		/* Don't enable IRQs if they are disabled by NAPI */
+		if (reg & XAXIDMA_IRQ_ALL_MASK)
+			cr = lp->tx_dma_cr;
+		else
+			cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+	}
+	spin_unlock_irq(&lp->tx_cr_lock);
+}
+
+/**
  * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
@@ -2018,11 +2168,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
 			      struct netlink_ext_ack *extack)
 {
 	struct axienet_local *lp = netdev_priv(ndev);
-
-	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
-	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
-	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
-	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
+	u32 cr;
+
+	ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
+
+	spin_lock_irq(&lp->rx_cr_lock);
+	cr = lp->rx_dma_cr;
+	spin_unlock_irq(&lp->rx_cr_lock);
+	axienet_coalesce_params(lp, cr,
+				&ecoalesce->rx_max_coalesced_frames,
+				&ecoalesce->rx_coalesce_usecs);
+
+	spin_lock_irq(&lp->tx_cr_lock);
+	cr = lp->tx_dma_cr;
+	spin_unlock_irq(&lp->tx_cr_lock);
+	axienet_coalesce_params(lp, cr,
+				&ecoalesce->tx_max_coalesced_frames,
+				&ecoalesce->tx_coalesce_usecs);
 
 	return 0;
 }
@@ -2046,12 +2208,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
 			      struct netlink_ext_ack *extack)
 {
 	struct axienet_local *lp = netdev_priv(ndev);
-
-	if (netif_running(ndev)) {
-		NL_SET_ERR_MSG(extack,
-			       "Please stop netif before applying configuration");
-		return -EBUSY;
-	}
+	bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
+	bool old_dim = lp->rx_dim_enabled;
+	u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
 
 	if (ecoalesce->rx_max_coalesced_frames > 255 ||
 	    ecoalesce->tx_max_coalesced_frames > 255) {
@@ -2065,7 +2224,7 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
 		return -EINVAL;
 	}
 
-	if ((ecoalesce->rx_max_coalesced_frames > 1 &&
+	if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
 	     !ecoalesce->rx_coalesce_usecs) ||
 	    (ecoalesce->tx_max_coalesced_frames > 1 &&
 	     !ecoalesce->tx_coalesce_usecs)) {
@@ -2074,11 +2233,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
 		return -EINVAL;
 	}
 
-	lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
-	lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
-	lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
-	lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
+	if (new_dim && !old_dim) {
+		cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+				     ecoalesce->rx_coalesce_usecs);
+	} else if (!new_dim) {
+		if (old_dim) {
+			WRITE_ONCE(lp->rx_dim_enabled, false);
+			napi_synchronize(&lp->napi_rx);
+			flush_work(&lp->rx_dim.work);
+		}
+
+		cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
+				     ecoalesce->rx_coalesce_usecs);
+	} else {
+		/* Dummy value for count just to calculate timer */
+		cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
+		mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
+	}
+
+	axienet_update_coalesce_rx(lp, cr, mask);
+	if (new_dim && !old_dim)
+		WRITE_ONCE(lp->rx_dim_enabled, true);
+
+	cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
+			     ecoalesce->tx_coalesce_usecs);
+	axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
 
 	return 0;
 }
@@ -2316,7 +2495,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev,
 
 static const struct ethtool_ops axienet_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
-				     ETHTOOL_COALESCE_USECS,
+				     ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
 	.get_drvinfo = axienet_ethtools_get_drvinfo,
 	.get_regs_len = axienet_ethtools_get_regs_len,
 	.get_regs = axienet_ethtools_get_regs,
@@ -2499,6 +2679,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
 				 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 
 	axienet_dma_stop(lp);
+	netdev_reset_queue(ndev);
 
 	for (i = 0; i < lp->tx_bd_num; i++) {
 		cur_p = &lp->tx_bd_v[i];
@@ -2858,10 +3039,15 @@ static int axienet_probe(struct platform_device *pdev)
 		axienet_set_mac_address(ndev, NULL);
 	}
 
-	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
-	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
-	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
-	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+	spin_lock_init(&lp->rx_cr_lock);
+	spin_lock_init(&lp->tx_cr_lock);
+	INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
+	lp->rx_dim_enabled = true;
+	lp->rx_dim.profile_ix = 1;
+	lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+					XAXIDMA_DFT_RX_USEC);
+	lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
+					XAXIDMA_DFT_TX_USEC);
 
 	ret = axienet_mdio_setup(lp);
 	if (ret)
@@ -2891,7 +3077,6 @@ static int axienet_probe(struct platform_device *pdev)
 		}
 		of_node_put(np);
 		lp->pcs.ops = &axienet_pcs_ops;
-		lp->pcs.neg_mode = true;
 		lp->pcs.poll = true;
 	}
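The delay-timer arithmetic in axienet_calc_cr() above is easy to sanity-check in user space. The sketch below is not driver code; it mirrors the conversion on the assumption that XAXIDMA_DELAY_SCALE is 125 * USEC_PER_SEC (one timer tick per 125 SG-clock periods) and uses the driver's 125 MHz fallback clock rate, so the new 16 us RX default lands on a delay field of 16.

/* Standalone sketch of the usec -> delay-timer conversion done by
 * axienet_calc_cr(); assumes XAXIDMA_DELAY_SCALE == 125 * USEC_PER_SEC
 * and the 125 MHz fallback SG clock used when no clock is provided.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_rate = 125000000;      /* SG clock in Hz (driver fallback) */
	uint64_t scale = 125ULL * 1000000;  /* 125 clock periods per timer tick */
	uint64_t usec = 16;                 /* XAXIDMA_DFT_RX_USEC */
	uint64_t timer = (usec * clk_rate + scale / 2) / scale; /* round to closest */

	if (timer > 0xFF)                   /* FIELD_MAX(XAXIDMA_DELAY_MASK) */
		timer = 0xFF;
	printf("delay timer field = %" PRIu64 "\n", timer); /* prints 16 */
	return 0;
}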
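On the adaptive path, axienet_dim_coalesce_count_rx() turns the DIM profile index into a frame-coalescing target of 4^profile_ix, clamped to the 8-bit XAXIDMA_COALESCE_MASK field. The probe code starts at profile_ix 1, i.e. a 4-frame target, and lets DIM move from there. A standalone sketch of the mapping (indices 0 through 4 shown purely for illustration; how many profiles DIM actually steps through is a library detail):

/* Sketch (not driver code): coalesce counts produced by
 * axienet_dim_coalesce_count_rx() as the DIM profile index changes.
 */
#include <stdio.h>

int main(void)
{
	for (int ix = 0; ix <= 4; ix++) {
		int count = 1 << (ix << 1);   /* 4^ix: 1, 4, 16, 64, 256 */
		if (count > 255)              /* coalesce field is 8 bits wide */
			count = 255;
		printf("profile_ix %d -> %d frames\n", ix, count);
	}
	return 0;
}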
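With ETHTOOL_COALESCE_USE_ADAPTIVE_RX now advertised and the old netif_running() -EBUSY check gone, coalescing can be reconfigured while the interface is up, for example with ethtool -C <iface> adaptive-rx off rx-frames 32 rx-usecs 50 to pin fixed settings, or ethtool -C <iface> adaptive-rx on to hand RX moderation back to DIM (the interface name is a placeholder). Per the validation in axienet_ethtools_set_coalesce(), rx-usecs must stay non-zero whenever adaptive RX is enabled or more than one frame is coalesced.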