author     Ben Hutchings <bhutchings@solarflare.com>   2013-01-29 23:33:14 +0000
committer  Ben Hutchings <bhutchings@solarflare.com>   2013-03-07 20:22:06 +0000
commit     272baeeb6a98f5f746c2eeab4973c2df89e9d7ea (patch)
tree       c894c86c4331a376e728fdffb5cdbcd301158628 /drivers/net/ethernet/sfc/rx.c
parent     80c2e716d555912168f93853f96a24d0de75897b (diff)
sfc: Properly distinguish RX buffer and DMA lengths

Replace efx_nic::rx_buffer_len with efx_nic::rx_dma_len, the maximum RX
DMA length.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
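For context, the old rx_buffer_len covered everything placed in the buffer (the EFX_PAGE_IP_ALIGN pad plus the area the NIC may write), whereas rx_dma_len names only the maximum length the NIC may DMA, which is what the descriptor length and the page-sharing test actually need. A minimal sketch of how such a maximum DMA length relates to the MTU, using hypothetical names and a simplified frame-length formula that are not taken from this commit:

/* Hypothetical sketch, not the driver's code: the maximum RX DMA length
 * is everything the NIC may write for one packet - an optional hash
 * prefix, the largest frame for the current MTU and any padding the
 * hardware requires.  The IP alignment pad is deliberately excluded;
 * it is applied when the buffer address is chosen, not DMA'd into.
 */
unsigned int example_rx_dma_len(unsigned int mtu,
				unsigned int hash_prefix_len,
				unsigned int rx_buffer_padding)
{
	/* Simplified: 14-byte Ethernet header + payload + 4-byte FCS */
	unsigned int max_frame_len = 14 + mtu + 4;

	return hash_prefix_len + max_frame_len + rx_buffer_padding;
}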
Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index e7aa28eb9327..31361db28f91 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -27,8 +27,9 @@
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8

-/* Maximum size of a buffer sharing a page */
-#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+/* Maximum length for an RX descriptor sharing a page */
+#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state) \
+ - EFX_PAGE_IP_ALIGN)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u
@@ -52,10 +53,6 @@ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
{
return buf->page_offset + efx->type->rx_buffer_hash_size;
}
-static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
-{
- return PAGE_SIZE << efx->rx_buffer_order;
-}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
@@ -105,7 +102,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
if (unlikely(page == NULL))
return -ENOMEM;
dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
- efx_rx_buf_size(efx),
+ PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
__free_pages(page, efx->rx_buffer_order);
@@ -124,12 +121,12 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
rx_buf->page = page;
rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
- rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
++state->refcnt;

- if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
+ if ((~count & 1) && (efx->rx_dma_len <= EFX_RX_HALF_PAGE)) {
/* Use the second half of the page */
get_page(page);
dma_addr += (PAGE_SIZE >> 1);
@@ -153,7 +150,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
if (--state->refcnt == 0) {
dma_unmap_page(&efx->pci_dev->dev,
state->dma_addr,
- efx_rx_buf_size(efx),
+ PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
} else if (used_len) {
dma_sync_single_for_cpu(&efx->pci_dev->dev,
@@ -221,7 +218,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
rx_buf->flags = 0;

- if (efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ if (efx->rx_dma_len <= EFX_RX_HALF_PAGE &&
page_count(rx_buf->page) == 1)
efx_resurrect_rx_buffer(rx_queue, rx_buf);
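With the revised EFX_RX_HALF_PAGE above, rx_dma_len <= EFX_RX_HALF_PAGE becomes an exact test for whether two receive buffers can share one page, since each half must also account for the shared efx_rx_page_state and the EFX_PAGE_IP_ALIGN pad. A standalone worked check of that arithmetic, using assumed sizes (4 KiB pages, 2-byte IP alignment, a stand-in page-state size) that are not taken from this commit:

/* Hypothetical, user-space sketch verifying the half-page arithmetic
 * used by the init and recycle paths above.  All sizes are assumptions.
 */
#include <assert.h>

#define EXAMPLE_PAGE_SIZE  4096u  /* assumed 4 KiB pages */
#define EXAMPLE_PAGE_STATE   16u  /* stand-in for sizeof(struct efx_rx_page_state) */
#define EXAMPLE_IP_ALIGN      2u  /* assumed EFX_PAGE_IP_ALIGN */

#define EXAMPLE_RX_HALF_PAGE \
	((EXAMPLE_PAGE_SIZE >> 1) - EXAMPLE_PAGE_STATE - EXAMPLE_IP_ALIGN)

int main(void)
{
	/* 2048 - 16 - 2 = 2030 bytes of DMA area per half page */
	assert(EXAMPLE_RX_HALF_PAGE == 2030u);

	/* A DMA length sized for a 1500-byte MTU fits twice per page... */
	assert(1790u <= EXAMPLE_RX_HALF_PAGE);

	/* ...while a jumbo-frame DMA length forces one buffer per
	 * (possibly higher-order) page allocation.
	 */
	assert(!(9018u <= EXAMPLE_RX_HALF_PAGE));
	return 0;
}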