author:    Eric Dumazet <dada1@cosmosbay.com>  2008-11-23 23:24:32 -0800
committer: David S. Miller <davem@davemloft.net>  2008-11-23 23:24:32 -0800
commit:    1f87e235e6fb92c2968b52b9191de04f1aff8e77
tree:      ab774d239c61b6c206ef07398828533cdd01915e /include/linux/etherdevice.h
parent:    70eb1bfd52e97120eddf9b5aaabfe1ecdf4eb663
eth: Declare an optimized compare_ether_addr_64bits() function
Linus mentioned we could try to perform long word operations, even
on potentially unaligned addresses, on x86 at least. David mentioned
the HAVE_EFFICIENT_UNALIGNED_ACCESS test to handle this on all
arches that have efficient unaligned accesses.
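As a sketch of the enabling observation (my illustration, not patch code): with
HAVE_EFFICIENT_UNALIGNED_ACCESS, a word-wide load from an address with only
16-bit alignment costs the same as an aligned one. Written portably with
memcpy(), which such compilers lower to a single move, eight bytes can be
compared with one xor; the helper names load64_unaligned() and words_differ()
below are hypothetical:

#include <stdint.h>
#include <string.h>

/* One unaligned 64-bit load, written portably; on x86-64 this
 * compiles to a single mov even when p is only 16-bit aligned. */
static inline uint64_t load64_unaligned(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

/* Compare eight bytes with a single xor instead of a byte loop. */
static inline int words_differ(const void *a, const void *b)
{
	return (load64_unaligned(a) ^ load64_unaligned(b)) != 0;
}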
I tried this idea and got nice assembly on 32-bit:
158: 33 82 38 01 00 00 xor 0x138(%edx),%eax
15e: 33 8a 34 01 00 00 xor 0x134(%edx),%ecx
164: c1 e0 10 shl $0x10,%eax
167: 09 c1 or %eax,%ecx
169: 74 0b je 176 <eth_type_trans+0x87>
And very nice assembly on 64-bit, of course (one xor, one shl).
Nice oprofile improvement in eth_type_trans(): 0.17% instead of 0.41%,
expected since we remove eight instructions on a fast path.
This patch implements a compare_ether_addr_64bits() function that uses the
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ifdef to perform the 6-byte comparison
efficiently on all capable arches.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/etherdevice.h')
-rw-r--r--  include/linux/etherdevice.h  42
1 file changed, 42 insertions, 0 deletions
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 0e5e97060034..1cb0f0b90926 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -27,6 +27,7 @@
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
+#include <asm/unaligned.h>
 
 #ifdef __KERNEL__
 extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
@@ -140,6 +141,47 @@ static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
 	BUILD_BUG_ON(ETH_ALEN != 6);
 	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
 }
+
+static inline unsigned long zap_last_2bytes(unsigned long value)
+{
+#ifdef __BIG_ENDIAN
+	return value >> 16;
+#else
+	return value << 16;
+#endif
+}
+
+/**
+ * compare_ether_addr_64bits - Compare two Ethernet addresses
+ * @addr1: Pointer to an array of 8 bytes
+ * @addr2: Pointer to another array of 8 bytes
+ *
+ * Compare two Ethernet addresses, returns 0 if equal.
+ * Same result as "memcmp(addr1, addr2, ETH_ALEN)" but without conditional
+ * branches, and possibly long word memory accesses on CPUs allowing cheap
+ * unaligned memory reads.
+ * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
+ *
+ * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
+ */
+
+static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
+						 const u8 addr2[6+2])
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	unsigned long fold = ((*(unsigned long *)addr1) ^
+			      (*(unsigned long *)addr2));
+
+	if (sizeof(fold) == 8)
+		return zap_last_2bytes(fold) != 0;
+
+	fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
+				(*(unsigned long *)(addr2 + 4)));
+	return fold != 0;
+#else
+	return compare_ether_addr(addr1, addr2);
+#endif
+}
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_ETHERDEVICE_H */
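For experimenting with the trick outside the kernel, here is a minimal
standalone userspace sketch of the same folding technique. The name
compare_mac_64bits() and the test vectors are mine, and memcpy() stands in for
the kernel's raw unaligned loads; the in-tree compare_ether_addr_64bits()
above is the authoritative version.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Shift away the two trailing padding bytes of a folded word,
 * mirroring the patch's zap_last_2bytes(). */
static unsigned long zap_last_2bytes(unsigned long value)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return value >> 16;
#else
	return value << 16;
#endif
}

/* Returns 0 if equal, nonzero otherwise (memcmp-like), matching the
 * contract of the kernel's compare_ether_addr_64bits(). */
static unsigned compare_mac_64bits(const unsigned char addr1[6 + 2],
				   const unsigned char addr2[6 + 2])
{
	unsigned long a, b, fold;
	uint32_t c, d;

	memcpy(&a, addr1, sizeof(a));
	memcpy(&b, addr2, sizeof(b));
	fold = a ^ b;

	if (sizeof(fold) == 8)	/* 64-bit: one load pair covers all 8 bytes */
		return zap_last_2bytes(fold) != 0;

	/* 32-bit: fold in bytes 4..7, masking the two padding bytes */
	memcpy(&c, addr1 + 4, sizeof(c));
	memcpy(&d, addr2 + 4, sizeof(d));
	fold |= zap_last_2bytes((unsigned long)(c ^ d));
	return fold != 0;
}

int main(void)
{
	/* 6 address bytes plus the 2 padding bytes the contract requires */
	unsigned char a[8] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc, 0, 0 };
	unsigned char b[8] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc, 0, 0 };
	unsigned char c[8] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcd, 0, 0 };

	printf("a vs b -> %u (expect 0)\n", compare_mac_64bits(a, b));
	printf("a vs c -> differ: %d\n", compare_mac_64bits(a, c) != 0);
	return 0;
}

The design point worth noting is the 6+2 byte contract: callers must guarantee
two readable padding bytes after each address so the wide loads never run past
the buffer, and zap_last_2bytes() shifts those bytes out of the fold before
the final comparison.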