author	Hannes Frederic Sowa <hannes@stressinduktion.org>	2014-11-05 00:23:04 +0100
committer	David S. Miller <davem@davemloft.net>	2014-11-05 22:01:21 -0500
commit	e5a2c899957659cd1a9f789bc462f9c0b35f5150 (patch)
tree	4c9b8a6f89d961daf9ada9f5ee95f8b371ce3a04 /include/asm-generic
parent	2c99cd914d4fed9160d98849c9dd38034616768e (diff)
fast_hash: avoid indirect function calls
By default the arch_fast_hash hashing function pointers are initialized to
jhash(2). If during boot-up a CPU with SSE4.2 is detected, they get updated to
the CRC32 ones. This dispatching scheme incurs a function pointer lookup and an
indirect call for every hashing operation. rhashtable, as a user of
arch_fast_hash, e.g. stores pointers to hashing functions in its structure,
too, causing two indirect branches per hashing operation.

Using alternative_call we can get away with one of those indirect branches.

Acked-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Thomas Graf <tgraf@suug.ch>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
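To make the cost concrete, the following is a hedged user-space C sketch of the
double indirection the message describes. It is not kernel code: every name in
it is illustrative, and the real fast_hash_ops and rhashtable structures differ.
On x86, alternative_call removes the first of these branches by patching in a
direct call to the implementation selected at boot.

/*
 * User-space sketch (not kernel code) of the dispatch scheme described
 * above: one indirect call to reach the boot-time selected fast-hash
 * implementation, plus a second one when a data structure such as
 * rhashtable stores its own hash-function pointer. All names are
 * illustrative, not taken from the kernel sources.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t hash_generic(const void *data, uint32_t len, uint32_t seed)
{
	/* stand-in for jhash(): a trivial byte-wise mix, not the real thing */
	const unsigned char *p = data;
	uint32_t h = seed;

	while (len--)
		h = h * 31 + *p++;
	return h;
}

static uint32_t hash_crc32(const void *data, uint32_t len, uint32_t seed)
{
	/* stand-in for the SSE4.2 CRC32 variant selected at boot */
	return hash_generic(data, len, seed ^ 0x82f63b78);
}

/* mirrors the role of the old struct fast_hash_ops, filled in once at boot */
static struct {
	uint32_t (*hash)(const void *data, uint32_t len, uint32_t seed);
} fast_hash_ops = { .hash = hash_generic };

static void setup_fast_hash(int cpu_has_sse42)
{
	if (cpu_has_sse42)
		fast_hash_ops.hash = hash_crc32;
}

static uint32_t fast_hash(const void *data, uint32_t len, uint32_t seed)
{
	return fast_hash_ops.hash(data, len, seed);	/* indirect branch #1 */
}

/* a table that, like rhashtable, also keeps a hash-function pointer */
struct table {
	uint32_t (*hashfn)(const void *data, uint32_t len, uint32_t seed);
	uint32_t seed;
};

int main(void)
{
	struct table t = { .hashfn = fast_hash, .seed = 1 };
	const char key[] = "example";

	setup_fast_hash(1);
	/* indirect branch #2: the table's pointer, which then takes #1 */
	printf("%08x\n", t.hashfn(key, sizeof(key) - 1, t.seed));
	return 0;
}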
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/hash.h  36
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
index b6312843dbd9..3c82760ff2a4 100644
--- a/include/asm-generic/hash.h
+++ b/include/asm-generic/hash.h
@@ -1,9 +1,41 @@
#ifndef __ASM_GENERIC_HASH_H
#define __ASM_GENERIC_HASH_H
-struct fast_hash_ops;
-static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
+#include <linux/jhash.h>
+
+/**
+ * arch_fast_hash - Calculates a hash over a given buffer that can have
+ * arbitrary size. This function will eventually use an
+ * architecture-optimized hashing implementation if
+ * available, and trades off distribution for speed.
+ *
+ * @data: buffer to hash
+ * @len: length of buffer in bytes
+ * @seed: start seed
+ *
+ * Returns 32bit hash.
+ */
+static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed)
+{
+ return jhash(data, len, seed);
+}
+
+/**
+ * arch_fast_hash2 - Calculates a hash over a given buffer whose size is a
+ * multiple of 32bit words. This
+ * function will eventually use an architecture-
+ * optimized hashing implementation if available,
+ * and trades off distribution for speed.
+ *
+ * @data: buffer to hash (must be 32bit padded)
+ * @len: number of 32bit words
+ * @seed: start seed
+ *
+ * Returns 32bit hash.
+ */
+static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
{
+ return jhash2(data, len, seed);
}
#endif /* __ASM_GENERIC_HASH_H */
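As a usage illustration only (struct flow_key and both helpers below are
hypothetical, not part of this commit), a fixed-size key made of 32bit words
would go through arch_fast_hash2(), while arbitrary-length data would go
through arch_fast_hash(). The include assumes the <linux/hash.h> wrapper of
that era, which pulls in the arch's asm/hash.h or this generic fallback.

/* Hypothetical caller; struct flow_key and both helpers are illustrative. */
#include <linux/hash.h>		/* assumed to pull in asm/hash.h -> asm-generic/hash.h */
#include <linux/string.h>

struct flow_key {
	u32 saddr;
	u32 daddr;
	u32 ports;
};

static u32 flow_key_hash(const struct flow_key *key, u32 seed)
{
	/* the key is a whole number of 32bit words, so the word-wise variant fits */
	return arch_fast_hash2((const u32 *)key, sizeof(*key) / sizeof(u32), seed);
}

static u32 name_hash(const char *name, u32 seed)
{
	/* arbitrary byte length: use the byte-oriented variant */
	return arch_fast_hash(name, strlen(name), seed);
}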