summaryrefslogtreecommitdiffstats
path: root/include/asm-generic
diff options
context:
space:
mode:
authorJay Vosburgh <jay.vosburgh@canonical.com>2014-11-14 11:05:06 -0800
committerDavid S. Miller <davem@davemloft.net>2014-11-14 16:36:25 -0500
commita77f9c5dcdf8480a93332792c336fa2bf9d31229 (patch)
tree003964de0cc772f361d254385406fa75f3fcbff2 /include/asm-generic
parent8cd4313aa775537f724486d5b7503b4d46c9f012 (diff)
downloadlinux-a77f9c5dcdf8480a93332792c336fa2bf9d31229.tar.gz
linux-a77f9c5dcdf8480a93332792c336fa2bf9d31229.tar.bz2
linux-a77f9c5dcdf8480a93332792c336fa2bf9d31229.zip
Revert "fast_hash: avoid indirect function calls"
This reverts commit e5a2c899957659cd1a9f789bc462f9c0b35f5150. Commit e5a2c899 introduced an alternative_call, arch_fast_hash2, that selects between __jhash2 and __intel_crc4_2_hash based on the X86_FEATURE_XMM4_2. Unfortunately, the alternative_call system does not appear to be suitable for use with C functions, as register usage is not handled properly for the called functions. The __jhash2 function in particular clobbers registers that are not preserved when called via alternative_call, resulting in a panic for direct callers of arch_fast_hash2 on older CPUs lacking sse4_2. It is possible that __intel_crc4_2_hash works merely by chance because it uses fewer registers. This commit was suggested as the source of the problem by Jesse Gross <jesse@nicira.com>. Signed-off-by: Jay Vosburgh <jay.vosburgh@canonical.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-generic')
-rw-r--r--include/asm-generic/hash.h36
1 file changed, 2 insertions(+), 34 deletions(-)
diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h
index 3c82760ff2a4..b6312843dbd9 100644
--- a/include/asm-generic/hash.h
+++ b/include/asm-generic/hash.h
@@ -1,41 +1,9 @@
#ifndef __ASM_GENERIC_HASH_H
#define __ASM_GENERIC_HASH_H
-#include <linux/jhash.h>
-
-/**
- * arch_fast_hash - Caclulates a hash over a given buffer that can have
- * arbitrary size. This function will eventually use an
- * architecture-optimized hashing implementation if
- * available, and trades off distribution for speed.
- *
- * @data: buffer to hash
- * @len: length of buffer in bytes
- * @seed: start seed
- *
- * Returns 32bit hash.
- */
-static inline u32 arch_fast_hash(const void *data, u32 len, u32 seed)
-{
- return jhash(data, len, seed);
-}
-
-/**
- * arch_fast_hash2 - Caclulates a hash over a given buffer that has a
- * size that is of a multiple of 32bit words. This
- * function will eventually use an architecture-
- * optimized hashing implementation if available,
- * and trades off distribution for speed.
- *
- * @data: buffer to hash (must be 32bit padded)
- * @len: number of 32bit words
- * @seed: start seed
- *
- * Returns 32bit hash.
- */
-static inline u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed)
+struct fast_hash_ops;
+static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
{
- return jhash2(data, len, seed);
}
#endif /* __ASM_GENERIC_HASH_H */