diff options
author | Michal Simek <monstr@monstr.eu> | 2010-10-09 13:58:24 +1000 |
---|---|---|
committer | Michal Simek <monstr@monstr.eu> | 2010-10-21 15:52:01 +1000 |
commit | 93e2e85139509338c68279c7260ebb68177b23a9 (patch) | |
tree | 64171deb6d2f046da3cf7c5111f73e419be1d460 /arch/microblaze | |
parent | ccea0e6e49e4db8ee7968c183ecddb3e399c5f54 (diff) | |
download | linux-stable-93e2e85139509338c68279c7260ebb68177b23a9.tar.gz linux-stable-93e2e85139509338c68279c7260ebb68177b23a9.tar.bz2 linux-stable-93e2e85139509338c68279c7260ebb68177b23a9.zip |
microblaze: Separate library optimized functions
memcpy/memmove/memset
Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze')
-rw-r--r-- | arch/microblaze/lib/memcpy.c | 13 | ||||
-rw-r--r-- | arch/microblaze/lib/memmove.c | 26 | ||||
-rw-r--r-- | arch/microblaze/lib/memset.c | 22 |
3 files changed, 46 insertions, 15 deletions
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c index 014bac92bdff..ab2d115f9ee5 100644 --- a/arch/microblaze/lib/memcpy.c +++ b/arch/microblaze/lib/memcpy.c @@ -33,17 +33,24 @@ #include <asm/system.h> #ifdef __HAVE_ARCH_MEMCPY +#ifndef CONFIG_OPT_LIB_FUNCTION void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; -#ifndef CONFIG_OPT_LIB_FUNCTION + /* Simple, byte oriented memcpy. */ while (c--) *dst++ = *src++; return v_dst; -#else +} +#else /* CONFIG_OPT_LIB_FUNCTION */ +void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) +{ + const char *src = v_src; + char *dst = v_dst; + /* The following code tries to optimize the copy by using unsigned * alignment. This will work fine if both source and destination are * aligned on the same boundary. However, if they are aligned on @@ -150,7 +157,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c) } return v_dst; -#endif } +#endif /* CONFIG_OPT_LIB_FUNCTION */ EXPORT_SYMBOL(memcpy); #endif /* __HAVE_ARCH_MEMCPY */ diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c index 0929198c5e68..1d3c0e7990e5 100644 --- a/arch/microblaze/lib/memmove.c +++ b/arch/microblaze/lib/memmove.c @@ -31,16 +31,12 @@ #include <linux/string.h> #ifdef __HAVE_ARCH_MEMMOVE +#ifndef CONFIG_OPT_LIB_FUNCTION void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) { const char *src = v_src; char *dst = v_dst; -#ifdef CONFIG_OPT_LIB_FUNCTION - const uint32_t *i_src; - uint32_t *i_dst; -#endif - if (!c) return v_dst; @@ -48,7 +44,6 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) if (v_dst <= v_src) return memcpy(v_dst, v_src, c); -#ifndef CONFIG_OPT_LIB_FUNCTION /* copy backwards, from end to beginning */ src += c; dst += c; @@ -58,7 +53,22 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) *--dst = *--src; return v_dst; -#else +} +#else /* CONFIG_OPT_LIB_FUNCTION */ +void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) +{ + const char *src = v_src; + char *dst = v_dst; + const uint32_t *i_src; + uint32_t *i_dst; + + if (!c) + return v_dst; + + /* Use memcpy when source is higher than dest */ + if (v_dst <= v_src) + return memcpy(v_dst, v_src, c); + /* The following code tries to optimize the copy by using unsigned * alignment. This will work fine if both source and destination are * aligned on the same boundary. However, if they are aligned on @@ -169,7 +179,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c) *--dst = *--src; } return v_dst; -#endif } +#endif /* CONFIG_OPT_LIB_FUNCTION */ EXPORT_SYMBOL(memmove); #endif /* __HAVE_ARCH_MEMMOVE */ diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c index ecfb663e1fc1..834565d1607e 100644 --- a/arch/microblaze/lib/memset.c +++ b/arch/microblaze/lib/memset.c @@ -31,17 +31,30 @@ #include <linux/string.h> #ifdef __HAVE_ARCH_MEMSET +#ifndef CONFIG_OPT_LIB_FUNCTION +void *memset(void *v_src, int c, __kernel_size_t n) +{ + char *src = v_src; + + /* Truncate c to 8 bits */ + c = (c & 0xFF); + + /* Simple, byte oriented memset or the rest of count. */ + while (n--) + *src++ = c; + + return v_src; +} +#else /* CONFIG_OPT_LIB_FUNCTION */ void *memset(void *v_src, int c, __kernel_size_t n) { char *src = v_src; -#ifdef CONFIG_OPT_LIB_FUNCTION uint32_t *i_src; uint32_t w32 = 0; -#endif + /* Truncate c to 8 bits */ c = (c & 0xFF); -#ifdef CONFIG_OPT_LIB_FUNCTION if (unlikely(c)) { /* Make a repeating word out of it */ w32 = c; @@ -72,12 +85,13 @@ void *memset(void *v_src, int c, __kernel_size_t n) src = (void *)i_src; } -#endif + /* Simple, byte oriented memset or the rest of count. */ while (n--) *src++ = c; return v_src; } +#endif /* CONFIG_OPT_LIB_FUNCTION */ EXPORT_SYMBOL(memset); #endif /* __HAVE_ARCH_MEMSET */ |