author    Paul Mundt <lethal@linux-sh.org>    2011-02-15 16:26:41 +0900
committer Paul Mundt <lethal@linux-sh.org>    2011-02-15 16:26:41 +0900
commit    d60cf53a30956e47919788b2ef49287786a959c9 (patch)
tree      cb518d7cc4813f4b54a3fd5fcaad43e6d82be6a3 /arch/sh
parent    0ce08870b8a4895044b6cf2bbdc774a6faaa3656 (diff)
parent    13c12a4e8ecdf3998cd2d89ade69f6f194819c95 (diff)
Merge branch 'sh/st-integration' into sh/urgent
Diffstat (limited to 'arch/sh')
 arch/sh/include/asm/sections.h |  2 +-
 arch/sh/lib/delay.c            | 10 ++++++++++
 arch/sh/mm/cache.c             |  3 ++-
 3 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index a78701da775b..4a5350037c8f 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,7 +3,7 @@
 
 #include <asm-generic/sections.h>
 
-extern void __nosave_begin, __nosave_end;
+extern long __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
 extern char _ebss[];
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index faa8f86c0db4..0901b2f14e15 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -10,6 +10,16 @@
 void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
+	/*
+	 * ST40-300 appears to have an issue with this code,
+	 * normally taking two cycles each loop, as with all
+	 * other SH variants. If however the branch and the
+	 * delay slot straddle an 8 byte boundary, this increases
+	 * to 3 cycles.
+	 * This align directive ensures this doesn't occur.
+	 */
+		".balign 8\n\t"
+
 		"tst	%0, %0\n\t"
 		"1:\t"
 		"bf/s	1b\n\t"
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 88d3dc3d30d5..5a580ea04429 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
 		kunmap_atomic(vfrom, KM_USER0);
 	}
 
-	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
+	    (vma->vm_flags & VM_EXEC))
 		__flush_purge_region(vto, PAGE_SIZE);
 
 	kunmap_atomic(vto, KM_USER1);
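
A note on the sections.h hunk: `extern void __nosave_begin, __nosave_end;` declares objects of the incomplete type void, which ISO C forbids and which breaks as soon as the code takes the symbols' addresses. The markers are defined by the linker script and only their addresses are meaningful, so any complete type restores validity; `long` matches the neighbouring `__machvec_*` declarations. A minimal sketch of how such markers are typically consumed (the function name is hypothetical, not from this commit):

	/*
	 * Minimal sketch, not from this commit: how linker-provided section
	 * markers are typically consumed.  Only the addresses of the symbols
	 * matter (the objects themselves are never read), so any complete
	 * type works, and `long` keeps the declaration list uniform.
	 * pfn_is_nosave_sketch() is a hypothetical consumer, not a kernel API.
	 */
	#include <asm/page.h>

	extern long __nosave_begin, __nosave_end;	/* defined by the linker script */

	static int pfn_is_nosave_sketch(unsigned long pfn)
	{
		unsigned long begin_pfn = (unsigned long)&__nosave_begin >> PAGE_SHIFT;
		unsigned long end_pfn   = (unsigned long)&__nosave_end   >> PAGE_SHIFT;

		return pfn >= begin_pfn && pfn < end_pfn;
	}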
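
The delay.c alignment fix is worth unpacking. SH instructions are two bytes each, so once the loop start is forced to an 8-byte boundary the `tst` lands at offset 0, the `bf/s` at offset 2, and its delay-slot `dt` at offset 4: the branch and its slot can never straddle an 8-byte boundary, which is exactly the case that costs the extra ST40-300 cycle. The hunk's context ends inside the asm statement; for completeness, the whole function as it reads in mainline after this merge is roughly:

	/*
	 * Full function for context; the tail (delay-slot insn and asm
	 * constraints) is reconstructed from arch/sh/lib/delay.c, since the
	 * hunk above cuts off before it.  One iteration is `bf/s` plus the
	 * `dt` in its delay slot, i.e. two cycles on most SH parts.
	 */
	void __delay(unsigned long loops)
	{
		__asm__ __volatile__(
			".balign 8\n\t"		/* tst at +0, bf/s at +2, dt at +4: branch and slot share one 8-byte block */
			"tst	%0, %0\n\t"	/* T = (loops == 0), so a zero count falls straight through */
			"1:\t"
			"bf/s	1b\n\t"		/* loop while T is clear; the next insn runs in the delay slot */
			" dt	%0"		/* decrement loops, set T once it reaches zero */
			: "=r" (loops)
			: "0" (loops)
			: "t");
	}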
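
On the cache.c hunk: the SH instruction cache does not snoop the data cache, so when copy_user_highpage() writes into a page that is mapped executable (a copied-on-write text page, or code patched in via ptrace), the kernel-side copy must be purged from the D-cache before userspace fetches from it, or stale instructions may be executed; hence the new VM_EXEC half of the condition. The pages_do_alias() half guards against a different hazard, virtual aliasing in the D-cache. A hypothetical sketch of what a test of that shape computes (the names and cache geometry below are illustrative, not the kernel's):

	#include <asm/page.h>

	/*
	 * Hypothetical sketch: on a virtually indexed D-cache, the same
	 * physical page can occupy different cache lines under two virtual
	 * addresses whenever those addresses differ in the index bits above
	 * PAGE_SHIFT.  With an illustrative 16 KiB cache way and 4 KiB pages,
	 * those are bits 12 and 13.
	 */
	#define DCACHE_WAY_SIZE_SKETCH		0x4000UL	/* 16 KiB per way, illustrative */
	#define DCACHE_ALIAS_MASK_SKETCH	(DCACHE_WAY_SIZE_SKETCH - PAGE_SIZE)

	static inline int pages_do_alias_sketch(unsigned long addr1, unsigned long addr2)
	{
		return ((addr1 ^ addr2) & DCACHE_ALIAS_MASK_SKETCH) != 0;
	}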