author    Paul Mundt <lethal@linux-sh.org>  2008-09-20 20:21:33 +0900
committer Paul Mundt <lethal@linux-sh.org>  2008-09-20 20:21:33 +0900
commit    c15c5f8c2bf0b00d036c5c6b67264764a6e5dffc
tree      fb4aca715702ba76edb95424fd2a573fa3ed6a28
parent    b817f7e020958c8f79842076c137daa6f72eb366
sh: Support kernel stacks smaller than a page.
This follows the powerpc commit f6a616800e68b61807d0f7bb0d5dc70665ef8046
'[POWERPC] Fix kernel stack allocation alignment'.

SH has traditionally forced the thread order to be relative to the page
size, so there were never any situations where the same bug was triggered
by slub. Regardless, the usage of > 8kB stacks for the larger page sizes
is overkill, so we switch to using slab allocations there, as per the
powerpc change.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
 arch/sh/include/asm/thread_info.h | 32
 arch/sh/mm/init.c                 | 29
 2 files changed, 43 insertions(+), 18 deletions(-)
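
The heart of the change: a stack smaller than a page cannot come from the
raw page allocator, and plain kmalloc() makes no alignment promise under
slub, so the patch takes such stacks from a kmem_cache created with
THREAD_SIZE as its alignment (see the kmem_cache_create() call in the
init.c hunk below). The alignment matters because, on SH configurations
without a banked thread_info register, current_thread_info() recovers the
thread_info pointer by masking low bits off the stack pointer. The
following is a small userspace sketch of that requirement, not kernel
code; THREAD_SIZE is hard-coded to the 8 kB value the new header picks,
and posix_memalign() stands in for the aligned kmem_cache:

#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* 8 kB, the value the new header picks when CONFIG_4KSTACKS is not set */
#define THREAD_SIZE ((uintptr_t)1 << 13)

int main(void)
{
	void *stack;

	/* posix_memalign() plays the role of the THREAD_SIZE-aligned
	 * kmem_cache; plain malloc()/kmalloc() gives no such guarantee. */
	if (posix_memalign(&stack, THREAD_SIZE, THREAD_SIZE))
		return 1;

	/* a "stack pointer" somewhere inside the stack block */
	uintptr_t sp = (uintptr_t)stack + 0x123;

	/* the mask trick current_thread_info() uses on SH when it cannot
	 * read the thread_info pointer from a banked register */
	void *ti = (void *)(sp & ~(THREAD_SIZE - 1));

	printf("base=%p recovered=%p -> %s\n", stack, ti,
	       ti == stack ? "OK" : "broken");
	free(stack);
	return 0;
}

With an unaligned base the mask lands in the middle of someone else's
allocation, which is exactly the failure mode the referenced powerpc
commit fixed.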
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 0a894cafb1dd..f09ac4806294 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -33,20 +33,12 @@ struct thread_info {
#define PREEMPT_ACTIVE 0x10000000
#if defined(CONFIG_4KSTACKS)
-#define THREAD_SIZE_ORDER (0)
-#elif defined(CONFIG_PAGE_SIZE_4KB)
-#define THREAD_SIZE_ORDER (1)
-#elif defined(CONFIG_PAGE_SIZE_8KB)
-#define THREAD_SIZE_ORDER (1)
-#elif defined(CONFIG_PAGE_SIZE_16KB)
-#define THREAD_SIZE_ORDER (0)
-#elif defined(CONFIG_PAGE_SIZE_64KB)
-#define THREAD_SIZE_ORDER (0)
+#define THREAD_SHIFT 12
#else
-#error "Unknown thread size"
+#define THREAD_SHIFT 13
#endif
-#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define THREAD_SIZE (1 << THREAD_SHIFT)
#define STACK_WARN (THREAD_SIZE >> 3)
/*
@@ -94,15 +86,19 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
+/* thread information allocation */
+#if THREAD_SHIFT >= PAGE_SHIFT
+
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+#else /* THREAD_SHIFT < PAGE_SHIFT */
+
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-/* thread information allocation */
-#ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(ti) kzalloc(THREAD_SIZE, GFP_KERNEL)
-#else
-#define alloc_thread_info(ti) kmalloc(THREAD_SIZE, GFP_KERNEL)
-#endif
-#define free_thread_info(ti) kfree(ti)
+extern struct thread_info *alloc_thread_info(struct task_struct *tsk);
+extern void free_thread_info(struct thread_info *ti);
+
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
#endif /* __ASSEMBLY__ */
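
To make the new constants concrete (arithmetic only, derived from the
hunk above, with the default THREAD_SHIFT of 13):

  page size   PAGE_SHIFT   THREAD_SIZE_ORDER / path      stack
  4 kB        12           13 - 12 = 1 (order-1 pages)   8 kB
  8 kB        13           13 - 13 = 0 (order-0 page)    8 kB
  16 kB       14           slab path (13 < 14)           8 kB
  64 kB       16           slab path (13 < 16)           8 kB

With CONFIG_4KSTACKS, THREAD_SHIFT drops to 12, so 4 kB pages stay at
order 0 and all larger page sizes take the slab path. The 16 kB and
64 kB page configurations previously got whole-page 16 kB and 64 kB
stacks from the old order-0 table; those are the "> 8kB stacks" the
commit message calls overkill.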
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 31211bfdc6d8..2a53943924b2 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -265,6 +265,35 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
+#if THREAD_SHIFT < PAGE_SHIFT
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+ struct thread_info *ti;
+
+ ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+ if (unlikely(ti == NULL))
+ return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ memset(ti, 0, THREAD_SIZE);
+#endif
+ return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+ thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ THREAD_SIZE, 0, NULL);
+ BUG_ON(thread_info_cache == NULL);
+}
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
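
A note on how the new hooks get wired in, recalled from the generic
kernel of this vintage rather than taken from the patch itself:
__HAVE_ARCH_THREAD_INFO_ALLOCATOR tells kernel/fork.c not to define its
own alloc_thread_info()/free_thread_info(), and thread_info_cache_init()
overrides a weak stub that start_kernel() calls once the slab allocator
is up, roughly:

/* init/main.c (generic kernel, sketched from memory): */
void __init __weak thread_info_cache_init(void)
{
}

That early-boot call site is also why the arch override uses BUG_ON()
rather than an error return: if the thread_info cache cannot be created,
no further threads can be spawned and there is nothing to fall back to.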