author      Heiko Carstens <heiko.carstens@de.ibm.com>    2008-05-30 10:03:27 +0200
committer   Martin Schwidefsky <schwidefsky@de.ibm.com>   2008-05-30 10:03:34 +0200
commit      67060d9c1f5d91c917cc51bed464cb5638eaddbc (patch)
tree        84a9f66567bcec141f80b0fbc9db995ac9323a50 /arch
parent      bebd9a455b2593ba6543b961bc82c43350c2d8d9 (diff)
[S390] Fix section mismatch warnings.
This fixes the last remaining section mismatch warnings in s390
architecture code. It also reveals a real bug introduced by... me
with git commit 2069e978d5a6e7b45d58027e3de7f879b8c5e488
("[S390] sparsemem vmemmap: initialize memmap.")
Calling the generic vmemmap_alloc_block() function to get initialized
memory is a nice idea; however, that function is __meminit annotated,
so it might already be gone by the time we try to call it later.
This can happen if a DCSS segment gets added.
So basically revert the patch and clear the memmap explicitly to fix
the original bug.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
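
A short illustration of the section mismatch issue described above; this sketch
is not part of the patch and the example_* name is hypothetical. A __meminit
function sits in an init section that may be discarded after boot, so a caller
that can still run later (for instance when a DCSS segment gets added) must not
depend on it. The pattern used by vmem_alloc_pages() in the diff below picks the
allocator at run time instead, and __ref only tells modpost that the remaining
reference to init code is intentional.

/*
 * Illustration only, not part of the commit.  Once the kernel has
 * booted, the bootmem allocator is gone and __meminit code may be
 * freed, so choose the allocator based on slab_is_available().
 */
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/slab.h>

static void __ref *example_alloc_pages(unsigned int order)
{
	if (slab_is_available())	/* after mem_init(): use the buddy allocator */
		return (void *)__get_free_pages(GFP_KERNEL, order);
	/* still early in boot: fall back to bootmem */
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}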
Diffstat (limited to 'arch')
-rw-r--r--   arch/s390/kernel/smp.c |  2
-rw-r--r--   arch/s390/mm/vmem.c    | 18
2 files changed, 14 insertions, 6 deletions
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1f4228948dc4..42b1d12ebb10 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1089,7 +1089,7 @@ out:
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-int smp_rescan_cpus(void)
+int __ref smp_rescan_cpus(void)
 {
 	cpumask_t newcpus;
 	int cpu;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ea2804808f39..f591188fa2c0 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,12 +27,19 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-static pud_t *vmem_pud_alloc(void)
+static void __ref *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
 #ifdef CONFIG_64BIT
-	pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -40,12 +47,12 @@ static pud_t *vmem_pud_alloc(void)
 	return pud;
 }
 
-static pmd_t *vmem_pmd_alloc(void)
+static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
 #ifdef CONFIG_64BIT
-	pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@@ -207,13 +214,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		if (pte_none(*pt_dir)) {
 			unsigned long new_page;
 
-			new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
+			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
 	}
+	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
 out:
 	flush_tlb_kernel_range(start_addr, end_addr);
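
A hedged aside on the memset() added at the end of vmemmap_populate() above (my
reading of "clear the memmap explicitly", not text from the commit): the generic
vmemmap_alloc_block() hands back initialized memory, while
__get_free_pages(GFP_KERNEL, order) does not, so once vmem_alloc_pages() supplies
the memmap pages the freshly mapped range must be cleared by the caller. A
minimal sketch of that contract; the example_* name is hypothetical.

/*
 * Sketch of the zeroing contract: pages from __get_free_pages() are
 * not cleared unless __GFP_ZERO is passed, so an initialized memmap
 * has to be produced by the caller, as vmemmap_populate() now does.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static struct page *example_alloc_cleared_memmap(unsigned long nr_pages)
{
	unsigned int order = get_order(nr_pages * sizeof(struct page));
	struct page *map = (struct page *)__get_free_pages(GFP_KERNEL, order);

	if (map)
		memset(map, 0, nr_pages * sizeof(struct page));	/* same idea as the memset() above */
	return map;
}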