author     Linus Torvalds <torvalds@linux-foundation.org>  2016-12-17 17:05:49 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-12-17 17:05:49 -0800
commit     d9cb5bfcc3339f1a63df8fe0af8cece33c83c3af
tree       091dea1a92466e87e2171bbd18f2e3f4908d5f4a /arch/tile
parent     0f484e42baaf5a38fc79e99b917caa5431651fb1
parent     14e73e78ee982710292248536aa84cba41e974f4
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull arch/tile updates from Chris Metcalf:
"Another grab-bag of miscellaneous changes"
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
tile: use __ro_after_init instead of tile-specific __write_once
tile: migrate exception table users off module.h and onto extable.h
tile: remove #pragma unroll from finv_buffer_remote()
tile-module: Rename jump labels in module_alloc()
tile-module: Use kmalloc_array() in module_alloc()
tile/pci_gx: fix spelling mistake: "delievered" -> "delivered"
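
For context on the first item above: __ro_after_init is the generic attribute (from <linux/cache.h>) for data that is written during boot and then treated as read-only, which is the property the tile-specific __write_once existed to express. Below is a minimal sketch of the usage pattern, with a hypothetical boot_tunable variable and "noboottunable" option; note that on tile the attribute merely aliases __read_mostly, as the cache.h hunk further down explains, so the read-only protection is advisory there.

	#include <linux/cache.h>
	#include <linux/init.h>

	/* Hypothetical example: may be written only before init ends. */
	static int boot_tunable __ro_after_init = 1;

	static int __init boot_tunable_setup(char *str)
	{
		/* Fine: __init code runs before the section is sealed. */
		boot_tunable = 0;
		return 1;
	}
	__setup("noboottunable", boot_tunable_setup);

On architectures that remap the .data..ro_after_init section, any write to boot_tunable after init completes would fault.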
Diffstat (limited to 'arch/tile')
 arch/tile/include/asm/cache.h    |  7
 arch/tile/include/asm/sections.h |  3
 arch/tile/kernel/module.c        | 11
 arch/tile/kernel/pci.c           |  2
 arch/tile/kernel/pci_gx.c        |  2
 arch/tile/kernel/setup.c         | 18
 arch/tile/kernel/smp.c           |  2
 arch/tile/kernel/time.c          |  4
 arch/tile/kernel/unaligned.c     |  2
 arch/tile/lib/cacheflush.c       |  8
 arch/tile/mm/extable.c           |  2
 arch/tile/mm/fault.c             |  2
 arch/tile/mm/homecache.c         |  2
 arch/tile/mm/init.c              | 10
 14 files changed, 31 insertions(+), 44 deletions(-)
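
One thing worth spelling out before the diff: the module_alloc() hunk below replaces kmalloc(npages * sizeof(struct page *), GFP_KERNEL) with kmalloc_array(npages, sizeof(*pages), GFP_KERNEL). The helper's value is its overflow guard; roughly what the generic slab header of this era does (a sketch, not tile code):

	static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
	{
		/* Refuse multiplications that would wrap past SIZE_MAX;
		 * a bare kmalloc(n * size, flags) would silently return
		 * a buffer smaller than the caller expects. */
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;
		return __kmalloc(n * size, flags);
	}

Writing sizeof(*pages) rather than sizeof(struct page *) also keeps the element size correct if the declared type of pages ever changes.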
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 4810e48dbbbf..7d6aaa128e8b 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -50,18 +50,15 @@
 /*
  * Originally we used small TLB pages for kernel data and grouped some
- * things together as "write once", enforcing the property at the end
+ * things together as ro-after-init, enforcing the property at the end
  * of initialization by making those pages read-only and non-coherent.
  * This allowed better cache utilization since cache inclusion did not
  * need to be maintained. However, to do this requires an extra TLB
  * entry, which on balance is more of a performance hit than the
  * non-coherence is a performance gain, so we now just make "read
- * mostly" and "write once" be synonyms. We keep the attribute
+ * mostly" and "ro-after-init" be synonyms. We keep the attribute
  * separate in case we change our minds at a future date.
  */
-#define __write_once __read_mostly
-
-/* __ro_after_init is the generic name for the tile arch __write_once. */
 #define __ro_after_init __read_mostly
 
 #endif /* _ASM_TILE_CACHE_H */
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
index 86a746243dc8..50343bfe7936 100644
--- a/arch/tile/include/asm/sections.h
+++ b/arch/tile/include/asm/sections.h
@@ -19,9 +19,6 @@
 
 #include <asm-generic/sections.h>
 
-/* Write-once data is writable only till the end of initialization. */
-extern char __w1data_begin[], __w1data_end[];
-
 extern char vdso_start[], vdso_end[];
 #ifdef CONFIG_COMPAT
 extern char vdso32_start[], vdso32_end[];
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 2305084c9b93..09233fbe7801 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -43,29 +43,28 @@ void *module_alloc(unsigned long size)
 	int npages;
 
 	npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-	pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
+	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
 	if (pages == NULL)
 		return NULL;
 	for (; i < npages; ++i) {
 		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
 		if (!pages[i])
-			goto error;
+			goto free_pages;
 	}
 
 	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
 	if (!area)
-		goto error;
+		goto free_pages;
 	area->nr_pages = npages;
 	area->pages = pages;
 
 	if (map_vm_area(area, prot_rwx, pages)) {
 		vunmap(area->addr);
-		goto error;
+		goto free_pages;
 	}
 
 	return area->addr;
-
-error:
+free_pages:
 	while (--i >= 0)
 		__free_page(pages[i]);
 	kfree(pages);
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 9475a74cd53a..bc6656b5708b 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -57,7 +57,7 @@ static int pci_probe = 1;
  * This flag tells if the platform is TILEmpower that needs
  * special configuration for the PLX switch chip.
  */
-int __write_once tile_plx_gen1;
+int __ro_after_init tile_plx_gen1;
 
 static struct pci_controller controllers[TILE_NUM_PCIE];
 static int num_controllers;
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 0e7a5d09e023..b554a68eea1b 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -131,7 +131,7 @@ static int tile_irq_cpu(int irq)
 
 	count = cpumask_weight(&intr_cpus_map);
 	if (unlikely(count == 0)) {
-		pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n");
+		pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
 		return irq % (smp_height * smp_width);
 	}
 
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 153020abd2f5..443a70bccc1c 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -49,7 +49,7 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
 
 /* Chip information */
-char chip_model[64] __write_once;
+char chip_model[64] __ro_after_init;
 
 #ifdef CONFIG_VT
 struct screen_info screen_info;
@@ -97,17 +97,17 @@ int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
 #ifdef CONFIG_HIGHMEM
 /* Map information from VAs to PAs */
 unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
-	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
+	__ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(pbase_map);
 
 /* Map information from PAs to VAs */
 void *vbase_map[NR_PA_HIGHBIT_VALUES]
-	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
+	__ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(vbase_map);
 #endif
 
 /* Node number as a function of the high PA bits */
-int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
+int highbits_to_node[NR_PA_HIGHBIT_VALUES] __ro_after_init;
 EXPORT_SYMBOL(highbits_to_node);
 
 static unsigned int __initdata maxmem_pfn = -1U;
@@ -844,11 +844,11 @@ static void __init zone_sizes_init(void)
 #ifdef CONFIG_NUMA
 
 /* which logical CPUs are on which nodes */
-struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
+struct cpumask node_2_cpu_mask[MAX_NUMNODES] __ro_after_init;
 EXPORT_SYMBOL(node_2_cpu_mask);
 
 /* which node each logical CPU is on */
-char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
+char cpu_2_node[NR_CPUS] __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(cpu_2_node);
 
 /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
@@ -1269,7 +1269,7 @@ static void __init validate_va(void)
  * cpus plus any other cpus that are willing to share their cache.
  * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
  */
-struct cpumask __write_once cpu_lotar_map;
+struct cpumask __ro_after_init cpu_lotar_map;
 EXPORT_SYMBOL(cpu_lotar_map);
 
 /*
@@ -1291,7 +1291,7 @@ EXPORT_SYMBOL(hash_for_home_map);
  * cache, those tiles will only appear in cpu_lotar_map, NOT in
  * cpu_cacheable_map, as they are a special case.
  */
-struct cpumask __write_once cpu_cacheable_map;
+struct cpumask __ro_after_init cpu_cacheable_map;
 EXPORT_SYMBOL(cpu_cacheable_map);
 
 static __initdata struct cpumask disabled_map;
@@ -1506,7 +1506,7 @@ void __init setup_arch(char **cmdline_p)
 /*
  * Set up per-cpu memory.
  */
-unsigned long __per_cpu_offset[NR_CPUS] __write_once;
+unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
 EXPORT_SYMBOL(__per_cpu_offset);
 
 static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 07e3ff5cc740..94a62e1197ce 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -27,7 +27,7 @@
  * We write to width and height with a single store in head_NN.S,
  * so make the variable aligned to "long".
  */
-HV_Topology smp_topology __write_once __aligned(sizeof(long));
+HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
 EXPORT_SYMBOL(smp_topology);
 
 #if CHIP_HAS_IPI()
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index ea960d660917..c9357012b1c8 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -37,7 +37,7 @@
  */
 
 /* How many cycles per second we are running at. */
-static cycles_t cycles_per_sec __write_once;
+static cycles_t cycles_per_sec __ro_after_init;
 
 cycles_t get_clock_rate(void)
 {
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(get_cycles);
  */
 #define SCHED_CLOCK_SHIFT 10
 
-static unsigned long sched_clock_mult __write_once;
+static unsigned long sched_clock_mult __ro_after_init;
 
 static cycles_t clocksource_get_cycles(struct clocksource *cs)
 {
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index 9772a3554282..4fe78c5b8394 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -22,7 +22,7 @@
 #include <linux/mman.h>
 #include <linux/types.h>
 #include <linux/err.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/compat.h>
 #include <linux/prctl.h>
 #include <asm/cacheflush.h>
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 9c0ec22009a5..c1ebc1065fc1 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -138,19 +138,13 @@ finv_buffer_remote(void *buffer, size_t size, int hfh)
 	if ((unsigned long)base < (unsigned long)buffer)
 		base = buffer;
 
-	/*
-	 * Fire all the loads we need.  The MAF only has eight entries
-	 * so we can have at most eight outstanding loads, so we
-	 * unroll by that amount.
-	 */
-#pragma unroll 8
+	/* Fire all the loads we need. */
 	for (; p >= base; p -= step_size)
 		force_load(p);
 
 	/*
 	 * Repeat, but with finv's instead of loads, to get rid of the
 	 * data we just loaded into our own cache and the old home L3.
-	 * No need to unroll since finv's don't target a register.
 	 * The finv's are guaranteed not to actually flush the data in
 	 * the buffer back to their home, since we just read it, so the
 	 * lines are clean in cache; we will only invalidate those lines.
diff --git a/arch/tile/mm/extable.c b/arch/tile/mm/extable.c
index 4fb0acb9d154..aeaf20c7aaa4 100644
--- a/arch/tile/mm/extable.c
+++ b/arch/tile/mm/extable.c
@@ -12,7 +12,7 @@
  * more details.
  */
 
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index beba986589e5..709f8e9ba3e9 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -29,7 +29,7 @@
 #include <linux/tty.h>
 #include <linux/vt_kern.h>		/* For unblank_screen() */
 #include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kprobes.h>
 #include <linux/hugetlb.h>
 #include <linux/syscalls.h>
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 40ca30a9fee3..b51cc28acd0a 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -47,7 +47,7 @@
  * The noallocl2 option suppresses all use of the L2 cache to cache
  * locally from a remote home.
  */
-static int __write_once noallocl2;
+static int __ro_after_init noallocl2;
 
 static int __init set_noallocl2(char *str)
 {
 	noallocl2 = 1;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index adce25462b0d..3a97e4d7205c 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -190,9 +190,9 @@ static void __init page_table_range_init(unsigned long start,
 
 static int __initdata ktext_hash = 1;  /* .text pages */
 static int __initdata kdata_hash = 1;  /* .data and .bss pages */
-int __write_once hash_default = 1;     /* kernel allocator pages */
+int __ro_after_init hash_default = 1;  /* kernel allocator pages */
 EXPORT_SYMBOL(hash_default);
-int __write_once kstack_hash = 1;      /* if no homecaching, use h4h */
+int __ro_after_init kstack_hash = 1;   /* if no homecaching, use h4h */
 
 /*
  * CPUs to use to for striping the pages of kernel data.  If hash-for-home
@@ -203,7 +203,7 @@ int __write_once kstack_hash = 1;      /* if no homecaching, use h4h */
 static __initdata struct cpumask kdata_mask;
 static __initdata int kdata_arg_seen;
 
-int __write_once kdata_huge;       /* if no homecaching, small pages */
+int __ro_after_init kdata_huge;    /* if no homecaching, small pages */
 
 
 /* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
@@ -896,8 +896,8 @@ void __init pgtable_cache_init(void)
 		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
 
-static long __write_once initfree = 1;
-static bool __write_once set_initfree_done;
+static long __ro_after_init initfree = 1;
+static bool __ro_after_init set_initfree_done;
 
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
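
A closing note on the module.h/extable.h swaps above: unaligned.c, extable.c, and fault.c only consume the exception-table interface, so they can include the much smaller <linux/extable.h> instead of dragging in the whole module loader. The code they support looks roughly like this sketch of a fixup routine in the shape of tile's (assuming pt_regs exposes the program counter as regs->pc, as tile does):

	#include <linux/extable.h>

	int fixup_exception(struct pt_regs *regs)
	{
		const struct exception_table_entry *fixup;

		/* Look up the faulting PC in the sorted __ex_table section. */
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			/* A uaccess site registered a landing pad; resume
			 * there instead of oopsing. */
			regs->pc = fixup->fixup;
			return 1;
		}
		return 0;
	}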