-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 3 +--
-rw-r--r--  arch/powerpc/mm/init_64.c     | 3 +--
-rw-r--r--  arch/sparc64/mm/tsb.c         | 3 +--
-rw-r--r--  include/linux/slab.h          | 1 -
-rw-r--r--  mm/slab.c                     | 4 ++--
-rw-r--r--  mm/slob.c                     | 4 ++--
-rw-r--r--  mm/slub.c                     | 5 ++---
7 files changed, 9 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8508f973d9cc..c8814177b716 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1057,8 +1057,7 @@ static int __init hugetlbpage_init(void)
huge_pgtable_cache = kmem_cache_create("hugepte_cache",
HUGEPTE_TABLE_SIZE,
HUGEPTE_TABLE_SIZE,
- SLAB_HWCACHE_ALIGN |
- SLAB_MUST_HWCACHE_ALIGN,
+ SLAB_HWCACHE_ALIGN,
zero_ctor, NULL);
if (! huge_pgtable_cache)
panic("hugetlbpage_init(): could not create hugepte cache\n");
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d12a87ec5ae9..5a7750147b7d 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -183,8 +183,7 @@ void pgtable_cache_init(void)
"for size: %08x...\n", name, i, size);
pgtable_cache[i] = kmem_cache_create(name,
size, size,
- SLAB_HWCACHE_ALIGN |
- SLAB_MUST_HWCACHE_ALIGN,
+ SLAB_HWCACHE_ALIGN,
zero_ctor,
NULL);
if (! pgtable_cache[i])
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 57eb3025537a..4be378d9a382 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -262,8 +262,7 @@ void __init pgtable_cache_init(void)
tsb_caches[i] = kmem_cache_create(name,
size, size,
- SLAB_HWCACHE_ALIGN |
- SLAB_MUST_HWCACHE_ALIGN,
+ SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!tsb_caches[i]) {
prom_printf("Could not create %s cache\n", name);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 67425c277e12..a9befa50d3e3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -26,7 +26,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debuggin is active */
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
diff --git a/mm/slab.c b/mm/slab.c
index 997c3b2f50c9..583644f6ae11 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -175,12 +175,12 @@
# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
- SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
+ SLAB_STORE_USER | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
- SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
+ SLAB_CACHE_DMA | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif
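
For context on why the flag also has to come out of CREATE_MASK: that mask is slab's whitelist of flags a caller may pass to kmem_cache_create(), and anything outside it is rejected as a caller bug, so a flag that no longer exists cannot stay on the list. A minimal sketch of that kind of whitelist check follows; check_create_flags() is a made-up helper, the mask is abridged to the flag values visible in the slab.h hunk above, and the real mm/slab.c performs the equivalent test inside kmem_cache_create() itself.

#define SLAB_HWCACHE_ALIGN	0x00002000UL
#define SLAB_CACHE_DMA		0x00004000UL
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL
#define SLAB_PANIC		0x00040000UL

/* Abridged: the real !DEBUG mask also includes SLAB_DESTROY_BY_RCU
 * and SLAB_MEM_SPREAD, as in the hunk above. */
#define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC)

/* Returns 0 if every requested flag is on the whitelist. */
static int check_create_flags(unsigned long flags)
{
	return (flags & ~CREATE_MASK) ? -1 : 0;
}
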
diff --git a/mm/slob.c b/mm/slob.c
index 77786be032e0..c9401a7eaa5f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
@@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
c->ctor = ctor;
c->dtor = dtor;
/* ignore alignment unless it's forced */
- c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+ c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
if (c->align < align)
c->align = align;
} else if (flags & SLAB_PANIC)
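
The slob.c change above only swaps which flag triggers the stricter alignment; the surrounding logic is untouched. A minimal sketch of that alignment pick, with slob_pick_align() and the SLOB_ALIGN value here being illustrative stand-ins rather than the real slob.c internals:

#include <stddef.h>

#define SLAB_HWCACHE_ALIGN	0x00002000UL
#define SLOB_ALIGN		64	/* stand-in for SLOB's hardware-cache alignment */

/* Post-patch behaviour: SLAB_HWCACHE_ALIGN forces SLOB_ALIGN, but an
 * explicitly requested alignment still wins if it is larger. */
static size_t slob_pick_align(unsigned long flags, size_t requested)
{
	size_t align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;

	if (align < requested)
		align = requested;
	return align;
}
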
diff --git a/mm/slub.c b/mm/slub.c
index 3904002bdb35..79940e98e5e6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1496,7 +1496,7 @@ static unsigned long calculate_alignment(unsigned long flags,
* specified alignment though. If that is greater
* then use it.
*/
- if ((flags & (SLAB_MUST_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN)) &&
+ if ((flags & SLAB_HWCACHE_ALIGN) &&
size > L1_CACHE_BYTES / 2)
return max_t(unsigned long, align, L1_CACHE_BYTES);
@@ -3142,8 +3142,7 @@ SLAB_ATTR(reclaim_account);
static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags &
- (SLAB_HWCACHE_ALIGN|SLAB_MUST_HWCACHE_ALIGN)));
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);
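
Net effect for call sites like the powerpc and sparc64 caches above: SLAB_HWCACHE_ALIGN alone now requests cache-line alignment, and SLAB_MUST_HWCACHE_ALIGN no longer exists. A sketch of a post-patch caller follows; the cache name, example_ctor() and EXAMPLE_OBJ_SIZE are placeholders rather than anything taken from the patch, and the constructor/destructor tail matches the kmem_cache_create() prototype of this kernel vintage.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>

#define EXAMPLE_OBJ_SIZE	256

static struct kmem_cache *example_cache;

/* Zero-fill new objects, in the spirit of the zero_ctor() callers above. */
static void example_ctor(void *obj, struct kmem_cache *cache, unsigned long flags)
{
	memset(obj, 0, EXAMPLE_OBJ_SIZE);
}

static int __init example_cache_init(void)
{
	example_cache = kmem_cache_create("example_cache",
					  EXAMPLE_OBJ_SIZE,
					  EXAMPLE_OBJ_SIZE,
					  SLAB_HWCACHE_ALIGN,	/* no _MUST_ variant needed */
					  example_ctor, NULL);
	if (!example_cache)
		return -ENOMEM;
	return 0;
}
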