author | Minchan Kim <minchan@kernel.org> | 2016-07-26 15:23:37 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-26 16:19:19 -0700 |
commit | 3b1d9ca65a80ced8ae737ffb11ae939334a882ca (patch) | |
tree | db2019a112c6c134f69523c7d78f4078415e78c6 | |
parent | 9bc482d3460501ac809457af26b46b72cd7dc212 (diff) | |
zsmalloc: use OBJ_TAG_BITS for bit shifter
A static checker warns about using the tag as a bit shifter. It doesn't
break the current behavior, but it is bad for readability. Let's use
OBJ_TAG_BITS as the bit shifter instead of OBJ_ALLOCATED_TAG.
Link: http://lkml.kernel.org/r/20160607045146.GF26230@bbox
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/zsmalloc.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
```diff
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 04a4f063b4fd..6b6986a02aa0 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1052,7 +1052,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		link = (struct link_free *)vaddr + off / sizeof(*link);
 
 		while ((off += class->size) < PAGE_SIZE) {
-			link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+			link->next = freeobj++ << OBJ_TAG_BITS;
 			link += class->size / sizeof(*link);
 		}
 
@@ -1063,13 +1063,13 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
 		 */
 		next_page = get_next_page(page);
 		if (next_page) {
-			link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+			link->next = freeobj++ << OBJ_TAG_BITS;
 		} else {
 			/*
-			 * Reset OBJ_ALLOCATED_TAG bit to last link to tell
+			 * Reset OBJ_TAG_BITS bit to last link to tell
 			 * whether it's allocated object or not.
 			 */
-			link->next = -1 << OBJ_ALLOCATED_TAG;
+			link->next = -1 << OBJ_TAG_BITS;
 		}
 		kunmap_atomic(vaddr);
 		page = next_page;
@@ -1514,7 +1514,7 @@ static unsigned long obj_malloc(struct size_class *class,
 
 	vaddr = kmap_atomic(m_page);
 	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-	set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
+	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
 	if (likely(!PageHugeObject(m_page)))
 		/* record handle in the header of allocated chunk */
 		link->handle = handle;
@@ -1616,7 +1616,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
 
 	/* Insert this object in containing zspage's freelist */
 	link = (struct link_free *)(vaddr + f_offset);
-	link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG;
+	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
 	kunmap_atomic(vaddr);
 	set_freeobj(zspage, f_objidx);
 	mod_zspage_inuse(zspage, -1);
```
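To make the distinction concrete, here is a minimal, self-contained sketch of the encoding this patch touches. The constant values are an assumption mirroring mm/zsmalloc.c around this commit (OBJ_TAG_BITS and OBJ_ALLOCATED_TAG both equal 1), and the helper functions are illustrative, not kernel API: the point is that a shift amount should be expressed with the bit *count* (OBJ_TAG_BITS), while OBJ_ALLOCATED_TAG is a tag *value* meant only for OR/AND masking.

```c
#include <stdio.h>

/*
 * Assumed constants, mirroring mm/zsmalloc.c around this commit.  Both
 * happen to equal 1, which is exactly why the old shift "worked" by
 * accident and was only caught by a static checker.
 */
#define OBJ_TAG_BITS      1	/* count of low bits reserved for tags */
#define OBJ_ALLOCATED_TAG 1UL	/* tag value kept in those low bits    */

/* Pack a free-object index above the tag bits, as init_zspage() does. */
static unsigned long encode_free_link(unsigned long freeobj)
{
	return freeobj << OBJ_TAG_BITS;	/* shift by a bit count */
}

/* Recover the index, as obj_malloc() does with link->next. */
static unsigned long decode_free_link(unsigned long word)
{
	return word >> OBJ_TAG_BITS;
}

/* The tag value is for masking, never for shifting. */
static int tag_is_set(unsigned long word)
{
	return (word & OBJ_ALLOCATED_TAG) != 0;
}

int main(void)
{
	unsigned long link = encode_free_link(42);

	printf("index=%lu, allocated-tag=%d\n",
	       decode_free_link(link), tag_is_set(link));
	return 0;
}
```

Compiled with any C compiler, this prints `index=42, allocated-tag=0`. Because both constants are currently 1, shifting by the tag value produces the same result at runtime; the patch changes no behavior, it only makes the code say what it means.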