author     Andy Whitcroft <apw@shadowen.org>    2006-12-06 20:33:03 -0800
committer  Linus Torvalds <torvalds@woody.osdl.org>    2006-12-07 08:39:23 -0800
commit     25ba77c141dbcd2602dd0171824d0d72aa023a01 (patch)
tree       153eb9bc567f63d739dcaf8a3caf11c8f48b8379 /mm
parent     bc4ba393c007248f76c05945abb7b7b892cdd1cc (diff)
[PATCH] numa node ids are int, page_to_nid and zone_to_nid should return int
NUMA node ids are passed as either int or unsigned int almost exclusively, yet page_to_nid() and zone_to_nid() both return unsigned long.  This is a throwback to when page_to_nid() was a #define and was thus exposing the real type of the page flags field.

In addition to fixing up the definitions of page_to_nid() and zone_to_nid(), I audited the users of these functions and identified the following incorrect uses:

1) mm/page_alloc.c show_node() -- printk dumping the node id,
2) include/asm-ia64/pgalloc.h pgtable_quicklist_free() -- comparison against numa_node_id(), which returns an int from cpu_to_node(), and
3) mm/mempolicy.c check_pte_range() -- used as an index in node_isset(), which uses test_bit(), which in generic code takes an int.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
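For illustration, a minimal userspace sketch of the type mismatch the patch removes; page_to_nid_old(), page_to_nid_new(), the struct page stub and the lookup table below are hypothetical stand-ins, not the kernel definitions -- only the return types mirror the change:

#include <stdio.h>

struct page { unsigned int section; };	/* stand-in, not the kernel struct page */

static unsigned char section_to_node_table[16] = { 0, 1, 1, 0 };

/* Old shape: widens the small table value to unsigned long,
 * so printf/printk-style callers need "%ld"/"%lu". */
static unsigned long page_to_nid_old(struct page *page)
{
	return section_to_node_table[page->section];
}

/* New shape: node ids are ints everywhere else, so return int. */
static int page_to_nid_new(struct page *page)
{
	return section_to_node_table[page->section];
}

int main(void)
{
	struct page pg = { .section = 1 };
	int this_node = 1;	/* stand-in for numa_node_id(), which returns int */

	printf("Node %lu \n", page_to_nid_old(&pg));	/* "%d" here would be wrong */
	printf("Node %d \n", page_to_nid_new(&pg));	/* matches the show_node() fix */

	/* int-to-int comparison, no signed/unsigned mix as in the
	 * pgtable_quicklist_free() case noted above */
	if (page_to_nid_new(&pg) == this_node)
		printf("page is on this node\n");

	return 0;
}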
Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c   2
-rw-r--r--  mm/page_alloc.c  2
-rw-r--r--  mm/sparse.c      2
3 files changed, 3 insertions, 3 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index fb907236bbd8..e7b69c90cfd6 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -221,7 +221,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		struct page *page;
-		unsigned int nid;
+		int nid;
 
 		if (!pte_present(*pte))
 			continue;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 86f2984f8b79..614d427854a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1407,7 +1407,7 @@ unsigned int nr_free_pagecache_pages(void)
 static inline void show_node(struct zone *zone)
 {
 	if (NUMA_BUILD)
-		printk("Node %ld ", zone_to_nid(zone));
+		printk("Node %d ", zone_to_nid(zone));
 }
 
 void si_meminfo(struct sysinfo *val)
diff --git a/mm/sparse.c b/mm/sparse.c
index 158d6a2a5263..ac26eb0d73cd 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -36,7 +36,7 @@ static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
 static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
 #endif
 
-unsigned long page_to_nid(struct page *page)
+int page_to_nid(struct page *page)
 {
 	return section_to_node_table[page_to_section(page)];
 }