author     Christoph Lameter <clameter@sgi.com>  2007-10-16 01:25:37 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 09:42:59 -0700
commit     523b945855a1427000ffc707c610abe5947ae607 (patch)
tree       2d84b5b6822a2a20bfd79146c08ce06ac8c80b9b
parent     633c0666b5a5c41c376a5a7e4304d638dc48c1b9 (diff)
Memoryless nodes: Fix GFP_THISNODE behavior
GFP_THISNODE checks that the zone selected is within the pgdat (node) of the first zone of a nodelist. That only works if the node has memory. A memoryless node will have its first zone on another pgdat (node).

GFP_THISNODE currently just returns memory from the first pgdat. Thus it is returning memory on other nodes. GFP_THISNODE should fail if there is no local memory on a node.

Add a new set of zonelists for each node that contain only the zones of the node itself, so that no fallback is possible. Then modify gfp_zone() to pick the right zonelist index based on the presence of __GFP_THISNODE. Drop the existing GFP_THISNODE checks from the page allocator's hot path.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Tested-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/gfp.h     17
-rw-r--r--  include/linux/mmzone.h  14
-rw-r--r--  mm/page_alloc.c         28
3 files changed, 48 insertions(+), 11 deletions(-)
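
Before the diff itself, a minimal caller-side sketch of the behavior this patch enforces (the helper name, node id and debug message are illustrative, not part of the patch): with __GFP_THISNODE set, an allocation on a memoryless node now fails instead of silently handing back memory from another pgdat.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/* Illustrative helper, not from the patch: try a strictly node-local page. */
static struct page *try_local_page(int nid)
{
	struct page *page;

	/* No fallback allowed: only zones belonging to node 'nid' are considered. */
	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
	if (!page)
		/* On a memoryless node the THISNODE zonelist is empty, so we get NULL. */
		printk(KERN_DEBUG "node %d has no local memory\n", nid);

	return page;
}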
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bc68dd9a6d41..12a90a191c11 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -98,22 +98,29 @@ struct vm_area_struct;
static inline enum zone_type gfp_zone(gfp_t flags)
{
+ int base = 0;
+
+#ifdef CONFIG_NUMA
+ if (flags & __GFP_THISNODE)
+ base = MAX_NR_ZONES;
+#endif
+
#ifdef CONFIG_ZONE_DMA
if (flags & __GFP_DMA)
- return ZONE_DMA;
+ return base + ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
if (flags & __GFP_DMA32)
- return ZONE_DMA32;
+ return base + ZONE_DMA32;
#endif
if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
(__GFP_HIGHMEM | __GFP_MOVABLE))
- return ZONE_MOVABLE;
+ return base + ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
if (flags & __GFP_HIGHMEM)
- return ZONE_HIGHMEM;
+ return base + ZONE_HIGHMEM;
#endif
- return ZONE_NORMAL;
+ return base + ZONE_NORMAL;
}
/*
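
To make the new indexing concrete, a small self-check sketch (illustrative only, not part of the patch; it assumes a NUMA build): gfp_zone() now folds the __GFP_THISNODE offset into its return value, which the allocator later uses directly as a zonelist index.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>

/* Illustrative self-check of the new gfp_zone() indexing (NUMA build assumed). */
static void gfp_zone_thisnode_example(void)
{
	/* Without __GFP_THISNODE: index into the normal, fallback zonelists. */
	BUG_ON(gfp_zone(GFP_KERNEL) != ZONE_NORMAL);

	/* With __GFP_THISNODE: index shifted into the no-fallback half. */
	BUG_ON(gfp_zone(GFP_KERNEL | __GFP_THISNODE) != MAX_NR_ZONES + ZONE_NORMAL);
}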
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f21e5951038b..f6167f2fd7fb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -324,6 +324,17 @@ struct zone {
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
#ifdef CONFIG_NUMA
+
+/*
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
+ * allocations to a single node for GFP_THISNODE.
+ *
+ * [0 .. MAX_NR_ZONES - 1]            : Zonelists with fallback
+ * [MAX_NR_ZONES .. MAX_ZONELISTS - 1]: No fallback (GFP_THISNODE)
+ */
+#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
+
+
/*
* We cache key information from each zonelist for smaller cache
* footprint when scanning for free pages in get_page_from_freelist().
@@ -389,6 +400,7 @@ struct zonelist_cache {
unsigned long last_full_zap; /* when last zap'd (jiffies) */
};
#else
+#define MAX_ZONELISTS MAX_NR_ZONES
struct zonelist_cache;
#endif
@@ -455,7 +467,7 @@ extern struct page *mem_map;
struct bootmem_data;
typedef struct pglist_data {
struct zone node_zones[MAX_NR_ZONES];
- struct zonelist node_zonelists[MAX_NR_ZONES];
+ struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
struct page *node_mem_map;
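
The allocator consumes the doubled array simply by adding gfp_zone(gfp_mask) to node_zonelists; a minimal sketch of that lookup follows (the helper name is made up for illustration; the real call sites, such as alloc_pages_node(), index node_zonelists inline).

#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Illustrative helper: pick the zonelist an allocation will walk. */
static inline struct zonelist *node_zonelist_for(int nid, gfp_t gfp_mask)
{
	/*
	 * gfp_zone() already folds in the MAX_NR_ZONES offset when
	 * __GFP_THISNODE is set, so the no-fallback half of the array
	 * is selected without any extra branching here.
	 */
	return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask);
}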
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d06f6e0f75aa..2f547f45de18 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1191,9 +1191,6 @@ zonelist_scan:
!zlc_zone_worth_trying(zonelist, z, allowednodes))
continue;
zone = *z;
- if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
- zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
- break;
if ((alloc_flags & ALLOC_CPUSET) &&
!cpuset_zone_allowed_softwall(zone, gfp_mask))
goto try_next_zone;
@@ -1262,7 +1259,10 @@ restart:
z = zonelist->zones; /* the list of zones suitable for gfp_mask */
if (unlikely(*z == NULL)) {
- /* Should this ever happen?? */
+ /*
+ * Happens if we have an empty zonelist as a result of
+ * GFP_THISNODE being used on a memoryless node
+ */
return NULL;
}
@@ -1858,6 +1858,22 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
}
/*
+ * Build gfp_thisnode zonelists
+ */
+static void build_thisnode_zonelists(pg_data_t *pgdat)
+{
+ enum zone_type i;
+ int j;
+ struct zonelist *zonelist;
+
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
+ j = build_zonelists_node(pgdat, zonelist, 0, i);
+ zonelist->zones[j] = NULL;
+ }
+}
+
+/*
* Build zonelists ordered by zone and nodes within zones.
* This results in conserving DMA zone[s] until all Normal memory is
* exhausted, but results in overflowing to remote node while memory
@@ -1961,7 +1977,7 @@ static void build_zonelists(pg_data_t *pgdat)
int order = current_zonelist_order;
/* initialize zonelists */
- for (i = 0; i < MAX_NR_ZONES; i++) {
+ for (i = 0; i < MAX_ZONELISTS; i++) {
zonelist = pgdat->node_zonelists + i;
zonelist->zones[0] = NULL;
}
@@ -2006,6 +2022,8 @@ static void build_zonelists(pg_data_t *pgdat)
/* calculate node order -- i.e., DMA last! */
build_zonelists_in_zone_order(pgdat, j);
}
+
+ build_thisnode_zonelists(pgdat);
}
/* Construct the zonelist performance cache - see further mmzone.h */
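
To see what build_thisnode_zonelists() produces, a hedged debugging sketch (the function is illustrative and not part of the patch) walks one node's no-fallback zonelists; on a memoryless node every one of them is empty, which is exactly what makes the early NULL return added to __alloc_pages() above fire for GFP_THISNODE requests.

#include <linux/kernel.h>
#include <linux/mmzone.h>

/* Illustrative debug helper: dump a node's GFP_THISNODE zonelists. */
static void dump_thisnode_zonelists(pg_data_t *pgdat)
{
	enum zone_type i;
	int j;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zonelist *zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;

		for (j = 0; zonelist->zones[j] != NULL; j++)
			printk(KERN_DEBUG "node %d thisnode[%d][%d] = %s\n",
			       pgdat->node_id, i, j, zonelist->zones[j]->name);
		if (j == 0)
			printk(KERN_DEBUG "node %d thisnode[%d] is empty (memoryless)\n",
			       pgdat->node_id, i);
	}
}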