-rw-r--r-- | mm/page_owner.c | 16 |
1 files changed, 10 insertions, 6 deletions
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 33634f74d0b2..8e2d7137510c 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -562,11 +562,17 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 				continue;
 
 			/*
-			 * We are safe to check buddy flag and order, because
-			 * this is init stage and only single thread runs.
+			 * To avoid having to grab zone->lock, be a little
+			 * careful when reading buddy page order. The only
+			 * danger is that we skip too much and potentially miss
+			 * some early allocated pages, which is better than
+			 * heavy lock contention.
 			 */
 			if (PageBuddy(page)) {
-				pfn += (1UL << page_order(page)) - 1;
+				unsigned long order = page_order_unsafe(page);
+
+				if (order > 0 && order < MAX_ORDER)
+					pfn += (1UL << order) - 1;
 				continue;
 			}
 
@@ -585,6 +591,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			__set_page_owner_handle(page_ext, early_handle, 0, 0);
 			count++;
 		}
+		cond_resched();
 	}
 
 	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
@@ -595,15 +602,12 @@ static void init_zones_in_node(pg_data_t *pgdat)
 {
 	struct zone *zone;
 	struct zone *node_zones = pgdat->node_zones;
-	unsigned long flags;
 
 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
 		if (!populated_zone(zone))
 			continue;
 
-		spin_lock_irqsave(&zone->lock, flags);
 		init_pages_in_zone(pgdat, zone);
-		spin_unlock_irqrestore(&zone->lock, flags);
 	}
 }
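For context (this note and the sketch below are not part of the commit): once zone->lock is no longer held, the buddy order stored in a free page can change under the scan as the allocator merges or splits pages. page_order_unsafe(), which around this kernel version is defined in mm/internal.h as roughly a READ_ONCE() of page_private(), forces a single load of that value, and the explicit range check then refuses to act on anything a concurrent merge or split may have left behind. The worst case, as the new comment says, is skipping too far and missing some early allocated pages, which is accepted as cheaper than lock contention. A minimal sketch of that "read once, then validate" pattern follows; the kernel helpers (READ_ONCE, page_private, PageBuddy, MAX_ORDER) are real, while skip_buddy_run() and the exact macro body are illustrative assumptions:

/*
 * Sketch of the lockless pattern used in the hunk above; the
 * page_order_unsafe() definition mirrors the mm/internal.h helper
 * but should be treated as illustrative, not quoted.
 */
#define page_order_unsafe(page)	READ_ONCE(page_private(page))

static unsigned long skip_buddy_run(struct page *page, unsigned long pfn)
{
	if (PageBuddy(page)) {
		/*
		 * One load only: calling page_order(page) in both the
		 * check and the shift would let the compiler reload the
		 * value, so a concurrent merge or split could make the
		 * checked value and the used value differ.
		 */
		unsigned long order = page_order_unsafe(page);

		/* Act only on a plausible buddy order. */
		if (order > 0 && order < MAX_ORDER)
			pfn += (1UL << order) - 1;
	}
	return pfn;
}

The cond_resched() hunk follows from the same change: once init_zones_in_node() stops wrapping the scan in spin_lock_irqsave(), the walk no longer runs with a spinlock held and interrupts disabled, so it becomes legal (and, for large zones, desirable) to give the scheduler a chance to run once per scanned block.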