Diffstat (limited to 'mm')
-rw-r--r--	mm/backing-dev.c    | 18 +-----------------
-rw-r--r--	mm/memcontrol.c     |  6 +++---
-rw-r--r--	mm/memory_hotplug.c |  4 +++-
-rw-r--r--	mm/zsmalloc.c       |  3 ++-
4 files changed, 9 insertions, 22 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6dc4580df2af..000e7b3b9896 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	flush_delayed_work(&bdi->wb.dwork);
 }
 
-/*
- * Called when the device behind @bdi has been removed or ejected.
- *
- * We can't really do much here except for reducing the dirty ratio at
- * the moment. In the future we should be able to set a flag so that
- * the filesystem can handle errors at mark_inode_dirty time instead
- * of only at writeback time.
- */
-void bdi_unregister(struct backing_dev_info *bdi)
-{
-	if (WARN_ON_ONCE(!bdi->dev))
-		return;
-
-	bdi_set_min_ratio(bdi, 0);
-}
-EXPORT_SYMBOL(bdi_unregister);
-
 static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 {
 	memset(wb, 0, sizeof(*wb));
@@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
 	int i;
 
 	bdi_wb_shutdown(bdi);
+	bdi_set_min_ratio(bdi, 0);
 
 	WARN_ON(!list_empty(&bdi->work_list));
 	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 14c2f2017e37..a04225d372ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2323,6 +2323,8 @@ done_restock:
 	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
+	if (!(gfp_mask & __GFP_WAIT))
+		goto done;
 	/*
 	 * If the hierarchy is above the normal consumption range,
 	 * make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
-	/* XXX: caller holds IRQ-safe mapping->tree_lock */
-	VM_BUG_ON(!irqs_disabled());
-
+	/* Caller disabled preemption with mapping->tree_lock */
 	mem_cgroup_charge_statistics(memcg, page, -1);
 	memcg_check_events(memcg, page);
 }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 457bde530cbe..9e88f749aa51 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
 		 * wait_table may be allocated from boot memory,
 		 * here only free if it's allocated by vmalloc.
 		 */
-		if (is_vmalloc_addr(zone->wait_table))
+		if (is_vmalloc_addr(zone->wait_table)) {
 			vfree(zone->wait_table);
+			zone->wait_table = NULL;
+		}
 	}
 }
 EXPORT_SYMBOL(try_offline_node);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 08bd7a3d464a..a8b5e749e84e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-	kmem_cache_destroy(pool->handle_cachep);
+	if (pool->handle_cachep)
+		kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)
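
Two of the hunks above apply the same defensive teardown pattern: try_offline_node() clears zone->wait_table after vfree() so the stale pointer cannot be freed or dereferenced on a later offline, and destroy_handle_cache() guards kmem_cache_destroy() against a cache that was never created (e.g. when create_handle_cache() failed). The following is a minimal userspace C sketch of that pattern, not kernel code; the struct and function names are illustrative and not part of the patch.

#include <stdio.h>
#include <stdlib.h>

struct pool {
	int *table;		/* stands in for zone->wait_table / pool->handle_cachep */
};

static void pool_destroy(struct pool *p)
{
	if (p->table) {		/* guard, as destroy_handle_cache() now does */
		free(p->table);
		p->table = NULL;	/* clear, as try_offline_node() now does */
	}
}

int main(void)
{
	struct pool p = { .table = malloc(4 * sizeof(int)) };

	pool_destroy(&p);
	pool_destroy(&p);	/* safe: the second call sees NULL and is a no-op */
	printf("teardown is idempotent\n");
	return 0;
}

Guarding the destructor and clearing the pointer makes teardown idempotent, so a partially failed init path or a repeated offline/destroy cannot double-free the resource.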