author | Darrick J. Wong <darrick.wong@oracle.com> | 2017-03-06 11:58:20 -0800 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-10-08 10:11:22 +0200 |
commit | c9103723b7e4bd5046081c7569a73a752972cec9 (patch) | |
tree | d1778092d04a1d048e1c26e5a7ed0b2cb7d24915 | |
parent | d0bd573a6543d6f984c19158ce78653aa1f36240 (diff) | |
xfs: remove kmem_zalloc_greedy
[ Upstream commit 08b005f1333154ae5b404ca28766e0ffb9f1c150 ]
The sole remaining caller of kmem_zalloc_greedy is bulkstat, which uses
it to grab 1-4 pages for staging of inobt records. The infinite loop in
the greedy allocation function is causing hangs[1] in generic/269, so
just get rid of the greedy allocator in favor of kmem_zalloc_large.
This makes bulkstat somewhat more likely to hit ENOMEM if there are really
no pages to spare, but it eliminates a source of hangs.
[1] http://lkml.kernel.org/r/20170301044634.rgidgdqqiiwsmfpj%40XZHOUW.usersys.redhat.com
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
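
The hang is easy to see in the removed helper: it retries vzalloc() at ever smaller sizes and, once the request is clamped to minsize, it simply retries that size forever, so under sustained memory pressure the caller never returns. The sketch below is a plain userspace analogue (calloc() standing in for the kernel allocators, function names invented for illustration, not the actual XFS code) contrasting that unbounded retry loop with the single bounded attempt that the kmem_zalloc_large-style replacement makes.

```c
#include <stdlib.h>

/*
 * Userspace analogue of the removed greedy helper: retry with smaller
 * sizes, but never give up once the request is clamped to minsize.
 * Under sustained allocation failure this loop spins forever, which is
 * the hang the patch removes.
 */
static void *zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
        void *ptr;
        size_t cursize = maxsize;

        while (!(ptr = calloc(1, cursize))) {
                if ((cursize >>= 1) <= minsize)
                        cursize = minsize;      /* clamp, then retry indefinitely */
        }
        *size = cursize;
        return ptr;
}

/*
 * Analogue of the replacement strategy: one attempt at a fixed size.
 * On failure the caller sees NULL and can report an error instead of
 * hanging.
 */
static void *zalloc_bounded(size_t size)
{
        return calloc(1, size);
}

int main(void)
{
        size_t got = 0;
        void *p = zalloc_greedy(&got, 4096, 4 * 4096);  /* may loop under pressure */
        free(p);
        p = zalloc_bounded(4 * 4096);                   /* may simply be NULL */
        free(p);
        return 0;
}
```

The only behavioural difference is on failure: the bounded version returns NULL promptly, which is what lets xfs_bulkstat() report -ENOMEM instead of hanging.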
-rw-r--r-- | fs/xfs/kmem.c | 18 |
-rw-r--r-- | fs/xfs/kmem.h | 2 |
-rw-r--r-- | fs/xfs/xfs_itable.c | 6 |
3 files changed, 2 insertions, 24 deletions
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 53e95b2a1369..0abc6ae4fa57 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -24,24 +24,6 @@
 #include "kmem.h"
 #include "xfs_message.h"
 
-/*
- * Greedy allocation. May fail and may return vmalloced memory.
- */
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
-{
-        void *ptr;
-        size_t kmsize = maxsize;
-
-        while (!(ptr = vzalloc(kmsize))) {
-                if ((kmsize >>= 1) <= minsize)
-                        kmsize = minsize;
-        }
-        if (ptr)
-                *size = kmsize;
-        return ptr;
-}
-
 void *
 kmem_alloc(size_t size, xfs_km_flags_t flags)
 {
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 64db0e53edea..2e75933bec1e 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -66,8 +66,6 @@ extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
 extern void kmem_free(const void *);
 
 
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
-
 static inline void *
 kmem_zalloc(size_t size, xfs_km_flags_t flags)
 {
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 894924a5129b..76dd8e7e914f 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -356,7 +356,6 @@ xfs_bulkstat(
         xfs_agino_t agino;              /* inode # in allocation group */
         xfs_agnumber_t agno;            /* allocation group number */
         xfs_btree_cur_t *cur;           /* btree cursor for ialloc btree */
-        size_t irbsize;                 /* size of irec buffer in bytes */
         xfs_inobt_rec_incore_t *irbuf;  /* start of irec buffer */
         int nirbuf;                     /* size of irbuf */
         int ubcount;                    /* size of user's buffer */
@@ -383,11 +382,10 @@ xfs_bulkstat(
         *ubcountp = 0;
         *done = 0;
 
-        irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+        irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
         if (!irbuf)
                 return -ENOMEM;
-
-        nirbuf = irbsize / sizeof(*irbuf);
+        nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);
 
         /*
          * Loop over the allocation groups, starting from the last
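
For reference, the new sizing in xfs_bulkstat() is just integer arithmetic over a fixed four-page buffer. Below is a minimal userspace sketch of the same calculation, with PAGE_SIZE hard-coded to 4096 and a made-up record struct standing in for xfs_inobt_rec_incore_t, so the printed count is illustrative only.

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096  /* assumed page size for the illustration */

/* Stand-in record; the real xfs_inobt_rec_incore_t layout differs. */
struct inobt_rec {
        unsigned int            ir_startino;
        unsigned int            ir_freecount;
        unsigned long long      ir_free;
};

int main(void)
{
        size_t bufsize = PAGE_SIZE * 4;
        struct inobt_rec *irbuf = calloc(1, bufsize);
        int nirbuf;

        if (!irbuf)
                return 1;       /* the kernel path returns -ENOMEM here */

        /* Same calculation the patched code performs. */
        nirbuf = bufsize / sizeof(*irbuf);
        printf("%d records fit in %zu bytes\n", nirbuf, bufsize);

        free(irbuf);
        return 0;
}
```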