author | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2008-11-19 11:53:15 +0200 |
---|---|---|
committer | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2008-11-21 18:59:33 +0200 |
commit | 3477d204658733aa3a87d3ae03b0327c1e599517 (patch) | |
tree | 1c9ba659f76c09a19b98f4bcbfac6fc67db43112 /fs/ubifs/file.c | |
parent | 6c0c42cdfd73fb161417403d8d077cb136e10bbf (diff) | |
UBIFS: pre-allocate bulk-read buffer
To avoid memory allocation failures during bulk-read, pre-allocate
a bulk-read buffer, so that if there is only one bulk-reader at
a time, it uses the pre-allocated buffer and does not do any
memory allocation. However, if there is more than one bulk-reader,
then only one reader uses the pre-allocated buffer, while the
other readers allocate buffers for themselves.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
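For readers who want to see the locking idea in isolation, below is a minimal, self-contained userspace sketch of the trylock-or-allocate pattern this patch implements: one pre-allocated buffer guarded by a mutex, with a fallback to a private allocation when the mutex is already taken. The names (`shared_lock`, `shared_buf`, `do_bulk_read()`, `bulk_read()`) and the use of pthreads/malloc are illustrative assumptions, not UBIFS code.

```c
/*
 * Userspace analogue of the pattern ubifs_bulk_read() uses after this
 * patch: a single pre-allocated buffer shared under a mutex, with a
 * fallback to a private allocation when the mutex is contended.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_LEN (128 * 1024)

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_buf[BUF_LEN];        /* "pre-allocated" at program start */

static void do_bulk_read(char *buf, size_t len)
{
        memset(buf, 0, len);            /* stand-in for the actual read work */
}

static int bulk_read(void)
{
        char *buf;
        int allocated = 0;

        if (pthread_mutex_trylock(&shared_lock) == 0) {
                buf = shared_buf;       /* fast path: use the shared buffer */
        } else {
                buf = malloc(BUF_LEN);  /* slow path: another reader owns it */
                if (!buf)
                        return 0;       /* give up quietly */
                allocated = 1;
        }

        do_bulk_read(buf, BUF_LEN);

        if (allocated)
                free(buf);
        else
                pthread_mutex_unlock(&shared_lock);
        return 1;
}

int main(void)
{
        printf("bulk_read -> %d\n", bulk_read());
        return 0;
}
```

As in the kernel code, the slow path fails silently when the allocation cannot be satisfied, because bulk-read is only an optimization and giving up costs nothing but the speed-up.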
Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r-- | fs/ubifs/file.c | 31 |
1 file changed, 22 insertions, 9 deletions
```diff
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 0c5c27d63f6e..2624411d9758 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -811,15 +811,15 @@ static int ubifs_bulk_read(struct page *page)
         struct ubifs_inode *ui = ubifs_inode(inode);
         pgoff_t index = page->index, last_page_read = ui->last_page_read;
         struct bu_info *bu;
-        int err = 0;
+        int err = 0, allocated = 0;
 
         ui->last_page_read = index;
         if (!c->bulk_read)
                 return 0;
 
         /*
-         * Bulk-read is protected by ui_mutex, but it is an optimization, so
-         * don't bother if we cannot lock the mutex.
+         * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+         * so don't bother if we cannot lock the mutex.
          */
         if (!mutex_trylock(&ui->ui_mutex))
                 return 0;
@@ -840,17 +840,30 @@ static int ubifs_bulk_read(struct page *page)
                 ui->bulk_read = 1;
         }
 
-        bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
-        if (!bu)
-                return 0;
+        /*
+         * If possible, try to use pre-allocated bulk-read information, which
+         * is protected by @c->bu_mutex.
+         */
+        if (mutex_trylock(&c->bu_mutex))
+                bu = &c->bu;
+        else {
+                bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+                if (!bu)
+                        goto out_unlock;
+
+                bu->buf = NULL;
+                allocated = 1;
+        }
 
-        bu->buf = NULL;
         bu->buf_len = c->max_bu_buf_len;
         data_key_init(c, &bu->key, inode->i_ino,
                       page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
         err = ubifs_do_bulk_read(c, bu, page);
-        kfree(bu);
+
+        if (!allocated)
+                mutex_unlock(&c->bu_mutex);
+        else
+                kfree(bu);
 
 out_unlock:
         mutex_unlock(&ui->ui_mutex);
```
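Note that this diffstat-limited view only shows the consumer side in fs/ubifs/file.c; the code that actually pre-allocates `c->bu.buf` once per mount lives elsewhere in the commit and is not visible here. Purely as a hedged illustration of what that side might look like: the helper name `bu_init()` and the GFP flags below are assumptions, only the fields `c->bu`, `c->bu_mutex`, `c->max_bu_buf_len` and `c->bulk_read` are taken from the hunk above, and `c->bu_mutex` is assumed to be set up with `mutex_init()` during superblock initialization.

```c
/*
 * Hedged sketch only -- not part of the diff above.  Shows where the
 * pre-allocation of c->bu.buf could happen; bu_init() is a hypothetical
 * helper name and GFP_KERNEL | __GFP_NOWARN is an assumption.
 */
static void bu_init(struct ubifs_info *c)
{
        if (c->bu.buf)
                return;         /* the pre-allocated buffer already exists */

        c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
        if (!c->bu.buf)
                /* Cannot pre-allocate: one option is to disable bulk-read */
                c->bulk_read = 0;
}
```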