author    | Pavel Shilovsky <pshilovsky@samba.org> | 2014-06-24 13:08:54 +0400
committer | Steve French <smfrench@gmail.com> | 2014-08-02 01:23:03 -0500
commit    | 387eb92ac6892518fb67e423f65fcaca76e256a8 (patch)
tree      | 81acfa03dfde4cc3b2d9f1a6c555778036efec22 /fs/cifs/file.c
parent    | cb7e9eabb2b584884db0d11ae0376d31ac1cfdc1 (diff)
CIFS: Separate page search from readpages
Reviewed-by: Shirish Pargaonkar <spargaonkar@suse.com>
Signed-off-by: Pavel Shilovsky <pshilovsky@samba.org>
Signed-off-by: Steve French <smfrench@gmail.com>
Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r-- | fs/cifs/file.c | 107
1 file changed, 61 insertions, 46 deletions
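
The patch below moves the page-batching logic out of the cifs_readpages() loop into a new helper, readpages_get_pages(): the helper locks the first page on the readahead list, adds it to the page cache, and then keeps pulling pages while their indexes stay contiguous and the accumulated byte count stays within rsize, returning the batch size through nr_pages, offset, and bytes. As a reading aid only, here is a minimal stand-alone sketch of that batching rule in ordinary user-space C; the page size, index list, and function names are assumptions for illustration and are not part of the kernel code.

```c
/*
 * Sketch of the batching rule readpages_get_pages() implements:
 * take the first pending page, then keep adding pages while the
 * index sequence is contiguous and the byte total fits in rsize.
 * EXAMPLE_PAGE_SIZE and batch_pages() are hypothetical names.
 */
#include <stdio.h>
#include <stddef.h>

#define EXAMPLE_PAGE_SIZE 4096u

/* Return how many of the pending page indexes fit in one read request. */
static unsigned int batch_pages(const unsigned long *index, size_t npending,
				unsigned int rsize)
{
	unsigned int nr_pages = 1;		/* first page is always taken */
	unsigned int bytes = EXAMPLE_PAGE_SIZE;
	unsigned long expected = index[0] + 1;

	for (size_t i = 1; i < npending; i++) {
		if (index[i] != expected)		/* discontinuity */
			break;
		if (bytes + EXAMPLE_PAGE_SIZE > rsize)	/* rsize cap */
			break;
		bytes += EXAMPLE_PAGE_SIZE;
		expected++;
		nr_pages++;
	}
	return nr_pages;
}

int main(void)
{
	/* hypothetical readahead list: a contiguous run, then a gap */
	unsigned long pending[] = { 10, 11, 12, 13, 14, 20, 21 };
	unsigned int rsize = 16384;	/* 4 pages per request */

	printf("pages in first request: %u\n",
	       batch_pages(pending, sizeof(pending) / sizeof(pending[0]), rsize));
	return 0;
}
```

With these example numbers the first request would carry 4 pages: the rsize cap stops the batch before the discontinuity at index 20. In the real helper the same two checks appear as the "discontinuity" and "would this page push the read over the rsize" tests.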
```diff
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c79bdf3e6f51..bec48f1bc3ca 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3340,6 +3340,63 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 	return total_read > 0 && result != -EAGAIN ? total_read : result;
 }
 
+static int
+readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
+		    unsigned int rsize, struct list_head *tmplist,
+		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
+{
+	struct page *page, *tpage;
+	unsigned int expected_index;
+	int rc;
+
+	page = list_entry(page_list->prev, struct page, lru);
+
+	/*
+	 * Lock the page and put it in the cache. Since no one else
+	 * should have access to this page, we're safe to simply set
+	 * PG_locked without checking it first.
+	 */
+	__set_page_locked(page);
+	rc = add_to_page_cache_locked(page, mapping,
+				      page->index, GFP_KERNEL);
+
+	/* give up if we can't stick it in the cache */
+	if (rc) {
+		__clear_page_locked(page);
+		return rc;
+	}
+
+	/* move first page to the tmplist */
+	*offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	*bytes = PAGE_CACHE_SIZE;
+	*nr_pages = 1;
+	list_move_tail(&page->lru, tmplist);
+
+	/* now try and add more pages onto the request */
+	expected_index = page->index + 1;
+	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
+		/* discontinuity ? */
+		if (page->index != expected_index)
+			break;
+
+		/* would this page push the read over the rsize? */
+		if (*bytes + PAGE_CACHE_SIZE > rsize)
+			break;
+
+		__set_page_locked(page);
+		if (add_to_page_cache_locked(page, mapping, page->index,
+					     GFP_KERNEL)) {
+			__clear_page_locked(page);
+			break;
+		}
+		list_move_tail(&page->lru, tmplist);
+		(*bytes) += PAGE_CACHE_SIZE;
+		expected_index++;
+		(*nr_pages)++;
+	}
+	return rc;
+}
+
 static int cifs_readpages(struct file *file, struct address_space *mapping,
 	struct list_head *page_list, unsigned num_pages)
 {
@@ -3394,57 +3451,15 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 	 * the rdata->pages, then we want them in increasing order.
 	 */
 	while (!list_empty(page_list)) {
-		unsigned int i;
-		unsigned int bytes = PAGE_CACHE_SIZE;
-		unsigned int expected_index;
-		unsigned int nr_pages = 1;
+		unsigned int i, nr_pages, bytes;
 		loff_t offset;
 		struct page *page, *tpage;
 		struct cifs_readdata *rdata;
 
-		page = list_entry(page_list->prev, struct page, lru);
-
-		/*
-		 * Lock the page and put it in the cache. Since no one else
-		 * should have access to this page, we're safe to simply set
-		 * PG_locked without checking it first.
-		 */
-		__set_page_locked(page);
-		rc = add_to_page_cache_locked(page, mapping,
-					      page->index, GFP_KERNEL);
-
-		/* give up if we can't stick it in the cache */
-		if (rc) {
-			__clear_page_locked(page);
+		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+					 &nr_pages, &offset, &bytes);
+		if (rc)
 			break;
-		}
-
-		/* move first page to the tmplist */
-		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
-		list_move_tail(&page->lru, &tmplist);
-
-		/* now try and add more pages onto the request */
-		expected_index = page->index + 1;
-		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
-			/* discontinuity ? */
-			if (page->index != expected_index)
-				break;
-
-			/* would this page push the read over the rsize? */
-			if (bytes + PAGE_CACHE_SIZE > rsize)
-				break;
-
-			__set_page_locked(page);
-			if (add_to_page_cache_locked(page, mapping,
-						page->index, GFP_KERNEL)) {
-				__clear_page_locked(page);
-				break;
-			}
-			list_move_tail(&page->lru, &tmplist);
-			bytes += PAGE_CACHE_SIZE;
-			expected_index++;
-			nr_pages++;
-		}
 
 		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
 		if (!rdata) {
```