 drivers/video/fb_defio.c | 40 ++++++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 6113c47e095a..1105a591dcc1 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -155,25 +155,41 @@ static void fb_deferred_io_work(struct work_struct *work)
{
struct fb_info *info = container_of(work, struct fb_info,
deferred_work.work);
- struct list_head *node, *next;
- struct page *cur;
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct page *page, *tmp_page;
+ struct list_head *node, *tmp_node;
+ struct list_head non_dirty;
+
+ INIT_LIST_HEAD(&non_dirty);
/* here we mkclean the pages, then do all deferred IO */
mutex_lock(&fbdefio->lock);
- list_for_each_entry(cur, &fbdefio->pagelist, lru) {
- lock_page(cur);
- page_mkclean(cur);
- unlock_page(cur);
+ list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
+ lock_page(page);
+ /*
+ * The workqueue callback can be triggered after a
+ * ->page_mkwrite() call but before the PTE has been marked
+ * dirty. In this case page_mkclean() won't "rearm" the page.
+ *
+ * To avoid this, remove those "non-dirty" pages from the
+ * pagelist before calling the driver's callback, then add
+ * them back to get processed on the next work iteration.
+ * At that time, their PTEs will hopefully be dirty for real.
+ */
+ if (!page_mkclean(page))
+ list_move_tail(&page->lru, &non_dirty);
+ unlock_page(page);
}
/* driver's callback with pagelist */
fbdefio->deferred_io(info, &fbdefio->pagelist);
- /* clear the list */
- list_for_each_safe(node, next, &fbdefio->pagelist) {
+ /* clear the list... */
+ list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
list_del(node);
}
+ /* ... and add back the "non-dirty" pages to the list */
+ list_splice_tail(&non_dirty, &fbdefio->pagelist);
mutex_unlock(&fbdefio->lock);
}
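
For context, the "driver's callback" invoked above typically walks the
pagelist it is handed and flushes each dirty page out to the device. A
minimal sketch of such a callback follows; my_flush_range() is a
hypothetical helper standing in for a driver's actual transfer routine
and is not part of this patch:

static void my_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct page *page;

	/* each entry is one PAGE_SIZE chunk of the framebuffer that was
	 * written through an mmap()ed mapping since the last flush */
	list_for_each_entry(page, pagelist, lru)
		my_flush_range(info, page->index << PAGE_SHIFT, PAGE_SIZE);
}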
@@ -202,6 +218,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct list_head *node, *tmp_node;
struct page *page;
int i;
@@ -209,6 +226,13 @@ void fb_deferred_io_cleanup(struct fb_info *info)
cancel_delayed_work(&info->deferred_work);
flush_scheduled_work();
+ /* the list may still contain some non-dirty pages at this point */
+ mutex_lock(&fbdefio->lock);
+ list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+ list_del(node);
+ }
+ mutex_unlock(&fbdefio->lock);
+
/* clear out the mapping that we setup */
for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
page = fb_deferred_io_page(info, i);
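
For reference, a driver opts into deferred I/O by pointing info->fbdefio
at a struct fb_deferred_io and calling fb_deferred_io_init() before
registering the framebuffer; fb_deferred_io_cleanup() above is the
matching teardown call. A hedged sketch, with the my_* names hypothetical:

#include <linux/fb.h>

static void my_deferred_io(struct fb_info *info, struct list_head *pagelist);

static struct fb_deferred_io my_defio = {
	.delay		= HZ / 20,	/* batch writes for ~50 ms */
	.deferred_io	= my_deferred_io,
};

static int my_probe(struct fb_info *info)
{
	/* ... allocate and fill in info, screen memory, fb_ops ... */
	info->fbdefio = &my_defio;
	fb_deferred_io_init(info);
	return register_framebuffer(info);
}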