Diffstat (limited to 'fs/f2fs/compress.c')
-rw-r--r--  fs/f2fs/compress.c  89
1 file changed, 64 insertions(+), 25 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 29e50fbe7eca..1dfb126a0cb2 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -49,6 +49,13 @@ bool f2fs_is_compressed_page(struct page *page)
return false;
if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
return false;
+ /*
+ * page->private may be set with pid.
+ * pid_max is enough to check if it is traced.
+ */
+ if (IS_IO_TRACED_PAGE(page))
+ return false;
+
f2fs_bug_on(F2FS_M_SB(page->mapping),
*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
return true;
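
The IS_IO_TRACED_PAGE() check keys off page_private() holding a pid; a sketch of the macro, assuming the conventional form in f2fs.h:

/* sketch: a private value inside the pid range marks an IO-traced page */
#define IS_IO_TRACED_PAGE(page) \
	(page_private(page) > 0 && \
	 page_private(page) < (unsigned long)PID_MAX_LIMIT)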
@@ -506,7 +513,7 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}
-static mempool_t *compress_page_pool = NULL;
+static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
	"Number of intermediate compress pages to preallocate");
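
num_compress_pages sizes a plain page mempool; a minimal sketch of the setup, with the init function name assumed:

/* sketch: preallocate num_compress_pages pages as compression scratch buffers */
int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	return compress_page_pool ? 0 : -ENOMEM;
}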
@@ -663,6 +670,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
int ret;
+ int i;
dec_page_count(sbi, F2FS_RD_DATA);
@@ -681,6 +689,26 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}
+ dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
+ dic->cluster_size, GFP_NOFS);
+ if (!dic->tpages) {
+ ret = -ENOMEM;
+ goto out_free_dic;
+ }
+
+ for (i = 0; i < dic->cluster_size; i++) {
+ if (dic->rpages[i]) {
+ dic->tpages[i] = dic->rpages[i];
+ continue;
+ }
+
+ dic->tpages[i] = f2fs_compress_alloc_page();
+ if (!dic->tpages[i]) {
+ ret = -ENOMEM;
+ goto out_free_dic;
+ }
+ }
+
if (cops->init_decompress_ctx) {
ret = cops->init_decompress_ctx(dic);
if (ret)
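
Each missing tpages slot is filled from the compress page pool rather than the page allocator; the allocator is roughly this sketch, assuming the mempool above:

/* sketch: take a scratch page from compress_page_pool and return it locked */
static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page = mempool_alloc(compress_page_pool, GFP_NOFS);

	lock_page(page);
	return page;
}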
@@ -821,7 +849,7 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc)
}
/* return # of valid blocks in compressed cluster */
-static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
+static int f2fs_cluster_blocks(struct compress_ctx *cc)
{
return __f2fs_cluster_blocks(cc, false);
}
@@ -835,7 +863,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
};
- return f2fs_cluster_blocks(&cc, false);
+ return f2fs_cluster_blocks(&cc);
}
static bool cluster_may_compress(struct compress_ctx *cc)
@@ -886,7 +914,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
bool prealloc;
retry:
- ret = f2fs_cluster_blocks(cc, false);
+ ret = f2fs_cluster_blocks(cc);
if (ret <= 0)
return ret;
@@ -949,7 +977,7 @@ retry:
}
if (prealloc) {
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@@ -964,7 +992,7 @@ retry:
break;
}
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
}
if (likely(!ret)) {
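
f2fs_do_map_lock() is the old __do_map_lock() exported under a namespaced name; a sketch of its shape, assuming it is unchanged apart from the rename:

/* sketch: PRE_AIO preallocation takes node_change; other paths take the op lock */
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}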
@@ -1096,8 +1124,16 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
- if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi))
+ if (IS_NOQUOTA(inode)) {
+ /*
+ * We need to wait for node_write to avoid block allocation during
+ * checkpoint. This can only happen to quota writes which can cause
+ * the below discard race condition.
+ */
+ down_read(&sbi->node_write);
+ } else if (!f2fs_trylock_op(sbi)) {
return -EAGAIN;
+ }
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@@ -1137,6 +1173,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
f2fs_set_compressed_page(cc->cpages[i], inode,
cc->rpages[i + 1]->index, cic);
fio.compressed_page = cc->cpages[i];
+
+ fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
+ dn.ofs_in_node + i + 1);
+
+ /* wait for GCed page writeback via META_MAPPING */
+ f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
+
if (fio.encrypted) {
fio.page = cc->rpages[i + 1];
err = f2fs_encrypt_one_page(&fio);
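
The new wait on fio.old_blkaddr guards against GC still writing a copy of the block through META_MAPPING; a sketch of the helper it calls, assuming the conventional form in data.c:

/* sketch: if an in-flight GC copy of blkaddr exists in META_MAPPING, wait on it */
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *cpage;

	if (!f2fs_post_read_required(inode))
		return;

	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
		f2fs_put_page(cpage, 1);
	}
}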
@@ -1203,7 +1246,9 @@ unlock_continue:
set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_put_dnode(&dn);
- if (!IS_NOQUOTA(inode))
+ if (IS_NOQUOTA(inode))
+ up_read(&sbi->node_write);
+ else
f2fs_unlock_op(sbi);
spin_lock(&fi->i_size_lock);
@@ -1230,7 +1275,9 @@ out_put_cic:
out_put_dnode:
f2fs_put_dnode(&dn);
out_unlock_op:
- if (!IS_NOQUOTA(inode))
+ if (IS_NOQUOTA(inode))
+ up_read(&sbi->node_write);
+ else
f2fs_unlock_op(sbi);
return -EAGAIN;
}
@@ -1310,6 +1357,12 @@ retry_write:
congestion_wait(BLK_RW_ASYNC,
DEFAULT_IO_TIMEOUT);
lock_page(cc->rpages[i]);
+
+ if (!PageDirty(cc->rpages[i])) {
+ unlock_page(cc->rpages[i]);
+ continue;
+ }
+
clear_page_dirty_for_io(cc->rpages[i]);
goto retry_write;
}
@@ -1353,6 +1406,8 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
err = f2fs_write_compressed_pages(cc, submitted,
wbc, io_type);
cops->destroy_compress_ctx(cc);
+ kfree(cc->cpages);
+ cc->cpages = NULL;
if (!err)
return 0;
f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
@@ -1415,22 +1470,6 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->cpages[i] = page;
}
- dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
- dic->cluster_size, GFP_NOFS);
- if (!dic->tpages)
- goto out_free;
-
- for (i = 0; i < dic->cluster_size; i++) {
- if (cc->rpages[i]) {
- dic->tpages[i] = cc->rpages[i];
- continue;
- }
-
- dic->tpages[i] = f2fs_compress_alloc_page();
- if (!dic->tpages[i])
- goto out_free;
- }
-
return dic;
out_free: