Diffstat (limited to 'fs/f2fs/data.c')
 fs/f2fs/data.c | 93 +++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 70 insertions(+), 23 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5f527073143e..ed2bca0fce92 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -87,7 +87,7 @@ static bool __is_cp_guaranteed(struct page *page)
sbi = F2FS_I_SB(inode);
if (inode->i_ino == F2FS_META_INO(sbi) ||
- inode->i_ino == F2FS_NODE_INO(sbi) ||
+ inode->i_ino == F2FS_NODE_INO(sbi) ||
S_ISDIR(inode->i_mode) ||
(S_ISREG(inode->i_mode) &&
(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
@@ -1073,12 +1073,13 @@ static void f2fs_release_read_bio(struct bio *bio)
/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
- block_t blkaddr, bool for_write)
+ block_t blkaddr, int op_flags, bool for_write)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
- bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index, for_write);
+ bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
+ page->index, for_write);
if (IS_ERR(bio))
return PTR_ERR(bio);
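Note on the hunk above: the new op_flags argument lets callers thread block-layer request flags down into the read bio instead of the hard-coded 0 that f2fs_grab_read_bio used to receive. A minimal sketch of a call site, assuming the signature introduced here; REQ_RAHEAD is a real block-layer flag, but this exact call is illustrative rather than taken from the commit:

	/* Mark the read as speculative readahead so the block layer can
	 * deprioritize it and need not retry aggressively on failure. */
	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
				    REQ_RAHEAD, false);
	if (err)
		goto put_err;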
@@ -1193,7 +1194,7 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
- struct extent_info ei = {0,0,0};
+ struct extent_info ei = {0, 0, 0};
struct inode *inode = dn->inode;
if (f2fs_lookup_extent_cache(inode, index, &ei)) {
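The initializer fix above is purely cosmetic. If readability is the goal, designated initializers would also document which fields are zeroed; a sketch, assuming the three-member layout struct extent_info had in f2fs.h at this point:

	/* Self-documenting equivalent of {0, 0, 0} (field names assume the
	 * fofs/len/blk members of extent_info in this era). */
	struct extent_info ei = { .fofs = 0, .len = 0, .blk = 0 };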
@@ -1265,7 +1266,8 @@ got_it:
return page;
}
- err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, for_write);
+ err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
+ op_flags, for_write);
if (err)
goto put_err;
return page;
@@ -1414,7 +1416,7 @@ alloc:
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
old_blkaddr = dn->data_blkaddr;
f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
- &sum, seg_type, NULL, false);
+ &sum, seg_type, NULL);
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
invalidate_mapping_pages(META_MAPPING(sbi),
old_blkaddr, old_blkaddr);
@@ -1474,7 +1476,7 @@ map_blocks:
return err;
}
-void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
if (flag == F2FS_GET_BLOCK_PRE_AIO) {
if (lock)
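The rename only moves the helper into the f2fs_ namespace; its body is unchanged. For context, the function continues roughly as follows (reconstructed from the surrounding kernel sources, so read it as a sketch rather than part of this diff):

	void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
	{
		if (flag == F2FS_GET_BLOCK_PRE_AIO) {
			/* Pre-AIO reservation only needs to exclude
			 * concurrent node_change writers. */
			if (lock)
				down_read(&sbi->node_change);
			else
				up_read(&sbi->node_change);
		} else {
			/* Every other mapping mode takes the heavier
			 * cp_rwsem via f2fs_lock_op()/f2fs_unlock_op(). */
			if (lock)
				f2fs_lock_op(sbi);
			else
				f2fs_unlock_op(sbi);
		}
	}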
@@ -1539,7 +1541,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
next_dnode:
if (map->m_may_create)
- __do_map_lock(sbi, flag, true);
+ f2fs_do_map_lock(sbi, flag, true);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -1688,7 +1690,7 @@ skip:
f2fs_put_dnode(&dn);
if (map->m_may_create) {
- __do_map_lock(sbi, flag, false);
+ f2fs_do_map_lock(sbi, flag, false);
f2fs_balance_fs(sbi, dn.node_changed);
}
goto next_dnode;
@@ -1714,7 +1716,7 @@ sync_out:
f2fs_put_dnode(&dn);
unlock_out:
if (map->m_may_create) {
- __do_map_lock(sbi, flag, false);
+ f2fs_do_map_lock(sbi, flag, false);
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
@@ -1861,6 +1863,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
flags |= FIEMAP_EXTENT_LAST;
err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
+ trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
if (err || err == 1)
return err;
}
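trace_f2fs_fiemap is a tracepoint introduced alongside this change in include/trace/events/f2fs.h, so its definition does not appear in this file. A condensed sketch of the TRACE_EVENT shape implied by the six-argument call sites (the entry and printk sections are elided, not quoted from the commit):

	TRACE_EVENT(f2fs_fiemap,
		TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock,
			 unsigned long long len, unsigned int flags, int ret),
		TP_ARGS(inode, lblock, pblock, len, flags, ret),
		/* TP_STRUCT__entry / TP_fast_assign / TP_printk elided */
	);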
@@ -1884,8 +1887,10 @@ static int f2fs_xattr_fiemap(struct inode *inode,
flags = FIEMAP_EXTENT_LAST;
}
- if (phys)
+ if (phys) {
err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
+ trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
+ }
return (err < 0 ? err : 0);
}
@@ -1979,6 +1984,7 @@ next:
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size, flags);
+ trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
if (ret)
goto out;
size = 0;
@@ -2213,9 +2219,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
if (ret)
goto out;
- /* cluster was overwritten as normal cluster */
- if (dn.data_blkaddr != COMPRESS_ADDR)
- goto out;
+ f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
for (i = 1; i < cc->cluster_size; i++) {
block_t blkaddr;
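Replacing the silent "goto out" with f2fs_bug_on() turns a state the caller has already ruled out (a cluster head whose address is not COMPRESS_ADDR) into a loud assertion. As defined in f2fs.h, the macro is a hard BUG_ON only under CONFIG_F2FS_CHECK_FS and otherwise degrades to a warning plus a request for fsck:

	#ifdef CONFIG_F2FS_CHECK_FS
	#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
	#else
	#define f2fs_bug_on(sbi, condition)				\
		do {							\
			if (WARN_ON(condition))				\
				set_sbi_flag(sbi, SBI_NEED_FSCK);	\
		} while (0)
	#endif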
@@ -2342,6 +2346,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
unsigned nr_pages = rac ? readahead_count(rac) : 1;
unsigned max_nr_pages = nr_pages;
int ret = 0;
+ bool drop_ra = false;
map.m_pblk = 0;
map.m_lblk = 0;
@@ -2352,10 +2357,26 @@ static int f2fs_mpage_readpages(struct inode *inode,
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
+ /*
+ * Two readahead threads on the same address range can race and
+ * fragment what should be sequential read IOs, so let one of them
+ * back off when it sees the other already in flight.
+ */
+ if (rac && readahead_count(rac)) {
+ if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
+ drop_ra = true;
+ else
+ WRITE_ONCE(F2FS_I(inode)->ra_offset,
+ readahead_index(rac));
+ }
+
for (; nr_pages; nr_pages--) {
if (rac) {
page = readahead_page(rac);
prefetchw(&page->flags);
+ if (drop_ra) {
+ f2fs_put_page(page, 1);
+ continue;
+ }
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -2418,6 +2439,9 @@ next_page:
}
if (bio)
__submit_bio(F2FS_I_SB(inode), bio, DATA);
+
+ if (rac && readahead_count(rac) && !drop_ra)
+ WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
return ret;
}
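The ra_offset scheme above, together with its reset at the end of this function, is a best-effort guard rather than a lock: a single per-inode slot (ra_offset, assumed here to be added to struct f2fs_inode_info by this series and initialized to -1) publishes the start index of the readahead currently in flight, and READ_ONCE/WRITE_ONCE only guarantee each access is a single, tear-free load or store. A sketch of the protocol:

	/*
	 * thread A: WRITE_ONCE(fi->ra_offset, index);  claims the range
	 * thread B: READ_ONCE(fi->ra_offset) == index  sees the claim and
	 *           drops its duplicate pages instead of issuing reads
	 * thread A: WRITE_ONCE(fi->ra_offset, -1);     releases the slot
	 *
	 * Losing the race is harmless: at worst both threads read the
	 * range, which is exactly the pre-patch behaviour.
	 */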
@@ -2772,8 +2796,20 @@ write:
/* Dentry/quota blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
+ /*
+ * We need to wait for node_write to avoid block allocation during
+ * checkpoint. This can only happen to quota writes, which could
+ * otherwise race with the discard issued after the checkpoint.
+ */
+ if (IS_NOQUOTA(inode))
+ down_read(&sbi->node_write);
+
fio.need_lock = LOCK_DONE;
err = f2fs_do_write_data_page(&fio);
+
+ if (IS_NOQUOTA(inode))
+ up_read(&sbi->node_write);
+
goto done;
}
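node_write is the rw_semaphore that checkpoint takes for write in block_operations() to freeze node updates. Holding it for read around a quota data write therefore excludes any concurrent checkpoint, so the quota page cannot allocate a block that the checkpoint's trailing discard pass might free. The interaction, with the checkpoint side reconstructed from fs/f2fs/checkpoint.c as a sketch:

	/* checkpoint path (sketch):
	 *	down_write(&sbi->node_write);	in block_operations()
	 *	... write nodes, commit CP, issue discards ...
	 *	up_write(&sbi->node_write);
	 *
	 * quota write path (this hunk):
	 *	down_read(&sbi->node_write);
	 *	f2fs_do_write_data_page(&fio);
	 *	up_read(&sbi->node_write);
	 */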
@@ -3268,7 +3304,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
if (f2fs_has_inline_data(inode) ||
(pos & PAGE_MASK) >= i_size_read(inode)) {
- __do_map_lock(sbi, flag, true);
+ f2fs_do_map_lock(sbi, flag, true);
locked = true;
}
@@ -3305,7 +3341,7 @@ restart:
err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err || dn.data_blkaddr == NULL_ADDR) {
f2fs_put_dnode(&dn);
- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
+ f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
true);
WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
locked = true;
@@ -3321,7 +3357,7 @@ out:
f2fs_put_dnode(&dn);
unlock_out:
if (locked)
- __do_map_lock(sbi, flag, false);
+ f2fs_do_map_lock(sbi, flag, false);
return err;
}
@@ -3433,7 +3469,7 @@ repeat:
err = -EFSCORRUPTED;
goto fail;
}
- err = f2fs_submit_page_read(inode, page, blkaddr, true);
+ err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
if (err)
goto fail;
@@ -3483,6 +3519,10 @@ static int f2fs_write_end(struct file *file,
if (f2fs_compressed_file(inode) && fsdata) {
f2fs_compress_write_end(inode, fsdata, page->index, copied);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+
+ if (pos + copied > i_size_read(inode) &&
+ !f2fs_verity_in_progress(inode))
+ f2fs_i_size_write(inode, pos + copied);
return copied;
}
#endif
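Before this hunk, the compressed-file branch returned from f2fs_write_end() before the common tail could advance i_size, so an append into a compressed cluster left the file size stale. The new check mirrors the regular path: grow i_size to pos + copied unless fs-verity is writing its Merkle tree past EOF, in which case the apparent size must not move. A worked example of the effect:

	/* e.g. (sketch) appending one page at EOF of a compressed file:
	 *	pos == i_size_read(inode), copied == PAGE_SIZE
	 *	=> pos + copied > i_size, so f2fs_i_size_write() runs and
	 *	   stat() reports the new size as soon as write() returns.
	 */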
@@ -3742,10 +3782,9 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
}
f2fs_put_dnode(&dn);
-
return blknr;
#else
- return -EOPNOTSUPP;
+ return 0;
#endif
}
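sector_t is unsigned, so the old "return -EOPNOTSUPP" in the !CONFIG_F2FS_FS_COMPRESSION stub reached callers as a huge bogus block number; 0 is the conventional "unmapped" answer for ->bmap. Userspace observes this through the FIBMAP ioctl; a minimal sketch (fd is a hypothetical open file descriptor, error handling elided, CAP_SYS_RAWIO required):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* FIBMAP */

	int blk = 0;		/* in: logical block, out: physical block */
	if (ioctl(fd, FIBMAP, &blk) == 0 && blk == 0)
		printf("logical block 0 is unmapped\n");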
@@ -3753,18 +3792,26 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
+ struct buffer_head tmp = {
+ .b_size = i_blocksize(inode),
+ };
+ sector_t blknr = 0;
if (f2fs_has_inline_data(inode))
- return 0;
+ goto out;
/* make sure allocating whole blocks */
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
filemap_write_and_wait(mapping);
if (f2fs_compressed_file(inode))
- return f2fs_bmap_compress(inode, block);
+ blknr = f2fs_bmap_compress(inode, block);
- return generic_block_bmap(mapping, block, get_data_block_bmap);
+ if (!get_data_block_bmap(inode, block, &tmp, 0))
+ blknr = tmp.b_blocknr;
+out:
+ trace_f2fs_bmap(inode, block, blknr);
+ return blknr;
}
#ifdef CONFIG_MIGRATION
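On the f2fs_bmap() rework above: generic_block_bmap() is just a thin wrapper that builds a temporary buffer_head and calls the filesystem's get_block_t, so open-coding it lets f2fs fall through from the compressed-file case and emit trace_f2fs_bmap on every exit path. For reference, the contract the on-stack tmp satisfies (the get_block_t typedef from include/linux/fs.h):

	typedef int (get_block_t)(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create);

	/* get_data_block_bmap() fills bh_result->b_blocknr with the
	 * physical block backing iblock and returns 0 on success; with
	 * create == 0 it never allocates, which is what ->bmap wants. */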