Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/extent-tree.c        |  7
-rw-r--r--  fs/btrfs/relocation.c         |  2
-rw-r--r--  fs/btrfs/send.c               |  4
-rw-r--r--  fs/ceph/xattr.c               | 28
-rw-r--r--  fs/cifs/cifssmb.c             |  3
-rw-r--r--  fs/cifs/connect.c             | 32
-rw-r--r--  fs/cifs/smb2ops.c             | 18
-rw-r--r--  fs/cifs/smb2pdu.c             | 13
-rw-r--r--  fs/cifs/smb2pdu.h             |  2
-rw-r--r--  fs/cifs/smbdirect.c           | 36
-rw-r--r--  fs/cifs/transport.c           |  9
-rw-r--r--  fs/ext4/balloc.c              |  9
-rw-r--r--  fs/ext4/extents.c             | 16
-rw-r--r--  fs/ext4/super.c               |  1
-rw-r--r--  fs/jbd2/transaction.c         |  1
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c      |  9
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c      |  4
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 21
-rw-r--r--  fs/xfs/xfs_file.c             | 24
19 files changed, 166 insertions(+), 73 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e2f16b68fcbf..51b5e2da708c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3142,7 +3142,11 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
struct rb_node *node;
int ret = 0;
+ spin_lock(&root->fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction;
+ if (cur_trans)
+ refcount_inc(&cur_trans->use_count);
+ spin_unlock(&root->fs_info->trans_lock);
if (!cur_trans)
return 0;
@@ -3151,6 +3155,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (!head) {
spin_unlock(&delayed_refs->lock);
+ btrfs_put_transaction(cur_trans);
return 0;
}
@@ -3167,6 +3172,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
+ btrfs_put_transaction(cur_trans);
return -EAGAIN;
}
spin_unlock(&delayed_refs->lock);
@@ -3199,6 +3205,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
}
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
+ btrfs_put_transaction(cur_trans);
return ret;
}
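The extent-tree.c hunks above pin fs_info->running_transaction before it is inspected: the pointer is sampled and its use_count bumped under trans_lock, and every early return now drops that reference with btrfs_put_transaction(). A minimal standalone C sketch of the same take-under-lock / put-on-every-exit pattern follows; the types and helpers are illustrative, not the btrfs ones.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct txn { atomic_int use_count; };

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static struct txn *running_txn;			/* may change under trans_lock */

static void txn_put(struct txn *t)
{
	if (atomic_fetch_sub(&t->use_count, 1) == 1)
		free(t);
}

static int check_something(void)
{
	struct txn *cur;

	pthread_mutex_lock(&trans_lock);	/* sample + pin atomically */
	cur = running_txn;
	if (cur)
		atomic_fetch_add(&cur->use_count, 1);
	pthread_mutex_unlock(&trans_lock);

	if (!cur)
		return 0;

	/* ... inspect state that is only stable while the txn is pinned ... */

	txn_put(cur);				/* every exit path drops the ref */
	return 0;
}

int main(void)
{
	running_txn = calloc(1, sizeof(*running_txn));
	atomic_init(&running_txn->use_count, 1);

	check_something();
	txn_put(running_txn);			/* drop the original reference */
	return 0;
}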
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 00b7d3231821..b041b945a7ae 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1841,7 +1841,7 @@ again:
old_bytenr = btrfs_node_blockptr(parent, slot);
blocksize = fs_info->nodesize;
old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
- btrfs_node_key_to_cpu(parent, &key, slot);
+ btrfs_node_key_to_cpu(parent, &first_key, slot);
if (level <= max_level) {
eb = path->nodes[level];
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 221e5cdb060b..c0074d2d7d6d 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5236,6 +5236,10 @@ static int send_write_or_clone(struct send_ctx *sctx,
len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
}
+ if (offset >= sctx->cur_inode_size) {
+ ret = 0;
+ goto out;
+ }
if (offset + len > sctx->cur_inode_size)
len = sctx->cur_inode_size - offset;
if (len == 0) {
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 7e72348639e4..315f7e63e7cc 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -228,7 +228,15 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
{
- return (ci->i_max_files || ci->i_max_bytes);
+ bool ret = false;
+ spin_lock(&ci->i_ceph_lock);
+ if ((ci->i_max_files || ci->i_max_bytes) &&
+ ci->i_vino.snap == CEPH_NOSNAP &&
+ ci->i_snap_realm &&
+ ci->i_snap_realm->ino == ci->i_vino.ino)
+ ret = true;
+ spin_unlock(&ci->i_ceph_lock);
+ return ret;
}
static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
@@ -1008,14 +1016,19 @@ int __ceph_setxattr(struct inode *inode, const char *name,
char *newval = NULL;
struct ceph_inode_xattr *xattr = NULL;
int required_blob_size;
+ bool check_realm = false;
bool lock_snap_rwsem = false;
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
vxattr = ceph_match_vxattr(inode, name);
- if (vxattr && vxattr->readonly)
- return -EOPNOTSUPP;
+ if (vxattr) {
+ if (vxattr->readonly)
+ return -EOPNOTSUPP;
+ if (value && !strncmp(vxattr->name, "ceph.quota", 10))
+ check_realm = true;
+ }
/* pass any unhandled ceph.* xattrs through to the MDS */
if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
@@ -1109,6 +1122,15 @@ do_sync_unlocked:
err = -EBUSY;
} else {
err = ceph_sync_setxattr(inode, name, value, size, flags);
+ if (err >= 0 && check_realm) {
+ /* check if snaprealm was created for quota inode */
+ spin_lock(&ci->i_ceph_lock);
+ if ((ci->i_max_files || ci->i_max_bytes) &&
+ !(ci->i_snap_realm &&
+ ci->i_snap_realm->ino == ci->i_vino.ino))
+ err = -EOPNOTSUPP;
+ spin_unlock(&ci->i_ceph_lock);
+ }
}
out:
ceph_free_cap_flush(prealloc_cf);
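Both ceph hunks read several fields guarded by i_ceph_lock (i_max_files, i_max_bytes, i_snap_realm) as one predicate, so the test is now evaluated entirely under the lock. A small standalone sketch of that locked-predicate pattern, with made-up names in place of the ceph structures:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct qinode {
	pthread_mutex_t lock;		/* stands in for i_ceph_lock */
	uint64_t max_files;
	uint64_t max_bytes;
	uint64_t realm_ino;		/* 0 means no snap realm attached */
	uint64_t ino;
};

static bool quota_exists(struct qinode *ci)
{
	bool ret = false;

	pthread_mutex_lock(&ci->lock);
	if ((ci->max_files || ci->max_bytes) &&
	    ci->realm_ino && ci->realm_ino == ci->ino)
		ret = true;
	pthread_mutex_unlock(&ci->lock);
	return ret;
}

int main(void)
{
	struct qinode ci = { .max_bytes = 1 << 20, .realm_ino = 42, .ino = 42 };

	pthread_mutex_init(&ci.lock, NULL);
	return quota_exists(&ci) ? 0 : 1;
}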
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 6d3e40d7029c..1529a088383d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -455,6 +455,9 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
server->sign = true;
}
+ if (cifs_rdma_enabled(server) && server->sign)
+ cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled");
+
return 0;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e8830f076a7f..a5aa158d535a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2959,6 +2959,22 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
}
}
+ if (volume_info->seal) {
+ if (ses->server->vals->protocol_id == 0) {
+ cifs_dbg(VFS,
+ "SMB3 or later required for encryption\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
+ } else if (tcon->ses->server->capabilities &
+ SMB2_GLOBAL_CAP_ENCRYPTION)
+ tcon->seal = true;
+ else {
+ cifs_dbg(VFS, "Encryption is not supported on share\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
+ }
+ }
+
/*
* BB Do we need to wrap session_mutex around this TCon call and Unix
* SetFS as we do on SessSetup and reconnect?
@@ -3007,22 +3023,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon->use_resilient = true;
}
- if (volume_info->seal) {
- if (ses->server->vals->protocol_id == 0) {
- cifs_dbg(VFS,
- "SMB3 or later required for encryption\n");
- rc = -EOPNOTSUPP;
- goto out_fail;
- } else if (tcon->ses->server->capabilities &
- SMB2_GLOBAL_CAP_ENCRYPTION)
- tcon->seal = true;
- else {
- cifs_dbg(VFS, "Encryption is not supported on share\n");
- rc = -EOPNOTSUPP;
- goto out_fail;
- }
- }
-
/*
* We can have only one retry value for a connection to a share so for
* resources mounted more than once to the same server share the last
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 38ebf3f357d2..b76b85881dcc 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -252,9 +252,14 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
- if (server->rdma)
- wsize = min_t(unsigned int,
+ if (server->rdma) {
+ if (server->sign)
+ wsize = min_t(unsigned int,
+ wsize, server->smbd_conn->max_fragmented_send_size);
+ else
+ wsize = min_t(unsigned int,
wsize, server->smbd_conn->max_readwrite_size);
+ }
#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
@@ -272,9 +277,14 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
- if (server->rdma)
- rsize = min_t(unsigned int,
+ if (server->rdma) {
+ if (server->sign)
+ rsize = min_t(unsigned int,
+ rsize, server->smbd_conn->max_fragmented_recv_size);
+ else
+ rsize = min_t(unsigned int,
rsize, server->smbd_conn->max_readwrite_size);
+ }
#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
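The smb2ops.c change falls back to the SMB Direct fragmented send/receive limits whenever the connection is signed, since signed traffic cannot use RDMA read/write. A standalone sketch of the wsize side of that clamp, with illustrative parameters rather than the cifs structures:

#include <stdbool.h>
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int negotiate_wsize(unsigned int requested,
				    unsigned int max_write,
				    bool rdma, bool sign,
				    unsigned int max_fragmented_send,
				    unsigned int max_readwrite)
{
	unsigned int wsize = min_u(requested, max_write);

	if (rdma)
		wsize = min_u(wsize, sign ? max_fragmented_send
					  : max_readwrite);
	return wsize;
}

int main(void)
{
	/* signed RDMA mount: clamped by the fragmented send size */
	printf("%u\n", negotiate_wsize(1048576, 8388608, true, true,
				       131072, 1048576));	/* 131072 */
	return 0;
}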
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 0f044c4a2dc9..60db51bae0e3 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -383,10 +383,10 @@ static void
build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
{
pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
- pneg_ctxt->DataLength = cpu_to_le16(6);
- pneg_ctxt->CipherCount = cpu_to_le16(2);
- pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
- pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
+ pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */
+ pneg_ctxt->CipherCount = cpu_to_le16(1);
+/* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */
+ pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM;
}
static void
@@ -444,6 +444,7 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
return -EINVAL;
}
server->cipher_type = ctxt->Ciphers[0];
+ server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
return 0;
}
@@ -2590,7 +2591,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
* If we want to do a RDMA write, fill in and append
* smbd_buffer_descriptor_v1 to the end of read request
*/
- if (server->rdma && rdata &&
+ if (server->rdma && rdata && !server->sign &&
rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
struct smbd_buffer_descriptor_v1 *v1;
@@ -2968,7 +2969,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
* If we want to do a server RDMA read, fill in and append
* smbd_buffer_descriptor_v1 to the end of write request
*/
- if (server->rdma && wdata->bytes >=
+ if (server->rdma && !server->sign && wdata->bytes >=
server->smbd_conn->rdma_readwrite_threshold) {
struct smbd_buffer_descriptor_v1 *v1;
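In the smb2pdu.c negotiate-context change, DataLength tracks the advertised cipher list: 2 bytes of CipherCount plus 2 bytes per cipher, hence 4 with only AES-128-CCM and 6 with the earlier two-cipher list. A tiny standalone illustration of that arithmetic (not the on-the-wire structures):

#include <stdint.h>
#include <stdio.h>

static uint16_t encrypt_ctx_data_len(unsigned int cipher_count)
{
	/* 2 bytes of CipherCount plus 2 bytes per advertised cipher */
	return (uint16_t)(sizeof(uint16_t) + cipher_count * sizeof(uint16_t));
}

int main(void)
{
	printf("%u\n", (unsigned)encrypt_ctx_data_len(1));	/* 4, the new value */
	printf("%u\n", (unsigned)encrypt_ctx_data_len(2));	/* 6, the old value */
	return 0;
}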
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6093e5142b2b..d28f358022c5 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -297,7 +297,7 @@ struct smb2_encryption_neg_context {
__le16 DataLength;
__le32 Reserved;
__le16 CipherCount; /* AES-128-GCM and AES-128-CCM */
- __le16 Ciphers[2]; /* Ciphers[0] since only one used now */
+ __le16 Ciphers[1]; /* Ciphers[0] since only one used now */
} __packed;
struct smb2_negotiate_rsp {
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 87817ddcc096..c62f7c95683c 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2086,7 +2086,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
int start, i, j;
int max_iov_size =
info->max_send_size - sizeof(struct smbd_data_transfer);
- struct kvec iov[SMBDIRECT_MAX_SGE];
+ struct kvec *iov;
int rc;
info->smbd_send_pending++;
@@ -2096,32 +2096,20 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
}
/*
- * This usually means a configuration error
- * We use RDMA read/write for packet size > rdma_readwrite_threshold
- * as long as it's properly configured we should never get into this
- * situation
- */
- if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
- log_write(ERR, "maximum send segment %x exceeding %x\n",
- rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
- rc = -EINVAL;
- goto done;
- }
-
- /*
- * Remove the RFC1002 length defined in MS-SMB2 section 2.1
- * It is used only for TCP transport
+ * Skip the RFC1002 length defined in MS-SMB2 section 2.1
+ * It is used only for TCP transport in the iov[0]
* In future we may want to add a transport layer under protocol
* layer so this will only be issued to TCP transport
*/
- iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4;
- iov[0].iov_len = rqst->rq_iov[0].iov_len - 4;
- buflen += iov[0].iov_len;
+
+ if (rqst->rq_iov[0].iov_len != 4) {
+ log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
+ return -EINVAL;
+ }
+ iov = &rqst->rq_iov[1];
/* total up iov array first */
- for (i = 1; i < rqst->rq_nvec; i++) {
- iov[i].iov_base = rqst->rq_iov[i].iov_base;
- iov[i].iov_len = rqst->rq_iov[i].iov_len;
+ for (i = 0; i < rqst->rq_nvec-1; i++) {
buflen += iov[i].iov_len;
}
@@ -2198,14 +2186,14 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
goto done;
}
i++;
- if (i == rqst->rq_nvec)
+ if (i == rqst->rq_nvec-1)
break;
}
start = i;
buflen = 0;
} else {
i++;
- if (i == rqst->rq_nvec) {
+ if (i == rqst->rq_nvec-1) {
/* send out all remaining vecs */
remaining_data_length -= buflen;
log_write(INFO,
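After the smbdirect.c change, smbd_send() expects the 4-byte RFC1002 length to sit alone in rq_iov[0] and simply skips it, walking rq_iov[1..rq_nvec-1] for the payload. A standalone sketch of that accounting, with a local struct kvec standing in for the kernel one:

#include <stddef.h>
#include <stdio.h>

struct kvec { void *iov_base; size_t iov_len; };

static long payload_len(const struct kvec *rq_iov, int rq_nvec)
{
	const struct kvec *iov = &rq_iov[1];	/* skip the RFC1002 header vec */
	size_t buflen = 0;
	int i;

	if (rq_iov[0].iov_len != 4)
		return -1;			/* caller built the request wrong */

	for (i = 0; i < rq_nvec - 1; i++)
		buflen += iov[i].iov_len;
	return (long)buflen;
}

int main(void)
{
	char hdr[4], body[100];
	struct kvec rq_iov[2] = {
		{ hdr, sizeof(hdr) }, { body, sizeof(body) },
	};

	printf("%ld\n", payload_len(rq_iov, 2));	/* 100 */
	return 0;
}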
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 8f6f25918229..927226a2122f 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -753,7 +753,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
goto out;
#ifdef CONFIG_CIFS_SMB311
- if (ses->status == CifsNew)
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
smb311_update_preauth_hash(ses, rqst->rq_iov+1,
rqst->rq_nvec-1);
#endif
@@ -798,7 +798,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
*resp_buf_type = CIFS_SMALL_BUFFER;
#ifdef CONFIG_CIFS_SMB311
- if (ses->status == CifsNew) {
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
struct kvec iov = {
.iov_base = buf + 4,
.iov_len = get_rfc1002_length(buf)
@@ -834,8 +834,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
GFP_KERNEL);
- if (!new_iov)
+ if (!new_iov) {
+ /* otherwise cifs_send_recv below sets resp_buf_type */
+ *resp_buf_type = CIFS_NO_BUFFER;
return -ENOMEM;
+ }
} else
new_iov = s_iov;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index a33d8fb1bf2a..508b905d744d 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -321,6 +321,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
+ ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;
@@ -338,7 +339,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
/* check whether block bitmap block number is set */
blk = ext4_block_bitmap(sb, desc);
offset = blk - group_first_block;
- if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize ||
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
@@ -346,7 +347,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
/* check whether the inode bitmap block number is set */
blk = ext4_inode_bitmap(sb, desc);
offset = blk - group_first_block;
- if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize ||
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
@@ -354,8 +355,8 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
/* check whether the inode table block number is set */
blk = ext4_inode_table(sb, desc);
offset = blk - group_first_block;
- if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize ||
- EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= sb->s_blocksize)
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
return blk;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
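The balloc.c fix bounds bitmap offsets by EXT4_CLUSTERS_PER_GROUP(sb), the number of bits the bitmap actually holds, rather than by the block size in bytes. A standalone sketch of the bound check with example numbers (assuming the default 4K-block layout):

#include <stdbool.h>
#include <stdio.h>

static bool bitmap_bit_valid(long offset, long clusters_per_group)
{
	return offset >= 0 && offset < clusters_per_group;
}

int main(void)
{
	/* With 4K blocks a group has 32768 bits in its bitmap, while
	 * s_blocksize is only 4096, so the two bounds are very different. */
	printf("%d\n", bitmap_bit_valid(20000, 32768));	/* 1: in range */
	printf("%d\n", bitmap_bit_valid(40000, 32768));	/* 0: past the end */
	return 0;
}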
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0a7315961bac..c969275ce3ee 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5329,8 +5329,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
stop = le32_to_cpu(extent->ee_block);
/*
- * In case of left shift, Don't start shifting extents until we make
- * sure the hole is big enough to accommodate the shift.
+ * For left shifts, make sure the hole on the left is big enough to
+ * accommodate the shift. For right shifts, make sure the last extent
+ * won't be shifted beyond EXT_MAX_BLOCKS.
*/
if (SHIFT == SHIFT_LEFT) {
path = ext4_find_extent(inode, start - 1, &path,
@@ -5350,9 +5351,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
if ((start == ex_start && shift > ex_start) ||
(shift > start - ex_end)) {
- ext4_ext_drop_refs(path);
- kfree(path);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (shift > EXT_MAX_BLOCKS -
+ (stop + ext4_ext_get_actual_len(extent))) {
+ ret = -EINVAL;
+ goto out;
}
}
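The new right-shift branch in extents.c refuses a shift that would push the end of the last extent past EXT_MAX_BLOCKS, written so the comparison itself cannot overflow. A standalone sketch of that guard, using the same 0xffffffff limit:

#include <stdbool.h>
#include <stdint.h>

#define EXT_MAX_BLOCKS 0xffffffffU

static bool right_shift_fits(uint32_t last_block, uint32_t last_len,
			     uint32_t shift)
{
	/* same as last_block + last_len + shift <= EXT_MAX_BLOCKS, but
	 * rearranged so the comparison itself cannot wrap; the existing
	 * extent (last_block + last_len) is assumed to fit already */
	return shift <= EXT_MAX_BLOCKS - (last_block + last_len);
}

int main(void)
{
	/* last extent ends at block 1010; a shift of ~4 billion is rejected */
	return right_shift_fits(1000, 10, EXT_MAX_BLOCKS) ? 1 : 0;
}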
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 185f7e61f4cf..eb104e8476f0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -5886,5 +5886,6 @@ static void __exit ext4_exit_fs(void)
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Fourth Extended Filesystem");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc32c");
module_init(ext4_init_fs)
module_exit(ext4_exit_fs)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index ac311037d7a5..8aa453784402 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -532,6 +532,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
*/
ret = start_this_handle(journal, handle, GFP_NOFS);
if (ret < 0) {
+ handle->h_journal = journal;
jbd2_journal_free_reserved(handle);
return ret;
}
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index ce4a34a2751d..35a124400d60 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -511,7 +511,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
if (args->flags & ATTR_CREATE)
return retval;
retval = xfs_attr_shortform_remove(args);
- ASSERT(retval == 0);
+ if (retval)
+ return retval;
+ /*
+ * Since we have removed the old attr, clear ATTR_REPLACE so
+ * that the leaf format add routine won't trip over the attr
+ * not being around.
+ */
+ args->flags &= ~ATTR_REPLACE;
}
if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 6a7c2f03ea11..040eeda8426f 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -725,12 +725,16 @@ xfs_bmap_extents_to_btree(
*logflagsp = 0;
if ((error = xfs_alloc_vextent(&args))) {
xfs_iroot_realloc(ip, -1, whichfork);
+ ASSERT(ifp->if_broot == NULL);
+ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
return error;
}
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
xfs_iroot_realloc(ip, -1, whichfork);
+ ASSERT(ifp->if_broot == NULL);
+ XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
return -ENOSPC;
}
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index ef68b1de006a..1201107eabc6 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -466,6 +466,8 @@ xfs_dinode_verify(
return __this_address;
if (di_size > XFS_DFORK_DSIZE(dip, mp))
return __this_address;
+ if (dip->di_nextents)
+ return __this_address;
/* fall through */
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
@@ -484,12 +486,31 @@ xfs_dinode_verify(
if (XFS_DFORK_Q(dip)) {
switch (dip->di_aformat) {
case XFS_DINODE_FMT_LOCAL:
+ if (dip->di_anextents)
+ return __this_address;
+ /* fall through */
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
break;
default:
return __this_address;
}
+ } else {
+ /*
+ * If there is no fork offset, this may be a freshly-made inode
+ * in a new disk cluster, in which case di_aformat is zeroed.
+ * Otherwise, such an inode must be in EXTENTS format; this goes
+ * for freed inodes as well.
+ */
+ switch (dip->di_aformat) {
+ case 0:
+ case XFS_DINODE_FMT_EXTENTS:
+ break;
+ default:
+ return __this_address;
+ }
+ if (dip->di_anextents)
+ return __this_address;
}
/* only version 3 or greater inodes are extensively verified here */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 299aee4b7b0b..e70fb8ccecea 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -778,22 +778,26 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
} else if (mode & FALLOC_FL_INSERT_RANGE) {
- unsigned int blksize_mask = i_blocksize(inode) - 1;
+ unsigned int blksize_mask = i_blocksize(inode) - 1;
+ loff_t isize = i_size_read(inode);
- new_size = i_size_read(inode) + len;
if (offset & blksize_mask || len & blksize_mask) {
error = -EINVAL;
goto out_unlock;
}
- /* check the new inode size does not wrap through zero */
- if (new_size > inode->i_sb->s_maxbytes) {
+ /*
+ * New inode size must not exceed ->s_maxbytes, accounting for
+ * possible signed overflow.
+ */
+ if (inode->i_sb->s_maxbytes - isize < len) {
error = -EFBIG;
goto out_unlock;
}
+ new_size = isize + len;
/* Offset should be less than i_size */
- if (offset >= i_size_read(inode)) {
+ if (offset >= isize) {
error = -EINVAL;
goto out_unlock;
}
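The rewritten EFBIG test in xfs_file_fallocate() compares s_maxbytes - isize against len instead of forming isize + len, so a huge offset/length pair cannot wrap the signed sum before the check. A standalone demonstration of why the rearranged form is safe:

#include <stdint.h>
#include <stdio.h>

static int would_exceed(int64_t maxbytes, int64_t isize, int64_t len)
{
	/* overflow-safe: isize <= maxbytes, so the subtraction cannot wrap,
	 * whereas isize + len can overflow a signed 64-bit value */
	return maxbytes - isize < len;
}

int main(void)
{
	int64_t maxbytes = INT64_MAX;
	int64_t isize    = INT64_MAX - 10;
	int64_t len      = 100;

	printf("%d\n", would_exceed(maxbytes, isize, len));	/* 1: reject */
	return 0;
}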
@@ -876,8 +880,18 @@ xfs_file_dedupe_range(
struct file *dst_file,
u64 dst_loff)
{
+ struct inode *srci = file_inode(src_file);
+ u64 max_dedupe;
int error;
+ /*
+ * Since we have to read all these pages in to compare them, cut
+ * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
+ * That means we won't do more than MAX_RW_COUNT IO per request.
+ */
+ max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
+ if (len > max_dedupe)
+ len = max_dedupe;
error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
len, true);
if (error)
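The dedupe hunk caps each request at MAX_RW_COUNT/2 rounded down to the block size, so reading both copies for comparison never exceeds MAX_RW_COUNT of I/O. A standalone sketch of that clamp, assuming 4K pages for the MAX_RW_COUNT definition:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define MAX_RW_COUNT	(INT_MAX & ~(PAGE_SIZE - 1))	/* as defined in the kernel */

static uint64_t clamp_dedupe_len(uint64_t len, uint64_t blocksize)
{
	uint64_t max_dedupe = (MAX_RW_COUNT >> 1) & ~(blocksize - 1);

	return len > max_dedupe ? max_dedupe : len;
}

int main(void)
{
	/* a 1 TiB request on a 4K-block fs is cut to just under 1 GiB */
	printf("%llu\n",
	       (unsigned long long)clamp_dedupe_len(1ULL << 40, 4096));
	return 0;
}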