author     Linus Torvalds <torvalds@linux-foundation.org>   2018-08-29 13:38:39 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-08-29 13:38:39 -0700
commit     b4df50de6ab66e41b3b8d8acf3ce45c632084163 (patch)
tree       5e2c010a8642ca28c675d5a8b4cedf3f1a65d053 /drivers/crypto/vmx
parent     3f16503b7d2274ac8cbab11163047ac0b4c66cfe (diff)
parent     3d7c82060d1fe65bde4023aac41a0b1bd7718e07 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:

 - Check for the right CPU feature bit in sm4-ce on arm64.
 - Fix scatterwalk WARN_ON in aes-gcm-ce on arm64.
 - Fix unaligned fault in aesni on x86.
 - Fix potential NULL pointer dereference on exit in chtls.
 - Fix DMA mapping direction for RSA in caam.
 - Fix error path return value for xts setkey in caam.
 - Fix address endianness when DMA unmapping in caam.
 - Fix sleep-in-atomic in vmx.
 - Fix command corruption when queue is full in cavium/nitrox.

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: cavium/nitrox - fix for command corruption in queue full case with backlog submissions.
  crypto: vmx - Fix sleep-in-atomic bugs
  crypto: arm64/aes-gcm-ce - fix scatterwalk API violation
  crypto: aesni - Use unaligned loads from gcm_context_data
  crypto: chtls - fix null dereference chtls_free_uld()
  crypto: arm64/sm4-ce - check for the right CPU feature bit
  crypto: caam - fix DMA mapping direction for RSA forms 2 & 3
  crypto: caam/qi - fix error path in xts setkey
  crypto: caam/jr - fix descriptor DMA unmapping
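Of the fixes above, only the vmx one touches this directory. The underlying problem is a sleep-in-atomic bug: the old code held preempt_disable()/pagefault_disable()/enable_kernel_vsx() across the entire blkcipher walk, but blkcipher_walk_done() can sleep (e.g. via crypto_yield()), which is not allowed with preemption disabled. The sketch below is a simplified illustration of the broken nesting and the corrected one; argument lists are elided and it is not the verbatim driver code, which follows in the diff.

/* Broken nesting (simplified): the atomic section spans a call that may sleep. */
preempt_disable();
pagefault_disable();
enable_kernel_vsx();                    /* allow VSX use in kernel context */
while ((nbytes = walk.nbytes)) {
	aes_p8_cbc_encrypt(/* ... */);  /* VSX assembly: needs the window */
	nbytes &= AES_BLOCK_SIZE - 1;
	ret = blkcipher_walk_done(desc, &walk, nbytes);  /* may sleep -> bug */
}
disable_kernel_vsx();
pagefault_enable();
preempt_enable();

/* Fixed nesting: open the atomic window only around the VSX call itself. */
while ((nbytes = walk.nbytes)) {
	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	aes_p8_cbc_encrypt(/* ... */);  /* only this runs with preemption off */
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();
	nbytes &= AES_BLOCK_SIZE - 1;
	ret = blkcipher_walk_done(desc, &walk, nbytes);  /* free to sleep here */
}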
Diffstat (limited to 'drivers/crypto/vmx')
-rw-r--r--   drivers/crypto/vmx/aes_cbc.c   30
-rw-r--r--   drivers/crypto/vmx/aes_xts.c   21
2 files changed, 28 insertions(+), 23 deletions(-)
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 5285ece4f33a..b71895871be3 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
} else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
-
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->enc_key, walk.iv, 1);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
} else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
-
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->dec_key, walk.iv, 0);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8bd9aff0f55f..e9954a7d4694 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
} else {
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ ret = blkcipher_walk_virt(desc, &walk);
+
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
- blkcipher_walk_init(&walk, dst, src, nbytes);
-
- ret = blkcipher_walk_virt(desc, &walk);
iv = walk.iv;
memset(tweak, 0, AES_BLOCK_SIZE);
aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
if (enc)
aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
else
aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
}
return ret;
}
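For readability, here is a condensed view of the XTS path after the change above; it is a paraphrase of the resulting p8_aes_xts_crypt() body, not the verbatim source, with request setup, declarations and error handling elided. The one-off tweak encryption gets its own enable/disable window before the loop, and every iteration opens a fresh window around the VSX routines, so blkcipher_walk_done() is always called with preemption enabled.

/* Condensed sketch of the fixed XTS flow (details elided). */
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);      /* may sleep: done outside any atomic window */

preempt_disable();
pagefault_disable();
enable_kernel_vsx();
iv = walk.iv;
memset(tweak, 0, AES_BLOCK_SIZE);
aes_p8_encrypt(iv, tweak, &ctx->tweak_key);  /* compute the XTS tweak once */
disable_kernel_vsx();
pagefault_enable();
preempt_enable();

while ((nbytes = walk.nbytes)) {
	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	if (enc)
		aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
				   nbytes & AES_BLOCK_MASK, &ctx->enc_key,
				   NULL, tweak);
	else
		aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
				   nbytes & AES_BLOCK_MASK, &ctx->dec_key,
				   NULL, tweak);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	nbytes &= AES_BLOCK_SIZE - 1;
	ret = blkcipher_walk_done(desc, &walk, nbytes);  /* safe to sleep here now */
}

The cost of this structure is one extra enable/disable pair per walk step instead of one per request, which the fix accepts in exchange for never calling a possibly sleeping function from an atomic context.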