Diffstat (limited to 'drivers/crypto/nx/nx-aes-ccm.c')
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 45
1 file changed, 14 insertions(+), 31 deletions(-)
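For orientation before the hunks: the CCM paths previously carried the IV inside an on-stack struct blkcipher_desc purely so that generate_pat(), nx_build_sg_lists(), and the partial-completion memcpy() could reach it through desc->info. This patch passes the u8 *iv pointer directly and drops the descriptor from all four entry points. The net effect on one caller, excerpted from the hunks below (kernel C, shown before/after for illustration, not as a buildable standalone unit):

/* Before: an on-stack blkcipher_desc exists only to carry the IV. */
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc, req->assoclen);
}

/* After: the IV pointer is passed straight through. */
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, req->iv, req->assoclen);
}

The same mechanical substitution (desc->info becoming iv inside the workers, and &desc becoming iv or req->iv at the call sites) repeats through ccm_nx_encrypt(), ccm_nx_decrypt(), and the rfc4309 ccm4309 variants, as the diff below shows.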
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5be8f01c5da8..4c9362eebefd 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -327,7 +327,7 @@ static int generate_pat(u8 *iv,
}
static int ccm_nx_decrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -348,7 +348,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
req->src, nbytes + req->assoclen, authsize,
SCATTERWALK_FROM_SG);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -367,7 +367,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -381,7 +381,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -405,7 +405,7 @@ out:
}
static int ccm_nx_encrypt(struct aead_request *req,
- struct blkcipher_desc *desc,
+ u8 *iv,
unsigned int assoclen)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
@@ -418,7 +418,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
+ rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
@@ -436,7 +436,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
&to_process, processed + req->assoclen,
csbcpb->cpb.aes_ccm.iv_or_ctr);
if (rc)
@@ -450,7 +450,7 @@ static int ccm_nx_encrypt(struct aead_request *req,
/* for partial completion, copy following for next
* entry into loop...
*/
- memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
memcpy(csbcpb->cpb.aes_ccm.in_s0,
@@ -481,67 +481,50 @@ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_encrypt(req, &desc, req->assoclen);
+ return ccm_nx_encrypt(req, req->iv, req->assoclen);
}
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
- struct blkcipher_desc desc;
u8 *iv = rctx->iv;
iv[0] = 3;
memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
memcpy(iv + 4, req->iv, 8);
- desc.info = iv;
-
- return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
+ return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}
static int ccm_aes_nx_decrypt(struct aead_request *req)
{
- struct blkcipher_desc desc;
int rc;
- desc.info = req->iv;
-
- rc = crypto_ccm_check_iv(desc.info);
+ rc = crypto_ccm_check_iv(req->iv);
if (rc)
return rc;
- return ccm_nx_decrypt(req, &desc, req->assoclen);
+ return ccm_nx_decrypt(req, req->iv, req->assoclen);
}
-/* tell the block cipher walk routines that this is a stream cipher by
- * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
- * during encrypt/decrypt doesn't solve this problem, because it calls
- * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
- * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
.base = {
.cra_name = "ccm(aes)",