Diffstat (limited to 'drivers/crypto/tegra')
-rw-r--r--  drivers/crypto/tegra/tegra-se-aes.c   400
-rw-r--r--  drivers/crypto/tegra/tegra-se-hash.c  287
-rw-r--r--  drivers/crypto/tegra/tegra-se-key.c    29
-rw-r--r--  drivers/crypto/tegra/tegra-se-main.c   16
-rw-r--r--  drivers/crypto/tegra/tegra-se.h        39
5 files changed, 522 insertions(+), 249 deletions(-)
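
The theme across these five files: a tegra_key_submit() failure is no longer fatal. If no hardware keyslot is free at setkey() time, the key is cached in the transform context and programmed into a reserved keyslot (14 for AES, 15 for XTS; see tegra-se.h) only for the duration of each request. A condensed sketch of the setkey side, using the context fields this patch adds (example_setkey is illustrative, not part of the patch):

	static int example_setkey(struct tegra_aes_ctx *ctx,
				  const u8 *key, u32 keylen)
	{
		if (aes_check_keylen(keylen))
			return -EINVAL;

		/* Try a regular keyslot; if none is free, cache the key so
		 * the request path can load it into a reserved slot on demand.
		 */
		if (tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id)) {
			ctx->keylen = keylen;
			memcpy(ctx->key1, key, keylen);
		}

		return 0;
	}
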
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index d734c9a56786..0e07d0523291 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -28,6 +28,9 @@ struct tegra_aes_ctx {
u32 ivsize;
u32 key1_id;
u32 key2_id;
+ u32 keylen;
+ u8 key1[AES_MAX_KEY_SIZE];
+ u8 key2[AES_MAX_KEY_SIZE];
};
struct tegra_aes_reqctx {
@@ -43,8 +46,9 @@ struct tegra_aead_ctx {
struct tegra_se *se;
unsigned int authsize;
u32 alg;
- u32 keylen;
u32 key_id;
+ u32 keylen;
+ u8 key[AES_MAX_KEY_SIZE];
};
struct tegra_aead_reqctx {
@@ -56,8 +60,8 @@ struct tegra_aead_reqctx {
unsigned int cryptlen;
unsigned int authsize;
bool encrypt;
- u32 config;
u32 crypto_config;
+ u32 config;
u32 key_id;
u32 iv[4];
u8 authdata[16];
@@ -67,6 +71,8 @@ struct tegra_cmac_ctx {
struct tegra_se *se;
unsigned int alg;
u32 key_id;
+ u32 keylen;
+ u8 key[AES_MAX_KEY_SIZE];
struct crypto_shash *fallback_tfm;
};
@@ -260,17 +266,13 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
struct tegra_se *se = ctx->se;
- unsigned int cmdlen;
+ unsigned int cmdlen, key1_id, key2_id;
int ret;
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- return -ENOMEM;
-
- rctx->datbuf.size = SE_AES_BUFLEN;
- rctx->iv = (u32 *)req->iv;
+ rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
rctx->len = req->cryptlen;
+ key1_id = ctx->key1_id;
+ key2_id = ctx->key2_id;
/* Pad input to AES Block size */
if (ctx->alg != SE_ALG_XTS) {
@@ -278,20 +280,59 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
}
+ rctx->datbuf.size = rctx->len;
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
+
scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
+ rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
+ rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
+
+ if (!key1_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
+ ctx->keylen, ctx->alg, &key1_id);
+ if (ret)
+ goto out;
+ }
+
+ rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
+
+ if (ctx->alg == SE_ALG_XTS) {
+ if (!key2_id) {
+ ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
+ ctx->keylen, ctx->alg, &key2_id);
+ if (ret)
+ goto out;
+ }
+
+ rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
+ }
+
/* Prepare the command and submit for execution */
cmdlen = tegra_aes_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */
tegra_aes_update_iv(req, ctx);
scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
+out:
/* Free the buffer */
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
+ if (tegra_key_is_reserved(key1_id))
+ tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
+
+ if (tegra_key_is_reserved(key2_id))
+ tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
+
+out_finalize:
crypto_finalize_skcipher_request(se->engine, req, ret);
return 0;
@@ -313,6 +354,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
ctx->se = se_alg->se_dev;
ctx->key1_id = 0;
ctx->key2_id = 0;
+ ctx->keylen = 0;
algname = crypto_tfm_alg_name(&tfm->base);
ret = se_algname_to_algid(algname);
@@ -341,13 +383,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key1, key, keylen);
+ }
+
+ return 0;
}
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
@@ -365,11 +414,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm,
ret = tegra_key_submit(ctx->se, key, len,
ctx->alg, &ctx->key1_id);
- if (ret)
- return ret;
+ if (ret) {
+ ctx->keylen = len;
+ memcpy(ctx->key1, key, len);
+ }
- return tegra_key_submit(ctx->se, key + len, len,
+ ret = tegra_key_submit(ctx->se, key + len, len,
ctx->alg, &ctx->key2_id);
+ if (ret) {
+ ctx->keylen = len;
+ memcpy(ctx->key2, key + len, len);
+ }
return 0;
}
@@ -444,12 +499,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
return 0;
rctx->encrypt = encrypt;
- rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
- rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
- rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
-
- if (ctx->key2_id)
- rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}
@@ -715,11 +764,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen);
+ return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -732,11 +781,11 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -755,11 +804,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -886,12 +935,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
- return tegra_se_host1x_submit(se, cmdlen);
+ return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1073,7 +1122,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
- SE_AES_KEY_INDEX(ctx->key_id);
+ SE_AES_KEY_INDEX(rctx->key_id);
/* Copy authdata in the top of buffer for encryption/decryption */
if (rctx->encrypt)
@@ -1098,7 +1147,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
/* Prepare command and submit */
cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -1117,6 +1166,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
rctx->assoclen = req->assoclen;
rctx->authsize = crypto_aead_authsize(tfm);
+ if (rctx->encrypt)
+ rctx->cryptlen = req->cryptlen;
+ else
+ rctx->cryptlen = req->cryptlen - rctx->authsize;
+
memcpy(iv, req->iv, 16);
ret = tegra_ccm_check_iv(iv);
@@ -1145,30 +1199,35 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret;
+ ret = tegra_ccm_crypt_init(req, se, rctx);
+ if (ret)
+ goto out_finalize;
+
+ rctx->key_id = ctx->key_id;
+
/* Allocate buffers required */
- rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
- if (!rctx->inbuf.buf)
- return -ENOMEM;
-
- rctx->inbuf.size = SE_AES_BUFLEN;
+ if (!rctx->inbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
- rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
- goto outbuf_err;
+ goto out_free_inbuf;
}
- rctx->outbuf.size = SE_AES_BUFLEN;
-
- ret = tegra_ccm_crypt_init(req, se, rctx);
- if (ret)
- goto out;
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
if (rctx->encrypt) {
- rctx->cryptlen = req->cryptlen;
-
/* CBC MAC Operation */
ret = tegra_ccm_compute_auth(ctx, rctx);
if (ret)
@@ -1179,8 +1238,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
if (ret)
goto out;
} else {
- rctx->cryptlen = req->cryptlen - ctx->authsize;
-
/* CTR operation */
ret = tegra_ccm_do_ctr(ctx, rctx);
if (ret)
@@ -1193,13 +1250,17 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
}
out:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+out_free_inbuf:
+ dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
+out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1213,23 +1274,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
int ret;
- /* Allocate buffers required */
- rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
- &rctx->inbuf.addr, GFP_KERNEL);
- if (!rctx->inbuf.buf)
- return -ENOMEM;
-
- rctx->inbuf.size = SE_AES_BUFLEN;
-
- rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
- &rctx->outbuf.addr, GFP_KERNEL);
- if (!rctx->outbuf.buf) {
- ret = -ENOMEM;
- goto outbuf_err;
- }
-
- rctx->outbuf.size = SE_AES_BUFLEN;
-
rctx->src_sg = req->src;
rctx->dst_sg = req->dst;
rctx->assoclen = req->assoclen;
@@ -1243,6 +1287,32 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
rctx->iv[3] = (1 << 24);
+ rctx->key_id = ctx->key_id;
+
+ /* Allocate buffers required */
+ rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
+ rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
+ &rctx->inbuf.addr, GFP_KERNEL);
+ if (!rctx->inbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
+
+ rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
+ rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
+ &rctx->outbuf.addr, GFP_KERNEL);
+ if (!rctx->outbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free_inbuf;
+ }
+
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
+
/* If there is associated data perform GMAC operation */
if (rctx->assoclen) {
ret = tegra_gcm_do_gmac(ctx, rctx);
@@ -1266,14 +1336,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
ret = tegra_gcm_do_verify(ctx->se, rctx);
out:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+ dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err:
- dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
+out_free_inbuf:
+ dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
- /* Finalize the request if there are no errors */
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+
+out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1295,6 +1368,7 @@ static int tegra_aead_cra_init(struct crypto_aead *tfm)
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
+ ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
@@ -1376,13 +1450,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+ }
+
+ return 0;
}
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
@@ -1456,6 +1537,35 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct
se->base + se->hw->regs->result + (i * 4));
}
+static int tegra_cmac_do_init(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int i;
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task |= SHA_FIRST;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ return -ENOMEM;
+
+ rctx->residue.size = 0;
+
+ /* Clear any previous result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ return 0;
+}
+
static int tegra_cmac_do_update(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
@@ -1483,7 +1593,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
rctx->total_len += rctx->datbuf.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
- rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
+ rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
/*
* Keep one block and residue bytes in residue and
* return. The bytes will be processed in final()
*/
@@ -1497,6 +1607,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
return 0;
}
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -1511,23 +1626,19 @@ static int tegra_cmac_do_update(struct ahash_request *req)
rctx->residue.size = nresidue;
/*
- * If this is not the first 'update' call, paste the previous copied
- * intermediate results to the registers so that it gets picked up.
- * This is to support the import/export functionality.
+ * If this is not the first task, paste the previously copied
+ * intermediate results into the registers so that they get picked up.
*/
if (!(rctx->task & SHA_FIRST))
tegra_cmac_paste_result(ctx->se, rctx);
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
- ret = tegra_se_host1x_submit(se, cmdlen);
- /*
- * If this is not the final update, copy the intermediate results
- * from the registers so that it can be used in the next 'update'
- * call. This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FINAL))
- tegra_cmac_copy_result(ctx->se, rctx);
+ tegra_cmac_copy_result(ctx->se, rctx);
+
+ dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
@@ -1543,17 +1654,34 @@ static int tegra_cmac_do_final(struct ahash_request *req)
if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
- return crypto_shash_tfm_digest(ctx->fallback_tfm,
- rctx->datbuf.buf, 0, req->result);
+ ret = crypto_shash_tfm_digest(ctx->fallback_tfm,
+ NULL, 0, req->result);
+ goto out_free;
+ }
+
+ if (rctx->residue.size) {
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
+ /*
+ * If this is not the first task, paste the previously copied
+ * intermediate results into the registers so that they get picked up.
+ */
+ if (!(rctx->task & SHA_FIRST))
+ tegra_cmac_paste_result(ctx->se, rctx);
+
/* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
- ret = tegra_se_host1x_submit(se, cmdlen);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;
@@ -1565,8 +1693,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
writel(0, se->base + se->hw->regs->result + (i * 4));
out:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN,
- rctx->datbuf.buf, rctx->datbuf.addr);
+ if (rctx->residue.size)
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
rctx->residue.buf, rctx->residue.addr);
return ret;
@@ -1579,17 +1709,41 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
- int ret;
+ int ret = 0;
+
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_cmac_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
+ if (!ctx->key_id) {
+ ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
+ ctx->keylen, ctx->alg, &rctx->key_id);
+ if (ret)
+ goto out;
+ }
if (rctx->task & SHA_UPDATE) {
ret = tegra_cmac_do_update(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_cmac_do_final(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_FINAL;
}
+out:
+ if (tegra_key_is_reserved(rctx->key_id))
+ tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
crypto_finalize_hash_request(se->engine, req, ret);
@@ -1631,6 +1785,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
+ ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
@@ -1655,51 +1810,11 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_cmac_init(struct ahash_request *req)
-{
- struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
- int i;
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->task = SHA_FIRST;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- goto resbuf_fail;
-
- rctx->residue.size = 0;
-
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- goto datbuf_fail;
-
- rctx->datbuf.size = 0;
-
- /* Clear any previous result */
- for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
- writel(0, se->base + se->hw->regs->result + (i * 4));
-
- return 0;
-
-datbuf_fail:
- dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
- rctx->residue.addr);
-resbuf_fail:
- return -ENOMEM;
-}
-
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
@@ -1709,7 +1824,24 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ctx->fallback_tfm)
crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret) {
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+ }
+
+ return 0;
+}
+
+static int tegra_cmac_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_update(struct ahash_request *req)
@@ -1750,13 +1882,9 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
- int ret;
- ret = tegra_cmac_init(req);
- if (ret)
- return ret;
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
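
Before the tegra-se-hash.c changes below, note the control-flow pattern both hash paths now share: init() and digest() no longer touch hardware in the caller's context; they only set task flags and queue the request, and the engine worker runs INIT, UPDATE and FINAL in order. A minimal sketch of the queueing side, using the patch's own flags and helpers (function names here are illustrative):

	static int example_init(struct ahash_request *req)
	{
		struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
		struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

		/* DMA buffers are now allocated by the engine via SHA_INIT */
		rctx->task = SHA_INIT;

		return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
	}

	static int example_digest(struct ahash_request *req)
	{
		struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
		struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);

		/* One pass through the engine runs all three stages in order */
		rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;

		return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
	}
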
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 0b5cdd5676b1..42d007b7af45 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -34,6 +34,7 @@ struct tegra_sha_reqctx {
struct tegra_se_datbuf datbuf;
struct tegra_se_datbuf residue;
struct tegra_se_datbuf digest;
+ struct tegra_se_datbuf intr_res;
unsigned int alg;
unsigned int config;
unsigned int total_len;
@@ -211,9 +212,62 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
return crypto_ahash_export(&rctx->fallback_req, out);
}
-static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
+static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
+ struct tegra_sha_reqctx *rctx)
+{
+ __be32 *res_be = (__be32 *)rctx->intr_res.buf;
+ u32 *res = (u32 *)rctx->intr_res.buf;
+ int i = 0, j;
+
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT);
+
+ for (j = 0; j < HASH_RESULT_REG_COUNT; j++) {
+ int idx = j;
+
+ /*
+ * The initial, intermediate and final hash value of SHA-384, SHA-512
+ * in SHA_HASH_RESULT registers follow the below layout of bytes.
+ *
+ * +---------------+------------+
+ * | HASH_RESULT_0 | B4...B7 |
+ * +---------------+------------+
+ * | HASH_RESULT_1 | B0...B3 |
+ * +---------------+------------+
+ * | HASH_RESULT_2 | B12...B15 |
+ * +---------------+------------+
+ * | HASH_RESULT_3 | B8...B11 |
+ * +---------------+------------+
+ * | ...... |
+ * +---------------+------------+
+ * | HASH_RESULT_14| B60...B63 |
+ * +---------------+------------+
+ * | HASH_RESULT_15| B56...B59 |
+ * +---------------+------------+
+ *
+ */
+ if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512)
+ idx = (j % 2) ? j - 1 : j + 1;
+
+ /* For SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512, the initial,
+ * intermediate and final hash values stored in the SHA_HASH_RESULT
+ * registers are big-endian rather than little-endian, hence the
+ * be32_to_cpu() conversion below.
+ */
+ if (ctx->alg <= SE_ALG_SHA512)
+ cpuvaddr[i++] = be32_to_cpu(res_be[idx]);
+ else
+ cpuvaddr[i++] = res[idx];
+ }
+
+ return i;
+}
+
+static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
struct tegra_sha_reqctx *rctx)
{
+ struct tegra_se *se = ctx->se;
u64 msg_len, msg_left;
int i = 0;
@@ -241,7 +295,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = upper_32_bits(msg_left);
cpuvaddr[i++] = 0;
cpuvaddr[i++] = 0;
- cpuvaddr[i++] = host1x_opcode_setpayload(6);
+ cpuvaddr[i++] = host1x_opcode_setpayload(2);
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG);
cpuvaddr[i++] = rctx->config;
@@ -249,15 +303,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT;
rctx->task &= ~SHA_FIRST;
} else {
- cpuvaddr[i++] = 0;
+ /*
+ * If this isn't the first task, program the HASH_RESULT registers
+ * with the intermediate result from the previous task.
+ */
+ i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx);
}
+ cpuvaddr[i++] = host1x_opcode_setpayload(4);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR);
cpuvaddr[i++] = rctx->datbuf.addr;
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
SE_ADDR_HI_SZ(rctx->datbuf.size));
- cpuvaddr[i++] = rctx->digest.addr;
- cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
- SE_ADDR_HI_SZ(rctx->digest.size));
+
+ if (rctx->task & SHA_UPDATE) {
+ cpuvaddr[i++] = rctx->intr_res.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) |
+ SE_ADDR_HI_SZ(rctx->intr_res.size));
+ } else {
+ cpuvaddr[i++] = rctx->digest.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
+ SE_ADDR_HI_SZ(rctx->digest.size));
+ }
+
if (rctx->key_id) {
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG);
@@ -266,42 +334,72 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION);
- cpuvaddr[i++] = SE_SHA_OP_WRSTALL |
- SE_SHA_OP_START |
+ cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START |
SE_SHA_OP_LASTBUF;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
- dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x",
- msg_len, msg_left, rctx->config);
+ dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x",
+ msg_len, msg_left, rctx->datbuf.size, rctx->config);
return i;
}
-static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
+static int tegra_sha_do_init(struct ahash_request *req)
{
- int i;
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
- rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
-}
+ if (ctx->fallback)
+ return tegra_sha_fallback_init(req);
-static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
-{
- int i;
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task |= SHA_FIRST;
+ rctx->alg = ctx->alg;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+ rctx->digest.size = crypto_ahash_digestsize(tfm);
+
+ rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
+ &rctx->digest.addr, GFP_KERNEL);
+ if (!rctx->digest.buf)
+ goto digbuf_fail;
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4;
+ rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size,
+ &rctx->intr_res.addr, GFP_KERNEL);
+ if (!rctx->intr_res.buf)
+ goto intr_res_fail;
+
+ return 0;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
- writel(rctx->result[i],
- se->base + se->hw->regs->result + (i * 4));
+intr_res_fail:
+ dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
+ rctx->residue.addr);
+resbuf_fail:
+ dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+ rctx->digest.addr);
+digbuf_fail:
+ return -ENOMEM;
}
static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct tegra_se *se = ctx->se;
unsigned int nblks, nresidue, size, ret;
- u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+ u32 *cpuvaddr = se->cmdbuf->addr;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
@@ -317,7 +415,6 @@ static int tegra_sha_do_update(struct ahash_request *req)
rctx->src_sg = req->src;
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
- rctx->total_len += rctx->datbuf.size;
/*
* If nbytes are less than a block size, copy it residue and
* return. The bytes will be processed in final()
*/
@@ -326,11 +423,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
if (nblks < 1) {
scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes, 0);
-
rctx->residue.size += req->nbytes;
+
return 0;
}
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -343,29 +445,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
/* Update residue value with the residue after current block */
rctx->residue.size = nresidue;
+ rctx->total_len += rctx->datbuf.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
- SE_SHA_DST_HASH_REG;
-
- /*
- * If this is not the first 'update' call, paste the previous copied
- * intermediate results to the registers so that it gets picked up.
- * This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FIRST))
- tegra_sha_paste_hash_result(ctx->se, rctx);
+ SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
+ size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
- ret = tegra_se_host1x_submit(ctx->se, size);
-
- /*
- * If this is not the final update, copy the intermediate results
- * from the registers so that it can be used in the next 'update'
- * call. This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FINAL))
- tegra_sha_copy_hash_result(ctx->se, rctx);
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
@@ -379,16 +468,25 @@ static int tegra_sha_do_final(struct ahash_request *req)
u32 *cpuvaddr = se->cmdbuf->addr;
int size, ret = 0;
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ if (rctx->residue.size) {
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ }
+
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
-
- ret = tegra_se_host1x_submit(se, size);
+ size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
goto out;
@@ -396,12 +494,18 @@ static int tegra_sha_do_final(struct ahash_request *req)
memcpy(req->result, rctx->digest.buf, rctx->digest.size);
out:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN,
- rctx->datbuf.buf, rctx->datbuf.addr);
+ if (rctx->residue.size)
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
rctx->residue.buf, rctx->residue.addr);
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
+
+ dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf,
+ rctx->intr_res.addr);
+
return ret;
}
@@ -414,16 +518,31 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret = 0;
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_sha_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
if (rctx->task & SHA_UPDATE) {
ret = tegra_sha_do_update(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_sha_do_final(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_FINAL;
}
+out:
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
@@ -497,52 +616,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_sha_init(struct ahash_request *req)
-{
- struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
-
- if (ctx->fallback)
- return tegra_sha_fallback_init(req);
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->key_id = ctx->key_id;
- rctx->task = SHA_FIRST;
- rctx->alg = ctx->alg;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
- rctx->digest.size = crypto_ahash_digestsize(tfm);
-
- rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
- &rctx->digest.addr, GFP_KERNEL);
- if (!rctx->digest.buf)
- goto digbuf_fail;
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- goto resbuf_fail;
-
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- goto datbuf_fail;
-
- return 0;
-
-datbuf_fail:
- dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
- rctx->residue.addr);
-resbuf_fail:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
- rctx->datbuf.addr);
-digbuf_fail:
- return -ENOMEM;
-}
-
static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
unsigned int keylen)
{
@@ -559,13 +632,29 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen))
return tegra_hmac_fallback_setkey(ctx, key, keylen);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret)
+ return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
ctx->fallback = false;
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ return 0;
+}
+
+static int tegra_sha_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_update(struct ahash_request *req)
@@ -615,16 +704,12 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- ret = tegra_sha_init(req);
- if (ret)
- return ret;
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
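
The pair-swap in tegra_se_insert_hash_result() above is easy to sanity-check outside the driver. This standalone snippet (illustrative only, not driver code) reprints the SHA-384/SHA-512 byte-layout table from the patch comment using the same index arithmetic:

	#include <stdio.h>

	int main(void)
	{
		int j;

		/* Digest word j of SHA-384/SHA-512 lives in HASH_RESULT
		 * register j ^ 1, which the driver computes as
		 * (j % 2) ? j - 1 : j + 1.
		 */
		for (j = 0; j < 16; j++) {
			int idx = (j % 2) ? j - 1 : j + 1;

			printf("HASH_RESULT_%-2d holds digest bytes B%d...B%d\n",
			       j, idx * 4, idx * 4 + 3);
		}

		return 0;
	}

The first lines of output match the table: HASH_RESULT_0 holds B4...B7 and HASH_RESULT_1 holds B0...B3.
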
diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c
index ac14678dbd30..956fa9b4e9b1 100644
--- a/drivers/crypto/tegra/tegra-se-key.c
+++ b/drivers/crypto/tegra/tegra-se-key.c
@@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
u32 keylen, u16 slot, u32 alg)
{
const u32 *keyval = (u32 *)key;
- u32 *addr = se->cmdbuf->addr, size;
+ u32 *addr = se->keybuf->addr, size;
+ int ret;
+
+ mutex_lock(&kslt_lock);
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
+ ret = tegra_se_host1x_submit(se, se->keybuf, size);
+
+ mutex_unlock(&kslt_lock);
- return tegra_se_host1x_submit(se, size);
+ return ret;
}
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
@@ -135,6 +141,23 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
tegra_keyslot_free(keyid);
}
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg)
+{
+ u8 zkey[AES_MAX_KEY_SIZE] = {0};
+
+ if (!keyid)
+ return;
+
+ /* Overwrite the key with 0s */
+ tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
+}
+
+inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ return tegra_key_insert(se, key, keylen, *keyid, alg);
+}
+
int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
{
int ret;
@@ -143,7 +166,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3
if (!tegra_key_in_kslt(*keyid)) {
*keyid = tegra_keyslot_alloc();
if (!(*keyid)) {
- dev_err(se->dev, "failed to allocate key slot\n");
+ dev_dbg(se->dev, "failed to allocate key slot\n");
return -ENOMEM;
}
}
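
Caller-side, the two new helpers pair up per request. A usage sketch under the patch's API (example_reserved_cycle is illustrative):

	static int example_reserved_cycle(struct tegra_se *se, const u8 *key,
					  u32 keylen, u32 alg)
	{
		u32 keyid = 0;
		int ret;

		/* Loads the key into slot TEGRA_AES_RESERVED_KSLT (14) */
		ret = tegra_key_submit_reserved_aes(se, key, keylen, alg, &keyid);
		if (ret)
			return ret;

		/* ... submit the SE operation referencing keyid ... */

		/* Overwrite the reserved slot with an all-zero key afterwards */
		tegra_key_invalidate_reserved(se, keyid, alg);

		return 0;
	}
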
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index 918c0b10614d..1c94f1de0546 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
return cmdbuf;
}
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
{
struct host1x_job *job;
int ret;
@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
job->engine_fallback_streamid = se->stream_id;
job->engine_streamid_offset = SE_STREAM_ID;
- se->cmdbuf->words = size;
+ cmdbuf->words = size;
- host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
+ host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev);
if (ret) {
@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
goto syncpt_put;
}
+ se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
+ if (!se->keybuf) {
+ ret = -ENOMEM;
+ goto cmdbuf_put;
+ }
+
ret = se->hw->init_alg(se);
if (ret) {
dev_err(se->dev, "failed to register algorithms\n");
- goto cmdbuf_put;
+ goto keybuf_put;
}
return 0;
+keybuf_put:
+ tegra_se_cmdbuf_put(&se->keybuf->bo);
cmdbuf_put:
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put:
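
With the command buffer now an explicit argument, key programming and regular operations stage their commands in separate DMA buffers, so a setkey() can no longer overwrite commands the engine is still fetching from se->cmdbuf. The two call forms, as used in this patch:

	/* Regular operation: staged in the shared per-engine cmdbuf */
	ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);

	/* Key insertion: staged in the dedicated keybuf, serialized by
	 * kslt_lock in tegra_key_insert()
	 */
	ret = tegra_se_host1x_submit(se, se->keybuf, size);
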
diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h
index b9dd7ceb8783..b6cac9384f66 100644
--- a/drivers/crypto/tegra/tegra-se.h
+++ b/drivers/crypto/tegra/tegra-se.h
@@ -24,6 +24,7 @@
#define SE_STREAM_ID 0x90
#define SE_SHA_CFG 0x4004
+#define SE_SHA_IN_ADDR 0x400c
#define SE_SHA_KEY_ADDR 0x4094
#define SE_SHA_KEY_DATA 0x4098
#define SE_SHA_KEYMANIFEST 0x409c
@@ -340,12 +341,14 @@
#define SE_CRYPTO_CTR_REG_COUNT 4
#define SE_MAX_KEYSLOT 15
#define SE_MAX_MEM_ALLOC SZ_4M
-#define SE_AES_BUFLEN 0x8000
-#define SE_SHA_BUFLEN 0x2000
+
+#define TEGRA_AES_RESERVED_KSLT 14
+#define TEGRA_XTS_RESERVED_KSLT 15
#define SHA_FIRST BIT(0)
-#define SHA_UPDATE BIT(1)
-#define SHA_FINAL BIT(2)
+#define SHA_INIT BIT(1)
+#define SHA_UPDATE BIT(2)
+#define SHA_FINAL BIT(3)
/* Security Engine operation modes */
enum se_aes_alg {
@@ -420,6 +423,7 @@ struct tegra_se {
struct host1x_client client;
struct host1x_channel *channel;
struct tegra_se_cmdbuf *cmdbuf;
+ struct tegra_se_cmdbuf *keybuf;
struct crypto_engine *engine;
struct host1x_syncpt *syncpt;
struct device *dev;
@@ -501,8 +505,33 @@ void tegra_deinit_aes(struct tegra_se *se);
void tegra_deinit_hash(struct tegra_se *se);
int tegra_key_submit(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid);
+
+int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid);
+
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
-int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
+void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg);
+int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
+
+static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ *keyid = TEGRA_AES_RESERVED_KSLT;
+ return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key,
+ u32 keylen, u32 alg, u32 *keyid)
+{
+ *keyid = TEGRA_XTS_RESERVED_KSLT;
+ return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
+}
+
+static inline bool tegra_key_is_reserved(u32 keyid)
+{
+ return ((keyid == TEGRA_AES_RESERVED_KSLT) ||
+ (keyid == TEGRA_XTS_RESERVED_KSLT));
+}
/* HOST1x OPCODES */
static inline u32 host1x_opcode_setpayload(unsigned int payload)