Diffstat (limited to 'drivers/crypto/tegra/tegra-se-hash.c')
 drivers/crypto/tegra/tegra-se-hash.c | 287 ++++++++++++++++++----------
 1 file changed, 186 insertions(+), 101 deletions(-)
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 0b5cdd5676b1..42d007b7af45 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -34,6 +34,7 @@ struct tegra_sha_reqctx {
struct tegra_se_datbuf datbuf;
struct tegra_se_datbuf residue;
struct tegra_se_datbuf digest;
+ struct tegra_se_datbuf intr_res;
unsigned int alg;
unsigned int config;
unsigned int total_len;
@@ -211,9 +212,62 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
return crypto_ahash_export(&rctx->fallback_req, out);
}
-static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
+static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
+ struct tegra_sha_reqctx *rctx)
+{
+ __be32 *res_be = (__be32 *)rctx->intr_res.buf;
+ u32 *res = (u32 *)rctx->intr_res.buf;
+ int i = 0, j;
+
+ cpuvaddr[i++] = 0;
+ cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT);
+
+ for (j = 0; j < HASH_RESULT_REG_COUNT; j++) {
+ int idx = j;
+
+ /*
+ * The initial, intermediate and final hash values of SHA-384 and
+ * SHA-512 are stored in the SHA_HASH_RESULT registers with the
+ * following byte layout.
+ *
+ * +---------------+------------+
+ * | HASH_RESULT_0 | B4...B7 |
+ * +---------------+------------+
+ * | HASH_RESULT_1 | B0...B3 |
+ * +---------------+------------+
+ * | HASH_RESULT_2 | B12...B15 |
+ * +---------------+------------+
+ * | HASH_RESULT_3 | B8...B11 |
+ * +---------------+------------+
+ * | ...... |
+ * +---------------+------------+
+ * | HASH_RESULT_14| B60...B63 |
+ * +---------------+------------+
+ * | HASH_RESULT_15| B56...B59 |
+ * +---------------+------------+
+ *
+ */
+ if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512)
+ idx = (j % 2) ? j - 1 : j + 1;
+
+ /*
+ * For SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512, the
+ * initial, intermediate and final hash values are stored in
+ * the SHA_HASH_RESULT registers in big-endian (NOT
+ * little-endian) byte order.
+ */
+ if (ctx->alg <= SE_ALG_SHA512)
+ cpuvaddr[i++] = be32_to_cpu(res_be[idx]);
+ else
+ cpuvaddr[i++] = res[idx];
+ }
+
+ return i;
+}
+
+static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
struct tegra_sha_reqctx *rctx)
{
+ struct tegra_se *se = ctx->se;
u64 msg_len, msg_left;
int i = 0;
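/*
 * [Editorial sketch, not part of the patch] How a canonical SHA-512
 * digest could be assembled on the CPU from an intr_res snapshot laid
 * out as in the table above. Assumes each saved register word is
 * big-endian in memory, as the driver's be32_to_cpu() use suggests;
 * sha512_snapshot_to_digest() is a hypothetical helper, and
 * HASH_RESULT_REG_COUNT is 16 per the table.
 */
static void sha512_snapshot_to_digest(const __be32 *res, u8 *digest)
{
	int j;

	for (j = 0; j < HASH_RESULT_REG_COUNT; j++) {
		/* Undo the pairwise half-word swap: 0<->1, 2<->3, ... */
		int idx = (j % 2) ? j - 1 : j + 1;
		u32 w = be32_to_cpu(res[idx]);

		digest[4 * j + 0] = w >> 24;
		digest[4 * j + 1] = w >> 16;
		digest[4 * j + 2] = w >> 8;
		digest[4 * j + 3] = w;
	}
}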
@@ -241,7 +295,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = upper_32_bits(msg_left);
cpuvaddr[i++] = 0;
cpuvaddr[i++] = 0;
- cpuvaddr[i++] = host1x_opcode_setpayload(6);
+ cpuvaddr[i++] = host1x_opcode_setpayload(2);
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG);
cpuvaddr[i++] = rctx->config;
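/*
 * [Editorial note] host1x_opcode_setpayload(n) declares how many data
 * words the following incr_w opcode carries. The payload shrinks from
 * 6 to 2 in the hunk above because only SE_SHA_CFG and the task-config
 * word are written at this point; the IN/OUT address words are now
 * programmed by a separate setpayload(4) + incr_w(SE_SHA_IN_ADDR)
 * sequence further down.
 */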
@@ -249,15 +303,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT;
rctx->task &= ~SHA_FIRST;
} else {
- cpuvaddr[i++] = 0;
+ /*
+ * If this isn't the first task, program the HASH_RESULT
+ * registers with the intermediate result of the previous task.
+ */
+ i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx);
}
+ cpuvaddr[i++] = host1x_opcode_setpayload(4);
+ cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR);
cpuvaddr[i++] = rctx->datbuf.addr;
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
SE_ADDR_HI_SZ(rctx->datbuf.size));
- cpuvaddr[i++] = rctx->digest.addr;
- cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
- SE_ADDR_HI_SZ(rctx->digest.size));
+
+ if (rctx->task & SHA_UPDATE) {
+ cpuvaddr[i++] = rctx->intr_res.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) |
+ SE_ADDR_HI_SZ(rctx->intr_res.size));
+ } else {
+ cpuvaddr[i++] = rctx->digest.addr;
+ cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
+ SE_ADDR_HI_SZ(rctx->digest.size));
+ }
+
if (rctx->key_id) {
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG);
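/*
 * [Editorial sketch] The OUT address selection above, restated as a
 * hypothetical helper view: an update task streams the intermediate
 * state to the per-request intr_res buffer (re-injected by
 * tegra_se_insert_hash_result() on the next task); only the final
 * task targets the digest buffer.
 *
 *	struct tegra_se_datbuf *dst = (rctx->task & SHA_UPDATE) ?
 *				      &rctx->intr_res : &rctx->digest;
 */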
@@ -266,42 +334,72 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION);
- cpuvaddr[i++] = SE_SHA_OP_WRSTALL |
- SE_SHA_OP_START |
+ cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START |
SE_SHA_OP_LASTBUF;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
- dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x",
- msg_len, msg_left, rctx->config);
+ dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x",
+ msg_len, msg_left, rctx->datbuf.size, rctx->config);
return i;
}
-static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
+static int tegra_sha_do_init(struct ahash_request *req)
{
- int i;
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
- rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
-}
+ if (ctx->fallback)
+ return tegra_sha_fallback_init(req);
-static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
-{
- int i;
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task |= SHA_FIRST;
+ rctx->alg = ctx->alg;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+ rctx->digest.size = crypto_ahash_digestsize(tfm);
+
+ rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
+ &rctx->digest.addr, GFP_KERNEL);
+ if (!rctx->digest.buf)
+ goto digbuf_fail;
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4;
+ rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size,
+ &rctx->intr_res.addr, GFP_KERNEL);
+ if (!rctx->intr_res.buf)
+ goto intr_res_fail;
+
+ return 0;
- for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
- writel(rctx->result[i],
- se->base + se->hw->regs->result + (i * 4));
+intr_res_fail:
+ dma_free_coherent(se->dev, rctx->residue.size, rctx->residue.buf,
+ rctx->residue.addr);
+resbuf_fail:
+ dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+ rctx->digest.addr);
+digbuf_fail:
+ return -ENOMEM;
}
static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct tegra_se *se = ctx->se;
unsigned int nblks, nresidue, size, ret;
- u32 *cpuvaddr = ctx->se->cmdbuf->addr;
+ u32 *cpuvaddr = se->cmdbuf->addr;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
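/*
 * [Editorial note] Buffer lifetimes after this hunk: digest, residue
 * and the new intr_res buffer (HASH_RESULT_REG_COUNT * 4 = 64 bytes)
 * are allocated once per request in tegra_sha_do_init() on the engine
 * thread and freed in tegra_sha_do_final(). The data buffer is no
 * longer a fixed SE_SHA_BUFLEN allocation made at init time; it is
 * now allocated per update call, sized to an exact number of blocks.
 */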
@@ -317,7 +415,6 @@ static int tegra_sha_do_update(struct ahash_request *req)
rctx->src_sg = req->src;
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
- rctx->total_len += rctx->datbuf.size;
/*
* If nbytes is less than a block size, copy it to the residue and
@@ -326,11 +423,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
if (nblks < 1) {
scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes, 0);
-
rctx->residue.size += req->nbytes;
+
return 0;
}
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf)
+ return -ENOMEM;
+
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -343,29 +445,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
/* Update residue value with the residue after current block */
rctx->residue.size = nresidue;
+ rctx->total_len += rctx->datbuf.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
- SE_SHA_DST_HASH_REG;
-
- /*
- * If this is not the first 'update' call, paste the previous copied
- * intermediate results to the registers so that it gets picked up.
- * This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FIRST))
- tegra_sha_paste_hash_result(ctx->se, rctx);
+ SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
+ size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
- ret = tegra_se_host1x_submit(ctx->se, size);
-
- /*
- * If this is not the final update, copy the intermediate results
- * from the registers so that it can be used in the next 'update'
- * call. This is to support the import/export functionality.
- */
- if (!(rctx->task & SHA_FINAL))
- tegra_sha_copy_hash_result(ctx->se, rctx);
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
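/*
 * [Worked example, editorial] blk_size = 64, residue.size = 24,
 * req->nbytes = 150:
 *
 *	nresidue    = (150 + 24) % 64 = 46   (kept for the next call)
 *	nblks       = (150 + 24) / 64 = 2
 *	datbuf.size = 174 - 46 = 128         (always whole blocks)
 *
 * total_len is now bumped only after the buffer is in place, so a
 * failed dma_alloc_coherent() can no longer inflate the running
 * length.
 */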
@@ -379,16 +468,25 @@ static int tegra_sha_do_final(struct ahash_request *req)
u32 *cpuvaddr = se->cmdbuf->addr;
int size, ret = 0;
- memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ if (rctx->residue.size) {
+ rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
+ &rctx->datbuf.addr, GFP_KERNEL);
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
+ }
+
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_MEMORY;
- size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
-
- ret = tegra_se_host1x_submit(se, size);
+ size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
+ ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
goto out;
@@ -396,12 +494,18 @@ static int tegra_sha_do_final(struct ahash_request *req)
memcpy(req->result, rctx->digest.buf, rctx->digest.size);
out:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN,
- rctx->datbuf.buf, rctx->datbuf.addr);
+ if (rctx->residue.size)
+ dma_free_coherent(se->dev, rctx->datbuf.size,
+ rctx->datbuf.buf, rctx->datbuf.addr);
+out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
rctx->residue.buf, rctx->residue.addr);
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
+
+ dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf,
+ rctx->intr_res.addr);
+
return ret;
}
@@ -414,16 +518,31 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret = 0;
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_sha_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
if (rctx->task & SHA_UPDATE) {
ret = tegra_sha_do_update(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_sha_do_final(req);
+ if (ret)
+ goto out;
+
rctx->task &= ~SHA_FINAL;
}
+out:
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
@@ -497,52 +616,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_sha_init(struct ahash_request *req)
-{
- struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
-
- if (ctx->fallback)
- return tegra_sha_fallback_init(req);
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->key_id = ctx->key_id;
- rctx->task = SHA_FIRST;
- rctx->alg = ctx->alg;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
- rctx->digest.size = crypto_ahash_digestsize(tfm);
-
- rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
- &rctx->digest.addr, GFP_KERNEL);
- if (!rctx->digest.buf)
- goto digbuf_fail;
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- goto resbuf_fail;
-
- rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
- &rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- goto datbuf_fail;
-
- return 0;
-
-datbuf_fail:
- dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
- rctx->residue.addr);
-resbuf_fail:
- dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
- rctx->datbuf.addr);
-digbuf_fail:
- return -ENOMEM;
-}
-
static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
unsigned int keylen)
{
@@ -559,13 +632,29 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ int ret;
if (aes_check_keylen(keylen))
return tegra_hmac_fallback_setkey(ctx, key, keylen);
+ ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ if (ret)
+ return tegra_hmac_fallback_setkey(ctx, key, keylen);
+
ctx->fallback = false;
- return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
+ return 0;
+}
+
+static int tegra_sha_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_update(struct ahash_request *req)
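/*
 * [Editorial note] Fallback decisions in tegra_hmac_setkey() above:
 *
 *	keylen not AES-valid     -> software fallback
 *	tegra_key_submit() fails -> software fallback (keyslot
 *				    exhaustion is one plausible cause;
 *				    an assumption, not stated in the
 *				    patch)
 *	otherwise                -> hardware path, ctx->fallback = false
 */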
@@ -615,16 +704,12 @@ static int tegra_sha_digest(struct ahash_request *req)
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- ret = tegra_sha_init(req);
- if (ret)
- return ret;
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
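
For reference, a usage sketch (editorial, not part of the patch) showing how a
caller drives the init/update/final flow this patch moves onto the crypto
engine, via the generic ahash API. sha256_oneshot() is a hypothetical helper;
the data buffer must be DMA-able (not on the stack) for drivers like this one.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sha256_oneshot(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);	/* may bind tegra-se */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* digest() sets SHA_INIT | SHA_UPDATE | SHA_FINAL in this driver */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}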