Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/caam/caamalg.c        11
-rw-r--r--  drivers/crypto/caam/ctrl.c            5
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c   16
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h    3
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c    3
-rw-r--r--  drivers/crypto/marvell/hash.c        11
-rw-r--r--  drivers/crypto/padlock-aes.c         23
-rw-r--r--  drivers/crypto/padlock-sha.c         18
8 files changed, 33 insertions, 57 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 156aad167cd6..954a64c7757b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -137,7 +137,7 @@ static void dbg_dump_sg(const char *level, const char *prefix_str,
}
buf = it_page + it->offset;
- len = min(tlen, it->length);
+ len = min_t(size_t, tlen, it->length);
print_hex_dump(level, prefix_str, prefix_type, rowsize,
groupsize, buf, len, ascii);
tlen -= len;
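The hunk above compares a size_t byte count (tlen) with the scatterlist entry's unsigned int length; the kernel's min() warns on operands of mismatched types, which is why the fix switches to min_t(). A simplified sketch of the macro, assuming the usual statement-expression form rather than the exact include/linux/kernel.h definition:

    /* Simplified illustration of min_t(): cast both operands to the requested
     * type before comparing, so size_t vs. unsigned int is well defined. */
    #define min_t(type, x, y) ({ type _x = (x); type _y = (y); _x < _y ? _x : _y; })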
@@ -4583,6 +4583,15 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /*
+ * Check support for AES modes not available
+ * on LP devices.
+ */
+ if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+ if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
+ continue;
+
t_alg = caam_alg_alloc(alg);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 72ff19658985..e483b78c6343 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -558,8 +558,9 @@ static int caam_probe(struct platform_device *pdev)
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
- MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
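For the ctrl.c hunk above: adding MCFGR_LONG_PTR to the clear mask means the bit is explicitly cleared when dma_addr_t is 32-bit, instead of being left at whatever value firmware set. A minimal sketch of the read-modify-write this helper is assumed to perform (the real CAAM clrsetbits_32() also handles register endianness; the _sketch name is illustrative only):

    static inline void clrsetbits_32_sketch(u32 __iomem *reg, u32 clear, u32 set)
    {
        u32 v = ioread32(reg);   /* read current register value */
        v &= ~clear;             /* drop every bit named in the clear mask */
        v |= set;                /* apply the requested settings */
        iowrite32(v, reg);       /* write back */
    }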
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index e4ddb921d7b3..56b153805462 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -592,16 +592,18 @@ badkey_err:
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
- int ret = 0;
- struct sge_ofld_txq *q;
struct adapter *adap = netdev2adap(dev);
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+ struct sge_uld_txq *txq;
+ int ret = 0;
local_bh_disable();
- q = &adap->sge.ofldtxq[idx];
- spin_lock(&q->sendq.lock);
- if (q->full)
+ txq = &txq_info->uldtxq[idx];
+ spin_lock(&txq->sendq.lock);
+ if (txq->full)
ret = -1;
- spin_unlock(&q->sendq.lock);
+ spin_unlock(&txq->sendq.lock);
local_bh_enable();
return ret;
}
@@ -674,11 +676,11 @@ static int chcr_device_init(struct chcr_context *ctx)
}
u_ctx = ULD_CTX(ctx);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
- ctx->dev->tx_channel_id = 0;
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_channel_id = rxq_idx;
+ ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
spin_unlock(&ctx->dev->lock_chcr_dev);
}
out:
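The chcr_device_init hunk stops pinning every context to channel 0 and instead flips the device-level tx_channel_id after each initialisation, so successive contexts spread across channels. A hedged illustration, assuming a two-channel adapter:

    /* Illustration only: with the toggle in place, consecutive calls see
     *   call 1: dev->tx_channel_id == 0 -> rxq_idx taken from channel 0's range
     *   call 2: dev->tx_channel_id == 1 -> rxq_idx taken from channel 1's range
     *   call 3: dev->tx_channel_id == 0 again, and so on,
     * whereas the removed assignment forced every context onto channel 0. */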
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index ec64fbcdeb49..199b0bb69b89 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -422,7 +422,7 @@ static inline void get_aes_decrypt_key(unsigned char *dec_key,
{
u32 temp;
u32 w_ring[MAX_NK];
- int i, j, k = 0;
+ int i, j, k;
u8 nr, nk;
switch (keylength) {
@@ -460,6 +460,7 @@ static inline void get_aes_decrypt_key(unsigned char *dec_key,
temp = w_ring[i % nk];
i++;
}
+ i--;
for (k = 0, j = i % nk; k < nk; k++) {
*((u32 *)dec_key + k) = htonl(w_ring[j]);
j--;
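The chcr_algo.h hunk fixes an off-by-one in the decrypt-key derivation: the expansion loop increments i once more after producing the last round-key word, so j = i % nk would start the copy-back one slot past it. A worked example, assuming the loop runs until all 4 * (nr + 1) expanded words exist (AES-128, nk = 4, nr = 10):

    /* Assumed AES-128 walk-through: 4 * (10 + 1) = 44 words are produced, so the
     * loop exits with i == 44 and i % nk == 0, the wrong ring slot.  After the
     * added i--, i == 43 and i % nk == 3, the slot holding the last word written,
     * so the final round-key words are copied from the right place. */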
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index fb5f9bbfa09c..4d7f6700fd7e 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -42,6 +42,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
.rxq_size = 1024,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
@@ -126,7 +127,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
int chcr_send_wr(struct sk_buff *skb)
{
- return cxgb4_ofld_send(skb->dev, skb);
+ return cxgb4_crypto_send(skb->dev, skb);
}
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 9f284682c091..77712b375b84 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -168,12 +168,11 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_adjust_op(engine, &creq->op_tmpl);
memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
- digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- for (i = 0; i < digsize / 4; i++)
- writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
-
- mv_cesa_adjust_op(engine, &creq->op_tmpl);
- memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+ if (!sreq->offset) {
+ digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ for (i = 0; i < digsize / 4; i++)
+ writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+ }
if (creq->cache_ptr)
memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 441e86b23571..b3869748cc6b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -183,8 +183,8 @@ static inline void padlock_store_cword(struct cword *cword)
/*
* While the padlock instructions don't use FP/SSE registers, they
- * generate a spurious DNA fault when cr0.ts is '1'. These instructions
- * should be used only inside the irq_ts_save/restore() context
+ * generate a spurious DNA fault when CR0.TS is '1'. Fortunately,
+ * the kernel doesn't use CR0.TS.
*/
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
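Every irq_ts_save()/irq_ts_restore() pair removed in the hunks below existed only to clear CR0.TS around the PadLock instructions so the spurious Device-Not-Available fault could not fire from interrupt context; since the kernel no longer sets CR0.TS, the pairs are dead code. A rough reconstruction of what the removed save helper used to do (the _sketch name and body are illustrative, not the exact asm/fpu definition):

    static inline int irq_ts_save_sketch(void)
    {
        if (!in_interrupt())
            return 0;        /* process context: nothing to do */
        if (read_cr0() & X86_CR0_TS) {
            clts();          /* clear TS so the xcrypt/xsha insn cannot fault */
            return 1;        /* tell the caller to set TS back afterwards */
        }
        return 0;
    }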
@@ -298,24 +298,18 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
- ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
- ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -346,14 +340,12 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
@@ -361,7 +353,6 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
@@ -375,14 +366,12 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.decrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
@@ -390,7 +379,6 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
@@ -425,14 +413,12 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
@@ -442,7 +428,6 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.decrypt);
@@ -456,14 +441,12 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -472,8 +455,6 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
-
padlock_store_cword(&ctx->cword.encrypt);
return err;
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 8c5f90647b7a..bc72d20c32c3 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -89,7 +89,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
struct sha1_state state;
unsigned int space;
unsigned int leftover;
- int ts_state;
int err;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -120,14 +119,11 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
memcpy(result, &state.state, SHA1_DIGEST_SIZE);
- /* prevent taking the spurious DNA fault with padlock. */
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
- irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
@@ -155,7 +151,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
struct sha256_state state;
unsigned int space;
unsigned int leftover;
- int ts_state;
int err;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -186,14 +181,11 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
memcpy(result, &state.state, SHA256_DIGEST_SIZE);
- /* prevent taking the spurious DNA fault with padlock. */
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
- irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
@@ -312,7 +304,6 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- int ts_state;
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -328,23 +319,19 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
memcpy(sctx->buffer + partial, data,
done + SHA1_BLOCK_SIZE);
src = sctx->buffer;
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst) \
: "a"((long)-1), "c"((unsigned long)1));
- irq_ts_restore(ts_state);
done += SHA1_BLOCK_SIZE;
src = data + done;
}
/* Process the left bytes from the input data */
if (len - done >= SHA1_BLOCK_SIZE) {
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
- irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
src = data + done;
}
@@ -401,7 +388,6 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- int ts_state;
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -417,23 +403,19 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
memcpy(sctx->buf + partial, data,
done + SHA256_BLOCK_SIZE);
src = sctx->buf;
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1), "c"((unsigned long)1));
- irq_ts_restore(ts_state);
done += SHA256_BLOCK_SIZE;
src = data + done;
}
/* Process the left bytes from input data*/
if (len - done >= SHA256_BLOCK_SIZE) {
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / 64)));
- irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % 64);
src = data + done;
}