Diffstat (limited to 'drivers/crypto/chelsio/chcr_algo.c')
-rw-r--r--   drivers/crypto/chelsio/chcr_algo.c | 100
1 file changed, 72 insertions, 28 deletions
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index caf1136e7ef9..f26a7a15551a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -255,7 +255,7 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
 		return;
 	}
 	for (i = 0; i < nk; i++)
-		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+		w_ring[i] = get_unaligned_be32(&key[i * 4]);
 
 	i = 0;
 	temp = w_ring[nk - 1];
@@ -274,7 +274,7 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
 	}
 	i--;
 	for (k = 0, j = i % nk; k < nk; k++) {
-		*((u32 *)dec_key + k) = htonl(w_ring[j]);
+		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
 		j--;
 		if (j < 0)
 			j += nk;
@@ -1053,8 +1053,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
 	u32 temp = be32_to_cpu(*--b);
 
 	temp = ~temp;
-	c = (u64)temp + 1; // No of block can processed withou overflow
-	if ((bytes / AES_BLOCK_SIZE) > c)
+	c = (u64)temp + 1; // No of block can processed without overflow
+	if ((bytes / AES_BLOCK_SIZE) >= c)
 		bytes = c * AES_BLOCK_SIZE;
 	return bytes;
 }
@@ -1076,7 +1076,14 @@ static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
 
 	keylen = ablkctx->enckey_len / 2;
 	key = ablkctx->key + keylen;
-	ret = aes_expandkey(&aes, key, keylen);
+	/* For a 192 bit key remove the padded zeroes which was
+	 * added in chcr_xts_setkey
+	 */
+	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
+			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
+		ret = aes_expandkey(&aes, key, keylen - 8);
+	else
+		ret = aes_expandkey(&aes, key, keylen);
 	if (ret)
 		return ret;
 	aes_encrypt(&aes, iv, iv);
@@ -1157,15 +1164,16 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
 static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct chcr_context *ctx = c_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
-	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
-	struct sk_buff *skb;
 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
-	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
-	struct cipher_wr_param wrparam;
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
+	struct chcr_context *ctx = c_ctx(tfm);
+	struct adapter *adap = padap(ctx->dev);
+	struct cipher_wr_param wrparam;
+	struct sk_buff *skb;
 	int bytes;
 
 	if (err)
@@ -1196,6 +1204,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
+		memcpy(req->iv, reqctx->init_iv, IV);
+		atomic_inc(&adap->chcr_stats.fallback);
 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
 					   req->base.flags,
 					   req->src,
@@ -1247,20 +1257,28 @@ static int process_cipher(struct skcipher_request *req,
 				  struct sk_buff **skb,
 				  unsigned short op_type)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct adapter *adap = padap(c_ctx(tfm)->dev);
 	struct cipher_wr_param wrparam;
 	int bytes, err = -EINVAL;
+	int subtype;
 
 	reqctx->processed = 0;
 	reqctx->partial_req = 0;
 	if (!req->iv)
 		goto error;
+	subtype = get_cryptoalg_subtype(tfm);
 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
 	    (req->cryptlen == 0) ||
 	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
+		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
+			goto fallback;
+		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
+			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+			goto fallback;
 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
 		       ablkctx->enckey_len, req->cryptlen, ivsize);
 		goto error;
@@ -1301,12 +1319,10 @@ static int process_cipher(struct skcipher_request *req,
 	} else {
 		bytes = req->cryptlen;
 	}
-	if (get_cryptoalg_subtype(tfm) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR) {
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
 		bytes = adjust_ctr_overflow(req->iv, bytes);
 	}
-	if (get_cryptoalg_subtype(tfm) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
 		       CTR_RFC3686_IV_SIZE);
@@ -1314,20 +1330,25 @@ static int process_cipher(struct skcipher_request *req,
 		/* initialize counter portion of counter block */
 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+		memcpy(reqctx->init_iv, reqctx->iv, IV);
 
 	} else {
 		memcpy(reqctx->iv, req->iv, IV);
+		memcpy(reqctx->init_iv, req->iv, IV);
 	}
 
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
+fallback:	atomic_inc(&adap->chcr_stats.fallback);
 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
 					   req->base.flags,
 					   req->src,
 					   req->dst,
 					   req->cryptlen,
-					   reqctx->iv,
+					   subtype ==
+					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
+					   reqctx->iv : req->iv,
 					   op_type);
 		goto error;
 	}
@@ -1442,6 +1463,7 @@ static int chcr_device_init(struct chcr_context *ctx)
 	if (!ctx->dev) {
 		u_ctx = assign_chcr_device();
 		if (!u_ctx) {
+			err = -ENXIO;
 			pr_err("chcr device assignment fails\n");
 			goto out;
 		}
@@ -1983,7 +2005,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
 	req_ctx->data_len += params.bfr_len + params.sg_len;
 
 	if (req->nbytes == 0) {
-		create_last_hash_block(req_ctx->reqbfr, bs, 0);
+		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.more = 1;
 		params.bfr_len = bs;
 	}
@@ -2249,12 +2271,28 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
 	ablkctx->enckey_len = key_len;
 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
-	ablkctx->key_ctx_hdr =
+	/* Both keys for xts must be aligned to 16 byte boundary
+	 * by padding with zeros. So for 24 byte keys padding 8 zeroes.
+	 */
+	if (key_len == 48) {
+		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
+				+ 16) >> 4;
+		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
+		memset(ablkctx->key + 24, 0, 8);
+		memset(ablkctx->key + 56, 0, 8);
+		ablkctx->enckey_len = 64;
+		ablkctx->key_ctx_hdr =
+			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
+					 CHCR_KEYCTX_NO_KEY, 1,
+					 0, context_size);
+	} else {
+		ablkctx->key_ctx_hdr =
 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
 				 CHCR_KEYCTX_NO_KEY, 1,
 				 0, context_size);
+	}
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
 	return 0;
 badkey_err:
@@ -2555,7 +2593,7 @@ int chcr_aead_dma_map(struct device *dev,
 	int dst_size;
 
 	dst_size = req->assoclen + req->cryptlen + (op_type ?
-				-authsize : authsize);
+				0 : authsize);
 	if (!req->cryptlen || !dst_size)
 		return 0;
 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
@@ -2602,15 +2640,16 @@ void chcr_aead_dma_unmap(struct device *dev,
 	int dst_size;
 
 	dst_size = req->assoclen + req->cryptlen + (op_type ?
-				-authsize : authsize);
+				0 : authsize);
 	if (!req->cryptlen || !dst_size)
 		return;
 
 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
 			 DMA_BIDIRECTIONAL);
 	if (req->src == req->dst) {
-		dma_unmap_sg(dev, req->src, sg_nents(req->src),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, req->src,
+			     sg_nents_for_len(req->src, dst_size),
+			     DMA_BIDIRECTIONAL);
 	} else {
 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
 			     DMA_TO_DEVICE);
@@ -2887,8 +2926,7 @@ static int ccm_format_packet(struct aead_request *req,
 		memcpy(ivptr, req->iv, 16);
 	}
 	if (assoclen)
-		*((unsigned short *)(reqctx->scratch_pad + 16)) =
-				htons(assoclen);
+		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
 
 	rc = generate_b0(req, ivptr, op_type);
 	/* zero the ctr value */
@@ -2909,7 +2947,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 	unsigned int ccm_xtra;
-	unsigned char tag_offset = 0, auth_offset = 0;
+	unsigned int tag_offset = 0, auth_offset = 0;
 	unsigned int assoclen;
 
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
@@ -3162,8 +3200,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	} else {
 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
 	}
-	*((unsigned int *)(ivptr + 12)) = htonl(0x01);
-
+	put_unaligned_be32(0x01, &ivptr[12]);
 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
 
 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
@@ -3701,6 +3738,13 @@ static int chcr_aead_op(struct aead_request *req,
 		return -ENOSPC;
 	}
 
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
+		pr_err("RFC4106: Invalid value of assoclen %d\n",
+		       req->assoclen);
+		return -EINVAL;
+	}
+
 	/* Form a WR from req */
 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
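Note on the adjust_ctr_overflow() hunk above: the last 32-bit big-endian word of the AES-CTR IV is the per-block counter, so at most 2^32 - ctr blocks fit before that counter wraps, and clamping with ">=" instead of ">" also catches the case where the request needs exactly that many blocks. The standalone C sketch below is illustrative only (not driver code; the helper names ctr32_from_iv and clamp_to_ctr_space are made up for this example) and walks through the same arithmetic.

/*
 * Userspace sketch of the counter-overflow clamp; mirrors the patched
 * adjust_ctr_overflow() logic under the assumption that bytes 12..15 of
 * the IV hold the big-endian 32-bit block counter.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* Read the last IV word as a big-endian 32-bit counter (like get_unaligned_be32). */
static uint32_t ctr32_from_iv(const uint8_t iv[AES_BLOCK_SIZE])
{
    return ((uint32_t)iv[12] << 24) | ((uint32_t)iv[13] << 16) |
           ((uint32_t)iv[14] << 8)  |  (uint32_t)iv[15];
}

static uint32_t clamp_to_ctr_space(const uint8_t iv[AES_BLOCK_SIZE], uint32_t bytes)
{
    uint64_t blocks_left = (uint64_t)(~ctr32_from_iv(iv)) + 1; /* 2^32 - ctr */

    if (bytes / AES_BLOCK_SIZE >= blocks_left)
        bytes = blocks_left * AES_BLOCK_SIZE;
    return bytes;
}

int main(void)
{
    uint8_t iv[AES_BLOCK_SIZE] = { 0 };

    /* Counter already at 0xfffffffe: only two blocks fit before the wrap. */
    memset(iv + 12, 0xff, 3);
    iv[15] = 0xfe;
    printf("%u\n", clamp_to_ctr_space(iv, 10 * AES_BLOCK_SIZE)); /* prints 32 */
    return 0;
}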
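Similarly, the chcr_aes_xts_setkey() hunk pads each 24-byte half of a 48-byte AES-192 XTS key out to a 16-byte boundary, producing a 64-byte key buffer (key1, 8 zero bytes, key2, 8 zero bytes); chcr_update_tweak() then strips those 8 pad bytes again before calling aes_expandkey(). A minimal userspace sketch of that layout follows (hypothetical helper, not the driver function).

/*
 * Illustrative sketch only: the key buffer layout built by the
 * chcr_aes_xts_setkey() hunk for key_len == 48 (2 x AES-192 keys).
 */
#include <stdint.h>
#include <string.h>

static void pad_xts_aes192_key(uint8_t buf[64], const uint8_t key[48])
{
    memcpy(buf, key, 48);             /* key1 (24 bytes) | key2 (24 bytes) */
    memmove(buf + 32, buf + 24, 24);  /* move key2 up to offset 32 */
    memset(buf + 24, 0, 8);           /* zero pad after key1 */
    memset(buf + 56, 0, 8);           /* zero pad after key2 */
}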