Diffstat (limited to 'drivers/crypto/chelsio')
-rw-r--r--   drivers/crypto/chelsio/chcr_algo.c        | 103
-rw-r--r--   drivers/crypto/chelsio/chcr_algo.h        |   4
-rw-r--r--   drivers/crypto/chelsio/chcr_core.c        |  23
-rw-r--r--   drivers/crypto/chelsio/chcr_core.h        |  10
-rw-r--r--   drivers/crypto/chelsio/chcr_crypto.h      |   1
-rw-r--r--   drivers/crypto/chelsio/chcr_ipsec.c       |   6
-rw-r--r--   drivers/crypto/chelsio/chcr_ktls.c        | 196
-rw-r--r--   drivers/crypto/chelsio/chcr_ktls.h        |   9
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls_cm.c   | 221
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls_cm.h   |   1
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls_io.c   |   2
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls_main.c |  16
12 files changed, 390 insertions, 202 deletions
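Editor's note: a recurring cleanup in the patch below replaces unaligned pointer casts such as *(u32 *)&key[4 * i] with the kernel's get_unaligned_be32()/put_unaligned_be32()/put_unaligned_be16() accessors. As a minimal sketch of the semantics those helpers provide — a portable userspace restatement under assumed names (the sketch_ identifiers are made up here), not the kernel implementation:

/*
 * Editorial sketch, not part of the patch.  Byte-wise access imposes no
 * alignment requirement on p, which is the point of the conversion in the
 * hunks below.
 */
#include <stdint.h>

static uint32_t sketch_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	/* big-endian: most significant byte first */
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

static void sketch_put_unaligned_be32(uint32_t v, void *p)
{
	uint8_t *b = p;

	b[0] = v >> 24;
	b[1] = v >> 16;
	b[2] = v >> 8;
	b[3] = v;
}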
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index c29b80dd30d8..f26a7a15551a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -44,7 +44,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/crypto.h>
-#include <linux/cryptohash.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/highmem.h>
@@ -256,7 +255,7 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
 		return;
 	}
 	for (i = 0; i < nk; i++)
-		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+		w_ring[i] = get_unaligned_be32(&key[i * 4]);
 
 	i = 0;
 	temp = w_ring[nk - 1];
@@ -275,7 +274,7 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
 	}
 	i--;
 	for (k = 0, j = i % nk; k < nk; k++) {
-		*((u32 *)dec_key + k) = htonl(w_ring[j]);
+		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
 		j--;
 		if (j < 0)
 			j += nk;
@@ -1054,8 +1053,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
 	u32 temp = be32_to_cpu(*--b);
 
 	temp = ~temp;
-	c = (u64)temp + 1; // No of block can processed withou overflow
-	if ((bytes / AES_BLOCK_SIZE) > c)
+	c = (u64)temp + 1; // No of block can processed without overflow
+	if ((bytes / AES_BLOCK_SIZE) >= c)
 		bytes = c * AES_BLOCK_SIZE;
 	return bytes;
 }
@@ -1077,7 +1076,14 @@ static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
 
 	keylen = ablkctx->enckey_len / 2;
 	key = ablkctx->key + keylen;
-	ret = aes_expandkey(&aes, key, keylen);
+	/* For a 192 bit key remove the padded zeroes which was
+	 * added in chcr_xts_setkey
+	 */
+	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
+			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
+		ret = aes_expandkey(&aes, key, keylen - 8);
+	else
+		ret = aes_expandkey(&aes, key, keylen);
 	if (ret)
 		return ret;
 	aes_encrypt(&aes, iv, iv);
@@ -1158,15 +1164,16 @@ static int chcr_final_cipher_iv(struct skcipher_request *req,
 static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct chcr_context *ctx = c_ctx(tfm);
-	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
-	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
-	struct sk_buff *skb;
 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
-	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
-	struct cipher_wr_param wrparam;
+	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
+	struct chcr_context *ctx = c_ctx(tfm);
+	struct adapter *adap = padap(ctx->dev);
+	struct cipher_wr_param wrparam;
+	struct sk_buff *skb;
 	int bytes;
 
 	if (err)
@@ -1197,6 +1204,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
 		if (unlikely(bytes == 0)) {
 			chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 					      req);
+			memcpy(req->iv, reqctx->init_iv, IV);
+			atomic_inc(&adap->chcr_stats.fallback);
 			err = chcr_cipher_fallback(ablkctx->sw_cipher,
 						   req->base.flags,
 						   req->src,
@@ -1248,20 +1257,28 @@ static int process_cipher(struct skcipher_request *req,
 				  struct sk_buff **skb,
 				  unsigned short op_type)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
+	struct adapter *adap = padap(c_ctx(tfm)->dev);
 	struct cipher_wr_param wrparam;
 	int bytes, err = -EINVAL;
+	int subtype;
 
 	reqctx->processed = 0;
 	reqctx->partial_req = 0;
 	if (!req->iv)
 		goto error;
+	subtype = get_cryptoalg_subtype(tfm);
 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
 	    (req->cryptlen == 0) ||
 	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
+		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
+			goto fallback;
+		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
+			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+			goto fallback;
 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
 		       ablkctx->enckey_len, req->cryptlen, ivsize);
 		goto error;
@@ -1302,12 +1319,10 @@ static int process_cipher(struct skcipher_request *req,
 	} else {
 		bytes = req->cryptlen;
 	}
-	if (get_cryptoalg_subtype(tfm) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR) {
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
 		bytes = adjust_ctr_overflow(req->iv, bytes);
 	}
-	if (get_cryptoalg_subtype(tfm) ==
-	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
 		       CTR_RFC3686_IV_SIZE);
@@ -1315,20 +1330,25 @@ static int process_cipher(struct skcipher_request *req,
 		/* initialize counter portion of counter block */
 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+		memcpy(reqctx->init_iv, reqctx->iv, IV);
 
 	} else {
 
 		memcpy(reqctx->iv, req->iv, IV);
+		memcpy(reqctx->init_iv, req->iv, IV);
 	}
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
+fallback:       atomic_inc(&adap->chcr_stats.fallback);
 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
 					   req->base.flags,
 					   req->src,
 					   req->dst,
 					   req->cryptlen,
-					   reqctx->iv,
+					   subtype ==
+					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
+					   reqctx->iv : req->iv,
 					   op_type);
 		goto error;
 	}
@@ -1443,6 +1463,7 @@ static int chcr_device_init(struct chcr_context *ctx)
 	if (!ctx->dev) {
 		u_ctx = assign_chcr_device();
 		if (!u_ctx) {
+			err = -ENXIO;
 			pr_err("chcr device assignment fails\n");
 			goto out;
 		}
@@ -1757,7 +1778,7 @@ static int chcr_ahash_final(struct ahash_request *req)
 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
 	struct chcr_context *ctx = h_ctx(rtfm);
 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-	int error = -EINVAL;
+	int error;
 	unsigned int cpu;
 
 	cpu = get_cpu();
@@ -1984,7 +2005,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
 	req_ctx->data_len += params.bfr_len + params.sg_len;
 
 	if (req->nbytes == 0) {
-		create_last_hash_block(req_ctx->reqbfr, bs, 0);
+		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.more = 1;
 		params.bfr_len = bs;
 	}
@@ -2250,12 +2271,28 @@ static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
 	ablkctx->enckey_len = key_len;
 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
-	ablkctx->key_ctx_hdr =
+	/* Both keys for xts must be aligned to 16 byte boundary
+	 * by padding with zeros. So for 24 byte keys padding 8 zeroes.
+	 */
+	if (key_len == 48) {
+		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
+				+ 16) >> 4;
+		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
+		memset(ablkctx->key + 24, 0, 8);
+		memset(ablkctx->key + 56, 0, 8);
+		ablkctx->enckey_len = 64;
+		ablkctx->key_ctx_hdr =
+			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
+					 CHCR_KEYCTX_NO_KEY, 1,
+					 0, context_size);
+	} else {
+		ablkctx->key_ctx_hdr =
 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
 				 CHCR_KEYCTX_NO_KEY, 1,
 				 0, context_size);
+	}
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
 	return 0;
 badkey_err:
@@ -2556,7 +2593,7 @@ int chcr_aead_dma_map(struct device *dev,
 	int dst_size;
 
 	dst_size = req->assoclen + req->cryptlen + (op_type ?
-				-authsize : authsize);
+				0 : authsize);
 	if (!req->cryptlen || !dst_size)
 		return 0;
 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
@@ -2603,15 +2640,16 @@ void chcr_aead_dma_unmap(struct device *dev,
 	int dst_size;
 
 	dst_size = req->assoclen + req->cryptlen + (op_type ?
-				-authsize : authsize);
+				0 : authsize);
 	if (!req->cryptlen || !dst_size)
 		return;
 
 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
 			 DMA_BIDIRECTIONAL);
 	if (req->src == req->dst) {
-		dma_unmap_sg(dev, req->src, sg_nents(req->src),
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, req->src,
+			     sg_nents_for_len(req->src, dst_size),
+			     DMA_BIDIRECTIONAL);
 	} else {
 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
 			     DMA_TO_DEVICE);
@@ -2888,8 +2926,7 @@ static int ccm_format_packet(struct aead_request *req,
 		memcpy(ivptr, req->iv, 16);
 	}
 	if (assoclen)
-		*((unsigned short *)(reqctx->scratch_pad + 16)) =
-				htons(assoclen);
+		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
 
 	rc = generate_b0(req, ivptr, op_type);
 	/* zero the ctr value */
@@ -2910,7 +2947,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 	unsigned int ccm_xtra;
-	unsigned char tag_offset = 0, auth_offset = 0;
+	unsigned int tag_offset = 0, auth_offset = 0;
 	unsigned int assoclen;
 
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
@@ -3163,8 +3200,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	} else {
 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
 	}
-	*((unsigned int *)(ivptr + 12)) = htonl(0x01);
-
+	put_unaligned_be32(0x01, &ivptr[12]);
 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
 
 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
@@ -3702,6 +3738,13 @@ static int chcr_aead_op(struct aead_request *req,
 			return -ENOSPC;
 	}
 
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
+		pr_err("RFC4106: Invalid value of assoclen %d\n",
+		       req->assoclen);
+		return -EINVAL;
+	}
+
 	/* Form a WR from req */
 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
 
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index f58c2b5c7fc5..d4f6e010dc79 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -389,10 +389,6 @@ static inline void copy_hash_init_values(char *key, int digestsize)
 	}
 }
 
-static const u8 sgl_lengths[20] = {
-	0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15
-};
-
 /* Number of len fields(8) * size of one addr field */
 #define PHYSDSGL_MAX_LEN_SIZE 16
 
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index ffd4ec0c7374..bd8dac806e7a 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -33,6 +33,13 @@ static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
 static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
 
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+static const struct tlsdev_ops chcr_ktls_ops = {
+	.tls_dev_add = chcr_ktls_dev_add,
+	.tls_dev_del = chcr_ktls_dev_del,
+};
+#endif
+
 #ifdef CONFIG_CHELSIO_IPSEC_INLINE
 static void update_netdev_features(void);
 #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
@@ -56,6 +63,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
 #if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
 	.tx_handler = chcr_uld_tx_handler,
 #endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
+#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+	.tlsdev_ops = &chcr_ktls_ops,
+#endif
 };
 
 static void detach_work_fn(struct work_struct *work)
@@ -207,11 +217,6 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
 	}
 	u_ctx->lldi = *lld;
 	chcr_dev_init(u_ctx);
-
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-	if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
-		chcr_enable_ktls(padap(&u_ctx->dev));
-#endif
 out:
 	return u_ctx;
 }
@@ -348,20 +353,12 @@ static void __exit chcr_crypto_exit(void)
 	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
 		adap = padap(&u_ctx->dev);
 		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
-			chcr_disable_ktls(adap);
-#endif
 		list_del(&u_ctx->entry);
 		kfree(u_ctx);
 	}
 	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
 		adap = padap(&u_ctx->dev);
 		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
-			chcr_disable_ktls(adap);
-#endif
 		list_del(&u_ctx->entry);
 		kfree(u_ctx);
 	}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2c09672e00a4..67d77abd6775 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -37,6 +37,7 @@
 #define __CHCR_CORE_H__
 
 #include <crypto/algapi.h>
+#include <net/tls.h>
 #include "t4_hw.h"
 #include "cxgb4.h"
 #include "t4_msg.h"
@@ -223,10 +224,15 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
 void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
 #ifdef CONFIG_CHELSIO_TLS_DEVICE
-void chcr_enable_ktls(struct adapter *adap);
-void chcr_disable_ktls(struct adapter *adap);
 int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
 int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
 int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+extern int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+			     enum tls_offload_ctx_dir direction,
+			     struct tls_crypto_info *crypto_info,
+			     u32 start_offload_tcp_sn);
+extern void chcr_ktls_dev_del(struct net_device *netdev,
+			      struct tls_context *tls_ctx,
+			      enum tls_offload_ctx_dir direction);
 #endif
 #endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 542bebae001f..b3fdbdc25acb 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -302,6 +302,7 @@ struct chcr_skcipher_req_ctx {
 	unsigned int op;
 	u16 imm;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+	u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
 	u16 txqidx;
 	u16 rxqidx;
 };
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 9fd3b9d1ec2f..967babd67a51 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -40,7 +40,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/crypto.h>
-#include <linux/cryptohash.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/highmem.h>
@@ -294,9 +293,6 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
 			return false;
 	}
-	/* Inline single pdu */
-	if (skb_shinfo(skb)->gso_size)
-		return false;
 	return true;
 }
 
@@ -406,7 +402,7 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
 	xo = xfrm_offload(skb);
 
 	aadiv->spi = (esphdr->spi);
-	seqlo = htonl(esphdr->seq_no);
+	seqlo = ntohl(esphdr->seq_no);
 	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
 	memcpy(aadiv->seq_no, &seqno, 8);
 	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
index cd1769ecdc1c..91dee616d15e 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -120,12 +120,10 @@ out:
 static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
 					     int new_state)
 {
-	unsigned long flags;
-
 	/* This function can be called from both rx (interrupt context) and tx
 	 * queue contexts.
 	 */
-	spin_lock_irqsave(&tx_info->lock, flags);
+	spin_lock_bh(&tx_info->lock);
 	switch (tx_info->connection_state) {
 	case KTLS_CONN_CLOSED:
 		tx_info->connection_state = new_state;
@@ -169,7 +167,7 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
 		pr_err("unknown KTLS connection state\n");
 		break;
 	}
-	spin_unlock_irqrestore(&tx_info->lock, flags);
+	spin_unlock_bh(&tx_info->lock);
 
 	return tx_info->connection_state;
 }
@@ -223,6 +221,7 @@ static int chcr_ktls_act_open_req(struct sock *sk,
 	return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 /*
  * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
  * @sk - tcp socket.
@@ -272,6 +271,7 @@ static int chcr_ktls_act_open_req6(struct sock *sk,
 
 	return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
 }
+#endif /* #if IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * chcr_setup_connection:  create a TCB entry so that TP will form tcp packets.
@@ -292,20 +292,26 @@ static int chcr_setup_connection(struct sock *sk,
 	tx_info->atid = atid;
 	tx_info->ip_family = sk->sk_family;
 
-	if (sk->sk_family == AF_INET ||
-	    (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
-	     ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+	if (sk->sk_family == AF_INET) {
 		tx_info->ip_family = AF_INET;
 		ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+#if IS_ENABLED(CONFIG_IPV6)
 	} else {
-		tx_info->ip_family = AF_INET6;
-		ret =
-		cxgb4_clip_get(tx_info->netdev,
-			       (const u32 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8,
-			       1);
-		if (ret)
-			goto out;
-		ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+		if (!sk->sk_ipv6only &&
+		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
+			tx_info->ip_family = AF_INET;
+			ret = chcr_ktls_act_open_req(sk, tx_info, atid);
+		} else {
+			tx_info->ip_family = AF_INET6;
+			ret = cxgb4_clip_get(tx_info->netdev,
+					     (const u32 *)
+					     &sk->sk_v6_rcv_saddr.s6_addr,
+					     1);
+			if (ret)
+				goto out;
+			ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
+		}
+#endif
 	}
 
 	/* if return type is NET_XMIT_CN, msg will be sent but delayed, mark ret
@@ -375,9 +381,9 @@ static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
  * @tls_cts - tls context.
  * @direction - TX/RX crypto direction
  */
-static void chcr_ktls_dev_del(struct net_device *netdev,
-			      struct tls_context *tls_ctx,
-			      enum tls_offload_ctx_dir direction)
+void chcr_ktls_dev_del(struct net_device *netdev,
+		       struct tls_context *tls_ctx,
+		       enum tls_offload_ctx_dir direction)
 {
 	struct chcr_ktls_ofld_ctx_tx *tx_ctx =
 				chcr_get_ktls_tx_context(tls_ctx);
@@ -396,11 +402,13 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 	if (tx_info->l2te)
 		cxgb4_l2t_release(tx_info->l2te);
 
+#if IS_ENABLED(CONFIG_IPV6)
 	/* clear clip entry */
 	if (tx_info->ip_family == AF_INET6)
 		cxgb4_clip_release(netdev,
 				   (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
 				   1);
+#endif
 
 	/* clear tid */
 	if (tx_info->tid != -1) {
@@ -413,6 +421,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 	atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
 	kvfree(tx_info);
 	tx_ctx->chcr_info = NULL;
+	/* release module refcount */
+	module_put(THIS_MODULE);
 }
 
 /*
@@ -424,10 +434,10 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
  * @direction - TX/RX crypto direction
  * return: SUCCESS/FAILURE.
  */
-static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
-			     enum tls_offload_ctx_dir direction,
-			     struct tls_crypto_info *crypto_info,
-			     u32 start_offload_tcp_sn)
+int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+		      enum tls_offload_ctx_dir direction,
+		      struct tls_crypto_info *crypto_info,
+		      u32 start_offload_tcp_sn)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
@@ -491,12 +501,16 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 		goto out2;
 
 	/* get peer ip */
-	if (sk->sk_family == AF_INET ||
-	    (sk->sk_family == AF_INET6 && !sk->sk_ipv6only &&
-	     ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
+	if (sk->sk_family == AF_INET) {
 		memcpy(daaddr, &sk->sk_daddr, 4);
+#if IS_ENABLED(CONFIG_IPV6)
 	} else {
-		memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+		if (!sk->sk_ipv6only &&
+		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)
+			memcpy(daaddr, &sk->sk_daddr, 4);
+		else
+			memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+#endif
 	}
 
 	/* get the l2t index */
@@ -530,6 +544,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	if (ret)
 		goto out2;
 
+	/* Driver shouldn't be removed until any single connection exists */
+	if (!try_module_get(THIS_MODULE)) {
+		ret = -EINVAL;
+		goto out2;
+	}
+
 	atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
 	return 0;
 out2:
@@ -539,43 +559,6 @@ out:
 	return ret;
 }
 
-static const struct tlsdev_ops chcr_ktls_ops = {
-	.tls_dev_add = chcr_ktls_dev_add,
-	.tls_dev_del = chcr_ktls_dev_del,
-};
-
-/*
- * chcr_enable_ktls:  add NETIF_F_HW_TLS_TX flag in all the ports.
- */
-void chcr_enable_ktls(struct adapter *adap)
-{
-	struct net_device *netdev;
-	int i;
-
-	for_each_port(adap, i) {
-		netdev = adap->port[i];
-		netdev->features |= NETIF_F_HW_TLS_TX;
-		netdev->hw_features |= NETIF_F_HW_TLS_TX;
-		netdev->tlsdev_ops = &chcr_ktls_ops;
-	}
-}
-
-/*
- * chcr_disable_ktls:  remove NETIF_F_HW_TLS_TX flag from all the ports.
- */
-void chcr_disable_ktls(struct adapter *adap)
-{
-	struct net_device *netdev;
-	int i;
-
-	for_each_port(adap, i) {
-		netdev = adap->port[i];
-		netdev->features &= ~NETIF_F_HW_TLS_TX;
-		netdev->hw_features &= ~NETIF_F_HW_TLS_TX;
-		netdev->tlsdev_ops = NULL;
-	}
-}
-
 /*
  * chcr_init_tcb_fields:  Initialize tcb fields to handle TCP seq number
  *			  handling.
@@ -675,41 +658,14 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
 	return 0;
 }
 
-/*
- * chcr_write_cpl_set_tcb_ulp: update tcb values.
- * TCB is responsible to create tcp headers, so all the related values
- * should be correctly updated.
- * @tx_info - driver specific tls info.
- * @q - tx queue on which packet is going out.
- * @tid - TCB identifier.
- * @pos - current index where should we start writing.
- * @word - TCB word.
- * @mask - TCB word related mask.
- * @val - TCB word related value.
- * @reply - set 1 if looking for TP response.
- * return - next position to write.
- */
-static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
-					struct sge_eth_txq *q, u32 tid,
-					void *pos, u16 word, u64 mask,
+static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+					u32 tid, void *pos, u16 word, u64 mask,
 					u64 val, u32 reply)
 {
 	struct cpl_set_tcb_field_core *cpl;
 	struct ulptx_idata *idata;
 	struct ulp_txpkt *txpkt;
-	void *save_pos = NULL;
-	u8 buf[48] = {0};
-	int left;
 
-	left = (void *)q->q.stat - pos;
-	if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
-		if (!left) {
-			pos = q->q.desc;
-		} else {
-			save_pos = pos;
-			pos = buf;
-		}
-	}
 	/* ULP_TXPKT */
 	txpkt = pos;
 	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
@@ -734,18 +690,54 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
 	idata = (struct ulptx_idata *)(cpl + 1);
 	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
 	idata->len = htonl(0);
+	pos = idata + 1;
 
-	if (save_pos) {
-		pos = chcr_copy_to_txd(buf, &q->q, save_pos,
-				       CHCR_SET_TCB_FIELD_LEN);
-	} else {
-		/* check again if we are at the end of the queue */
-		if (left == CHCR_SET_TCB_FIELD_LEN)
+	return pos;
+}
+
+
+/*
+ * chcr_write_cpl_set_tcb_ulp: update tcb values.
+ * TCB is responsible to create tcp headers, so all the related values
+ * should be correctly updated.
+ * @tx_info - driver specific tls info.
+ * @q - tx queue on which packet is going out.
+ * @tid - TCB identifier.
+ * @pos - current index where should we start writing.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @reply - set 1 if looking for TP response.
+ * return - next position to write.
+ */
+static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+					struct sge_eth_txq *q, u32 tid,
+					void *pos, u16 word, u64 mask,
+					u64 val, u32 reply)
+{
+	int left = (void *)q->q.stat - pos;
+
+	if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
+		if (!left) {
 			pos = q->q.desc;
-		else
-			pos = idata + 1;
+		} else {
+			u8 buf[48] = {0};
+
+			__chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+						     mask, val, reply);
+
+			return chcr_copy_to_txd(buf, &q->q, pos,
+						CHCR_SET_TCB_FIELD_LEN);
+		}
 	}
+
+	pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+					   mask, val, reply);
+
+	/* check again if we are at the end of the queue */
+	if (left == CHCR_SET_TCB_FIELD_LEN)
+		pos = q->q.desc;
+
 	return pos;
 }
 
@@ -925,7 +917,9 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 	struct fw_eth_tx_pkt_wr *wr;
 	struct cpl_tx_pkt_core *cpl;
 	u32 ctrl, iplen, maclen;
+#if IS_ENABLED(CONFIG_IPV6)
 	struct ipv6hdr *ip6;
+#endif
 	unsigned int ndesc;
 	struct tcphdr *tcp;
 	int len16, pktlen;
@@ -980,9 +974,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 		/* we need to correct ip header len */
 		ip = (struct iphdr *)(buf + maclen);
 		ip->tot_len = htons(pktlen - maclen);
+#if IS_ENABLED(CONFIG_IPV6)
 	} else {
 		ip6 = (struct ipv6hdr *)(buf + maclen);
 		ip6->payload_len = htons(pktlen - maclen - iplen);
+#endif
 	}
 	/* now take care of the tcp header, if fin is not set then clear push
 	 * bit as well, and if fin is set, it will be sent at the last so we
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/crypto/chelsio/chcr_ktls.h
index 5a7ae2ca446e..5cbd84b1da05 100644
--- a/drivers/crypto/chelsio/chcr_ktls.h
+++ b/drivers/crypto/chelsio/chcr_ktls.h
@@ -89,10 +89,15 @@ static inline int chcr_get_first_rx_qid(struct adapter *adap)
 	return u_ctx->lldi.rxq_ids[0];
 }
 
-void chcr_enable_ktls(struct adapter *adap);
-void chcr_disable_ktls(struct adapter *adap);
 int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
 int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
 int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
+int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+		      enum tls_offload_ctx_dir direction,
+		      struct tls_crypto_info *crypto_info,
+		      u32 start_offload_tcp_sn);
+void chcr_ktls_dev_del(struct net_device *netdev,
+		       struct tls_context *tls_ctx,
+		       enum tls_offload_ctx_dir direction);
 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
 #endif /* __CHCR_KTLS_H__ */
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index d5720a859443..f200fae6f7cb 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -18,13 +18,20 @@
 #include <linux/kallsyms.h>
 #include <linux/kprobes.h>
 #include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#include <net/ip6_route.h>
 #include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/dst.h>
 #include <net/tls.h>
+#include <net/addrconf.h>
+#include <net/secure_seq.h>
 
 #include "chtls.h"
 #include "chtls_cm.h"
+#include "clip_tbl.h"
 
 /*
  * State transitions and actions for close.  Note that if we are in SYN_SENT
@@ -82,15 +89,40 @@ static void chtls_sock_release(struct kref *ref)
 	kfree(csk);
 }
 
-static struct net_device *chtls_ipv4_netdev(struct chtls_dev *cdev,
+static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
 					    struct sock *sk)
 {
 	struct net_device *ndev = cdev->ports[0];
+#if IS_ENABLED(CONFIG_IPV6)
+	struct net_device *temp;
+	int addr_type;
+#endif
+
+	switch (sk->sk_family) {
+	case PF_INET:
+		if (likely(!inet_sk(sk)->inet_rcv_saddr))
+			return ndev;
+		ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
+		break;
+#if IS_ENABLED(CONFIG_IPV6)
+	case PF_INET6:
+		addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+		if (likely(addr_type == IPV6_ADDR_ANY))
+			return ndev;
+
+		for_each_netdev_rcu(&init_net, temp) {
+			if (ipv6_chk_addr(&init_net, (struct in6_addr *)
+					  &sk->sk_v6_rcv_saddr, temp, 1)) {
+				ndev = temp;
+				break;
+			}
+		}
+		break;
+#endif
+	default:
+		return NULL;
+	}
 
-	if (likely(!inet_sk(sk)->inet_rcv_saddr))
-		return ndev;
-
-	ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
 	if (!ndev)
 		return NULL;
 
@@ -446,7 +478,12 @@ void chtls_destroy_sock(struct sock *sk)
 	free_tls_keyid(sk);
 	kref_put(&csk->kref, chtls_sock_release);
 	csk->cdev = NULL;
-	sk->sk_prot = &tcp_prot;
+	if (sk->sk_family == AF_INET)
+		sk->sk_prot = &tcp_prot;
+#if IS_ENABLED(CONFIG_IPV6)
+	else
+		sk->sk_prot = &tcpv6_prot;
+#endif
 	sk->sk_prot->destroy(sk);
 }
 
@@ -473,7 +510,8 @@ static void chtls_disconnect_acceptq(struct sock *listen_sk)
 	while (*pprev) {
 		struct request_sock *req = *pprev;
 
-		if (req->rsk_ops == &chtls_rsk_ops) {
+		if (req->rsk_ops == &chtls_rsk_ops ||
+		    req->rsk_ops == &chtls_rsk_opsv6) {
 			struct sock *child = req->sk;
 
 			*pprev = req->dl_next;
@@ -597,17 +635,17 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
 int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
 {
 	struct net_device *ndev;
+#if IS_ENABLED(CONFIG_IPV6)
+	bool clip_valid = false;
+#endif
 	struct listen_ctx *ctx;
 	struct adapter *adap;
 	struct port_info *pi;
+	int ret = 0;
 	int stid;
-	int ret;
-
-	if (sk->sk_family != PF_INET)
-		return -EAGAIN;
 
 	rcu_read_lock();
-	ndev = chtls_ipv4_netdev(cdev, sk);
+	ndev = chtls_find_netdev(cdev, sk);
 	rcu_read_unlock();
 	if (!ndev)
 		return -EBADF;
@@ -638,16 +676,39 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
 	if (!listen_hash_add(cdev, sk, stid))
 		goto free_stid;
 
-	ret = cxgb4_create_server(ndev, stid,
-				  inet_sk(sk)->inet_rcv_saddr,
-				  inet_sk(sk)->inet_sport, 0,
-				  cdev->lldi->rxq_ids[0]);
+	if (sk->sk_family == PF_INET) {
+		ret = cxgb4_create_server(ndev, stid,
+					  inet_sk(sk)->inet_rcv_saddr,
+					  inet_sk(sk)->inet_sport, 0,
+					  cdev->lldi->rxq_ids[0]);
+#if IS_ENABLED(CONFIG_IPV6)
+	} else {
+		int addr_type;
+
+		addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
+		if (addr_type != IPV6_ADDR_ANY) {
+			ret = cxgb4_clip_get(ndev, (const u32 *)
+					     &sk->sk_v6_rcv_saddr, 1);
+			if (ret)
+				goto del_hash;
+			clip_valid = true;
+		}
+		ret = cxgb4_create_server6(ndev, stid,
+					   &sk->sk_v6_rcv_saddr,
+					   inet_sk(sk)->inet_sport,
+					   cdev->lldi->rxq_ids[0]);
+#endif
+	}
 	if (ret > 0)
 		ret = net_xmit_errno(ret);
 	if (ret)
 		goto del_hash;
 	return 0;
 del_hash:
+#if IS_ENABLED(CONFIG_IPV6)
+	if (clip_valid)
+		cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
+#endif
 	listen_hash_del(cdev, sk);
 free_stid:
 	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
@@ -671,7 +732,21 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
 	chtls_reset_synq(listen_ctx);
 
 	cxgb4_remove_server(cdev->lldi->ports[0], stid,
-			    cdev->lldi->rxq_ids[0], 0);
+			    cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == PF_INET6) {
+		struct chtls_sock *csk;
+		int addr_type = 0;
+
+		csk = rcu_dereference_sk_user_data(sk);
+		addr_type = ipv6_addr_type((const struct in6_addr *)
+					  &sk->sk_v6_rcv_saddr);
+		if (addr_type != IPV6_ADDR_ANY)
+			cxgb4_clip_release(csk->egress_dev, (const u32 *)
+					   &sk->sk_v6_rcv_saddr, 1);
+	}
+#endif
 	chtls_disconnect_acceptq(sk);
 }
 
@@ -880,7 +955,12 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
 	tp = tcp_sk(sk);
 	tcpoptsz = 0;
 
-	iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6)
+		iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+	else
+#endif
+		iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
 	if (req->tcpopt.tstamp)
 		tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
 
@@ -1027,13 +1107,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 				    const struct cpl_pass_accept_req *req,
 				    struct chtls_dev *cdev)
 {
+	struct neighbour *n = NULL;
 	struct inet_sock *newinet;
 	const struct iphdr *iph;
 	struct tls_context *ctx;
 	struct net_device *ndev;
 	struct chtls_sock *csk;
 	struct dst_entry *dst;
-	struct neighbour *n;
 	struct tcp_sock *tp;
 	struct sock *newsk;
 	u16 port_id;
@@ -1045,11 +1125,31 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	if (!newsk)
 		goto free_oreq;
 
-	dst = inet_csk_route_child_sock(lsk, newsk, oreq);
-	if (!dst)
-		goto free_sk;
+	if (lsk->sk_family == AF_INET) {
+		dst = inet_csk_route_child_sock(lsk, newsk, oreq);
+		if (!dst)
+			goto free_sk;
 
-	n = dst_neigh_lookup(dst, &iph->saddr);
+		n = dst_neigh_lookup(dst, &iph->saddr);
+#if IS_ENABLED(CONFIG_IPV6)
+	} else {
+		const struct ipv6hdr *ip6h;
+		struct flowi6 fl6;
+
+		ip6h = (const struct ipv6hdr *)network_hdr;
+		memset(&fl6, 0, sizeof(fl6));
+		fl6.flowi6_proto = IPPROTO_TCP;
+		fl6.saddr = ip6h->daddr;
+		fl6.daddr = ip6h->saddr;
+		fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
+		fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
+		security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
+		dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
+		if (IS_ERR(dst))
+			goto free_sk;
+		n = dst_neigh_lookup(dst, &ip6h->saddr);
+#endif
+	}
 	if (!n)
 		goto free_sk;
 
@@ -1072,9 +1172,30 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	tp = tcp_sk(newsk);
 	newinet = inet_sk(newsk);
 
-	newinet->inet_daddr = iph->saddr;
-	newinet->inet_rcv_saddr = iph->daddr;
-	newinet->inet_saddr = iph->daddr;
+	if (iph->version == 0x4) {
+		newinet->inet_daddr = iph->saddr;
+		newinet->inet_rcv_saddr = iph->daddr;
+		newinet->inet_saddr = iph->daddr;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else {
+		struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
+		struct inet_request_sock *treq = inet_rsk(oreq);
+		struct ipv6_pinfo *newnp = inet6_sk(newsk);
+		struct ipv6_pinfo *np = inet6_sk(lsk);
+
+		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+		newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
+		newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
+		inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
+		newnp->ipv6_fl_list = NULL;
+		newnp->pktoptions = NULL;
+		newsk->sk_bound_dev_if = treq->ir_iif;
+		newinet->inet_opt = NULL;
+		newinet->inet_daddr = LOOPBACK4_IPV6;
+		newinet->inet_saddr = LOOPBACK4_IPV6;
+#endif
+	}
 
 	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
 	sk_setup_caps(newsk, dst);
@@ -1156,6 +1277,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 	struct sk_buff *reply_skb;
 	struct chtls_sock *csk;
 	struct chtls_dev *cdev;
+	struct ipv6hdr *ip6h;
 	struct tcphdr *tcph;
 	struct sock *newsk;
 	struct ethhdr *eh;
@@ -1196,37 +1318,52 @@ static void chtls_pass_accept_request(struct sock *sk,
 	if (sk_acceptq_is_full(sk))
 		goto reject;
 
-	oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
-	if (!oreq)
-		goto reject;
-
-	oreq->rsk_rcv_wnd = 0;
-	oreq->rsk_window_clamp = 0;
-	oreq->cookie_ts = 0;
-	oreq->mss = 0;
-	oreq->ts_recent = 0;
 
 	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
 	if (eth_hdr_len == ETH_HLEN) {
 		eh = (struct ethhdr *)(req + 1);
 		iph = (struct iphdr *)(eh + 1);
+		ip6h = (struct ipv6hdr *)(eh + 1);
 		network_hdr = (void *)(eh + 1);
 	} else {
 		vlan_eh = (struct vlan_ethhdr *)(req + 1);
 		iph = (struct iphdr *)(vlan_eh + 1);
+		ip6h = (struct ipv6hdr *)(vlan_eh + 1);
 		network_hdr = (void *)(vlan_eh + 1);
 	}
-	if (iph->version != 0x4)
-		goto free_oreq;
 
-	tcph = (struct tcphdr *)(iph + 1);
-	skb_set_network_header(skb, (void *)iph - (void *)req);
+	if (iph->version == 0x4) {
+		tcph = (struct tcphdr *)(iph + 1);
+		skb_set_network_header(skb, (void *)iph - (void *)req);
+		oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
+	} else {
+		tcph = (struct tcphdr *)(ip6h + 1);
+		skb_set_network_header(skb, (void *)ip6h - (void *)req);
+		oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false);
+	}
+
+	if (!oreq)
+		goto reject;
+
+	oreq->rsk_rcv_wnd = 0;
+	oreq->rsk_window_clamp = 0;
+	oreq->cookie_ts = 0;
+	oreq->mss = 0;
+	oreq->ts_recent = 0;
 
 	tcp_rsk(oreq)->tfo_listener = false;
 	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
 	chtls_set_req_port(oreq, tcph->source, tcph->dest);
-	chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
-	ip_dsfield = ipv4_get_dsfield(iph);
+	if (iph->version == 0x4) {
+		chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
+		ip_dsfield = ipv4_get_dsfield(iph);
+#if IS_ENABLED(CONFIG_IPV6)
+	} else {
+		inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+		inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+		ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
+#endif
+	}
 	if (req->tcpopt.wsf <= 14 &&
 	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
 		inet_rsk(oreq)->wscale_ok = 1;
@@ -1243,7 +1380,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 
 	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
 	if (!newsk)
-		goto reject;
+		goto free_oreq;
 
 	if (chtls_get_module(newsk))
 		goto reject;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 3fac0c74a41f..47ba81e42f5d 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -79,6 +79,7 @@ enum {
 typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
 
 extern struct request_sock_ops chtls_rsk_ops;
+extern struct request_sock_ops chtls_rsk_opsv6;
 
 struct deferred_skb_cb {
 	defer_handler_t handler;
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index dccef3a2908b..e1401d9cc756 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
 			make_tx_data_wr(sk, skb, immdlen, len,
 					credits_needed, completion);
 			tp->snd_nxt += len;
-			tp->lsndtime = tcp_time_stamp(tp);
+			tp->lsndtime = tcp_jiffies32;
 			if (completion)
 				ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
 		} else {
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 2110d0893bc7..d98b89d0fa6e 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -13,6 +13,8 @@
 #include <linux/net.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
 #include <net/tcp.h>
 #include <net/tls.h>
 
@@ -30,8 +32,8 @@ static DEFINE_MUTEX(cdev_mutex);
 static DEFINE_MUTEX(notify_mutex);
 static RAW_NOTIFIER_HEAD(listen_notify_list);
-static struct proto chtls_cpl_prot;
-struct request_sock_ops chtls_rsk_ops;
+static struct proto chtls_cpl_prot, chtls_cpl_protv6;
+struct request_sock_ops chtls_rsk_ops, chtls_rsk_opsv6;
 static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;
 
 static void register_listen_notifier(struct notifier_block *nb)
@@ -586,7 +588,10 @@ static struct cxgb4_uld_info chtls_uld_info = {
 
 void chtls_install_cpl_ops(struct sock *sk)
 {
-	sk->sk_prot = &chtls_cpl_prot;
+	if (sk->sk_family == AF_INET)
+		sk->sk_prot = &chtls_cpl_prot;
+	else
+		sk->sk_prot = &chtls_cpl_protv6;
 }
 
 static void __init chtls_init_ulp_ops(void)
@@ -603,6 +608,11 @@ static void __init chtls_init_ulp_ops(void)
 	chtls_cpl_prot.recvmsg		= chtls_recvmsg;
 	chtls_cpl_prot.setsockopt	= chtls_setsockopt;
 	chtls_cpl_prot.getsockopt	= chtls_getsockopt;
+#if IS_ENABLED(CONFIG_IPV6)
+	chtls_cpl_protv6		= chtls_cpl_prot;
+	chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
+			   &tcpv6_prot, PF_INET6);
+#endif
 }
 
 static int __init chtls_register(void)
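Editor's note: the chcr_aes_xts_setkey() hunk above repacks a 48-byte AES-192-XTS key (two 24-byte halves) so each half starts on the 16-byte boundary the hardware key context expects, zero-padding both halves to 32 bytes; chcr_update_tweak() then strips the 8 pad bytes again before aes_expandkey(). A standalone sketch of just that buffer transformation — hypothetical helper name, the driver does this inline on ablkctx->key:

#include <string.h>
#include <stdint.h>

/*
 * Editorial sketch, not part of the patch.  On entry key[] holds key1
 * (24 bytes) immediately followed by key2 (24 bytes); the buffer must
 * have room for 64 bytes.  On return each half occupies a zero-padded
 * 32-byte slot, mirroring the memmove/memset sequence in the patch.
 */
static void sketch_pad_xts192_key(uint8_t key[64])
{
	memmove(key + 32, key + 24, 24);	/* move key2 up to offset 32 */
	memset(key + 24, 0, 8);			/* zero-pad key1 to 32 bytes */
	memset(key + 56, 0, 8);			/* zero-pad key2 to 32 bytes */
}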