Diffstat (limited to 'drivers/crypto/intel')
37 files changed, 332 insertions, 661 deletions
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index c3776b0de51d..09d9589f2d68 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -33,8 +33,6 @@ static unsigned int nr_cpus_per_node; /* Number of physical cpus sharing each iaa instance */ static unsigned int cpus_per_iaa; -static struct crypto_comp *deflate_generic_tfm; - /* Per-cpu lookup table for balanced wqs */ static struct wq_table_entry __percpu *wq_table; @@ -1001,17 +999,14 @@ out: static int deflate_generic_decompress(struct acomp_req *req) { - void *src, *dst; + ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req)); int ret; - src = kmap_local_page(sg_page(req->src)) + req->src->offset; - dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset; - - ret = crypto_comp_decompress(deflate_generic_tfm, - src, req->slen, dst, &req->dlen); - - kunmap_local(src); - kunmap_local(dst); + acomp_request_set_callback(fbreq, 0, NULL, NULL); + acomp_request_set_params(fbreq, req->src, req->dst, req->slen, + req->dlen); + ret = crypto_acomp_decompress(fbreq); + req->dlen = fbreq->dlen; update_total_sw_decomp_calls(); @@ -1136,8 +1131,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, - u32 *compression_crc, - bool disable_async) + u32 *compression_crc) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1180,7 +1174,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src2_size = sizeof(struct aecs_comp_table_record); desc->completion_addr = idxd_desc->compl_dma; - if (ctx->use_irq && !disable_async) { + if (ctx->use_irq) { desc->flags |= IDXD_OP_FLAG_RCI; idxd_desc->crypto.req = req; @@ -1193,7 +1187,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) + } else if (ctx->async_mode) req->base.data = idxd_desc; dev_dbg(dev, "%s: compression mode %s," @@ -1214,7 +1208,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, update_total_comp_calls(); update_wq_comp_calls(wq); - if (ctx->async_mode && !disable_async) { + if (ctx->async_mode) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); goto out; @@ -1234,7 +1228,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *compression_crc = idxd_desc->iax_completion->crc; - if (!ctx->async_mode || disable_async) + if (!ctx->async_mode) idxd_free_desc(wq, idxd_desc); out: return ret; @@ -1500,13 +1494,11 @@ static int iaa_comp_acompress(struct acomp_req *req) struct iaa_compression_ctx *compression_ctx; struct crypto_tfm *tfm = req->base.tfm; dma_addr_t src_addr, dst_addr; - bool disable_async = false; int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; u32 compression_crc; struct idxd_wq *wq; struct device *dev; - int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1536,21 +1528,6 @@ static int iaa_comp_acompress(struct acomp_req *req) iaa_wq = idxd_wq_get_private(wq); - if (!req->dst) { - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; - - /* incompressible data will always be < 2 * slen */ - req->dlen = 2 * req->slen; - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - disable_async = true; - } - dev = &wq->idxd->pdev->dev; nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); @@ -1580,7 +1557,7 @@ static int iaa_comp_acompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, - &req->dlen, &compression_crc, disable_async); + &req->dlen, &compression_crc); if (ret == -EINPROGRESS) return ret; @@ -1611,100 +1588,6 @@ err_map_dst: out: iaa_wq_put(wq); - if (order >= 0) - sgl_free_order(req->dst, order); - - return ret; -} - -static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) -{ - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? - GFP_KERNEL : GFP_ATOMIC; - struct crypto_tfm *tfm = req->base.tfm; - dma_addr_t src_addr, dst_addr; - int nr_sgs, cpu, ret = 0; - struct iaa_wq *iaa_wq; - struct device *dev; - struct idxd_wq *wq; - int order = -1; - - cpu = get_cpu(); - wq = wq_table_next_wq(cpu); - put_cpu(); - if (!wq) { - pr_debug("no wq configured for cpu=%d\n", cpu); - return -ENODEV; - } - - ret = iaa_wq_get(wq); - if (ret) { - pr_debug("no wq available for cpu=%d\n", cpu); - return -ENODEV; - } - - iaa_wq = idxd_wq_get_private(wq); - - dev = &wq->idxd->pdev->dev; - - nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map src sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto out; - } - src_addr = sg_dma_address(req->src); - dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," - " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, - req->src, req->slen, sg_dma_len(req->src)); - - req->dlen = 4 * req->slen; /* start with ~avg comp rato */ -alloc_dest: - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - - nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map dst sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto err_map_dst; - } - - dst_addr = sg_dma_address(req->dst); - dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," - " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, - req->dst, req->dlen, sg_dma_len(req->dst)); - ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, true); - if (ret == -EOVERFLOW) { - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - req->dlen *= 2; - if (req->dlen > CRYPTO_ACOMP_DST_MAX) - goto err_map_dst; - goto alloc_dest; - } - - if (ret != 0) - dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); - - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); -err_map_dst: - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); -out: - iaa_wq_put(wq); - - if (order >= 0) - sgl_free_order(req->dst, order); - return ret; } @@ -1727,9 +1610,6 @@ static int iaa_comp_adecompress(struct acomp_req *req) return -EINVAL; } - if (!req->dst) - return 
iaa_comp_adecompress_alloc_dest(req); - cpu = get_cpu(); wq = wq_table_next_wq(cpu); put_cpu(); @@ -1810,19 +1690,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) return 0; } -static void dst_free(struct scatterlist *sgl) -{ - /* - * Called for req->dst = NULL cases but we free elsewhere - * using sgl_free_order(). - */ -} - static struct acomp_alg iaa_acomp_fixed_deflate = { .init = iaa_comp_init_fixed, .compress = iaa_comp_acompress, .decompress = iaa_comp_adecompress, - .dst_free = dst_free, .base = { .cra_name = "deflate", .cra_driver_name = "deflate-iaa", @@ -2022,15 +1893,6 @@ static int __init iaa_crypto_init_module(void) } nr_cpus_per_node = nr_cpus / nr_nodes; - if (crypto_has_comp("deflate-generic", 0, 0)) - deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0); - - if (IS_ERR_OR_NULL(deflate_generic_tfm)) { - pr_err("IAA could not alloc %s tfm: errcode = %ld\n", - "deflate-generic", PTR_ERR(deflate_generic_tfm)); - return -ENOMEM; - } - ret = iaa_aecs_init_fixed(); if (ret < 0) { pr_debug("IAA fixed compression mode init failed\n"); @@ -2072,7 +1934,6 @@ err_verify_attr_create: err_driver_reg: iaa_aecs_cleanup_fixed(); err_aecs_init: - crypto_free_comp(deflate_generic_tfm); goto out; } @@ -2089,7 +1950,6 @@ static void __exit iaa_crypto_cleanup_module(void) &driver_attr_verify_compress); idxd_driver_unregister(&iaa_crypto_driver); iaa_aecs_cleanup_fixed(); - crypto_free_comp(deflate_generic_tfm); pr_debug("cleaned up\n"); } diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile index 45728659fbc4..72b24b1804cf 100644 --- a/drivers/crypto/intel/qat/qat_420xx/Makefile +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o -qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o +qat_420xx-y := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 9faef33e54bd..4feeef83f7a3 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -98,7 +98,7 @@ static struct adf_hw_device_class adf_420xx_class = { static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 me_disable = self->fuses; + u32 me_disable = self->fuses[ADF_FUSECTL4]; return ~me_disable & ADF_420XX_ACCELENGINES_MASK; } @@ -106,8 +106,7 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return ARRAY_SIZE(adf_fw_cy_config); case SVC_DC: return ARRAY_SIZE(adf_fw_dc_config); @@ -118,10 +117,8 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) case SVC_ASYM: return ARRAY_SIZE(adf_fw_asym_config); case SVC_ASYM_DC: - case SVC_DC_ASYM: return ARRAY_SIZE(adf_fw_asym_dc_config); case SVC_SYM_DC: - case SVC_DC_SYM: return ARRAY_SIZE(adf_fw_sym_dc_config); default: return 0; @@ -131,8 +128,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return adf_fw_cy_config; case SVC_DC: return adf_fw_dc_config; @@ -143,10 +139,8 @@ static const struct adf_fw_config 
*get_fw_config(struct adf_accel_dev *accel_dev case SVC_ASYM: return adf_fw_asym_config; case SVC_ASYM_DC: - case SVC_DC_ASYM: return adf_fw_asym_dc_config; case SVC_SYM_DC: - case SVC_DC_SYM: return adf_fw_sym_dc_config; default: return NULL; @@ -266,8 +260,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return capabilities_sym | capabilities_asym; case SVC_DC: return capabilities_dc; @@ -284,10 +277,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) case SVC_ASYM: return capabilities_asym; case SVC_ASYM_DC: - case SVC_DC_ASYM: return capabilities_asym | capabilities_dc; case SVC_SYM_DC: - case SVC_DC_SYM: return capabilities_sym | capabilities_dc; default: return 0; @@ -420,6 +411,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK; dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; } @@ -482,6 +474,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_420XX_AE_FREQ; + hw_data->services_supported = adf_gen4_services_supported; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index 9589d60fb281..8084aa0f7f41 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -79,7 +79,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_420xx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile index 9ba202079a22..e8480bb80dee 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o -qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o +qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index bbd92c017c28..4eb6ef99efdd 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -101,7 +101,7 @@ static struct adf_hw_device_class adf_4xxx_class = { static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 me_disable = self->fuses; + u32 me_disable = self->fuses[ADF_FUSECTL4]; return ~me_disable & ADF_4XXX_ACCELENGINES_MASK; } @@ -178,8 +178,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } switch (adf_get_service_enabled(accel_dev)) { - case 
SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return capabilities_sym | capabilities_asym; case SVC_DC: return capabilities_dc; @@ -196,10 +195,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) case SVC_ASYM: return capabilities_asym; case SVC_ASYM_DC: - case SVC_DC_ASYM: return capabilities_asym | capabilities_dc; case SVC_SYM_DC: - case SVC_DC_SYM: return capabilities_sym | capabilities_dc; default: return 0; @@ -241,8 +238,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return adf_fw_cy_config; case SVC_DC: return adf_fw_dc_config; @@ -253,10 +249,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev case SVC_ASYM: return adf_fw_asym_config; case SVC_ASYM_DC: - case SVC_DC_ASYM: return adf_fw_asym_dc_config; case SVC_SYM_DC: - case SVC_DC_SYM: return adf_fw_sym_dc_config; default: return NULL; @@ -466,6 +460,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_4XXX_AE_FREQ; + hw_data->services_supported = adf_gen4_services_supported; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index d7de1cad1335..5537a9991e4e 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -81,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile index 7a06ad519bfc..d9e568572da8 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o -qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o +qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 201f9412c582..e78f7bfd30b8 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -27,8 +27,8 @@ static struct adf_hw_device_class c3xxx_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; u32 accel; accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET; @@ -39,8 +39,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; unsigned long disabled; u32 ae_disable; int accel; diff --git 
a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index caa53882fda6..b825b35ab4bf 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_c3xxx(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET, &hw_data->straps); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile index 7ef633058c4f..31a908a211ac 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o -qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o +qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile index cc9255b3b198..cbdaaa135e84 100644 --- a/drivers/crypto/intel/qat/qat_c62x/Makefile +++ b/drivers/crypto/intel/qat/qat_c62x/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o -qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o +qat_c62x-y := adf_drv.o adf_c62x_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 6b5b0cf9c7c7..32ebe09477a8 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -27,8 +27,8 @@ static struct adf_hw_device_class c62x_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; u32 accel; accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET; @@ -39,8 +39,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; unsigned long disabled; u32 ae_disable; int accel; diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index b7398fee19ed..8a7bdec358d6 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_c62x(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, &hw_data->straps); @@ -169,7 +169,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); /* Find and map all the device's BARS */ - i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0; + i = (hw_data->fuses[ADF_FUSECTL0] & ADF_DEVICE_FUSECTL_MASK) ? 
1 : 0; bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile index 256786662d60..60e499b041ec 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o -qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o +qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 7acf9c576149..af5df29fd2e3 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -1,62 +1,62 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"' -intel_qat-objs := adf_cfg.o \ - adf_isr.o \ - adf_ctl_drv.o \ +intel_qat-y := adf_accel_engine.o \ + adf_admin.o \ + adf_aer.o \ + adf_cfg.o \ adf_cfg_services.o \ + adf_clock.o \ + adf_ctl_drv.o \ adf_dev_mgr.o \ - adf_init.o \ - adf_accel_engine.o \ - adf_aer.o \ - adf_transport.o \ - adf_admin.o \ - adf_hw_arbiter.o \ - adf_sysfs.o \ - adf_sysfs_ras_counters.o \ + adf_gen2_config.o \ + adf_gen2_dc.o \ adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ - adf_gen2_config.o \ adf_gen4_config.o \ + adf_gen4_dc.o \ adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ - adf_gen4_vf_mig.o \ adf_gen4_pm.o \ - adf_gen2_dc.o \ - adf_gen4_dc.o \ adf_gen4_ras.o \ adf_gen4_timer.o \ - adf_clock.o \ + adf_gen4_vf_mig.o \ + adf_hw_arbiter.o \ + adf_init.o \ + adf_isr.o \ adf_mstate_mgr.o \ - qat_crypto.o \ - qat_compression.o \ - qat_comp_algs.o \ - qat_algs.o \ - qat_asym_algs.o \ - qat_algs_send.o \ - adf_rl.o \ adf_rl_admin.o \ + adf_rl.o \ + adf_sysfs.o \ + adf_sysfs_ras_counters.o \ adf_sysfs_rl.o \ - qat_uclo.o \ - qat_hal.o \ + adf_transport.o \ + qat_algs.o \ + qat_algs_send.o \ + qat_asym_algs.o \ qat_bl.o \ - qat_mig_dev.o + qat_comp_algs.o \ + qat_compression.o \ + qat_crypto.o \ + qat_hal.o \ + qat_mig_dev.o \ + qat_uclo.o -intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ +intel_qat-$(CONFIG_DEBUG_FS) += adf_cnv_dbgfs.o \ + adf_dbgfs.o \ adf_fw_counters.o \ - adf_cnv_dbgfs.o \ adf_gen4_pm_debugfs.o \ adf_gen4_tl.o \ - adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ + adf_heartbeat.o \ adf_pm_dbgfs.o \ adf_telemetry.o \ adf_tl_debugfs.o \ - adf_dbgfs.o + adf_transport_debug.o -intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ +intel_qat-$(CONFIG_PCI_IOV) += adf_gen2_pfvf.o adf_gen4_pfvf.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ - adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ - adf_gen2_pfvf.o adf_gen4_pfvf.o + adf_pfvf_utils.o adf_pfvf_vf_msg.o \ + adf_pfvf_vf_proto.o adf_sriov.o adf_vf_isr.o intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 7830ecb1a1f1..dc21551153cb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -10,6 +10,7 @@ #include <linux/ratelimit.h> #include <linux/types.h> #include <linux/qat/qat_mig_dev.h> +#include <linux/wordpart.h> #include "adf_cfg_common.h" #include "adf_rl.h" 
#include "adf_telemetry.h" @@ -52,6 +53,16 @@ enum adf_accel_capabilities { ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 }; +enum adf_fuses { + ADF_FUSECTL0, + ADF_FUSECTL1, + ADF_FUSECTL2, + ADF_FUSECTL3, + ADF_FUSECTL4, + ADF_FUSECTL5, + ADF_MAX_FUSES +}; + struct adf_bar { resource_size_t base_addr; void __iomem *virt_addr; @@ -333,6 +344,7 @@ struct adf_hw_device_data { int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); + bool (*services_supported)(unsigned long mask); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; @@ -343,7 +355,7 @@ struct adf_hw_device_data { struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; - u32 fuses; + u32 fuses[ADF_MAX_FUSES]; u32 straps; u32 accel_capabilities_mask; u32 extended_dc_capabilities; @@ -370,6 +382,15 @@ struct adf_hw_device_data { /* CSR write macro */ #define ADF_CSR_WR(csr_base, csr_offset, val) \ __raw_writel(val, csr_base + csr_offset) +/* + * CSR write macro to handle cases where the high and low + * offsets are sparsely located. + */ +#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val) \ +do { \ + ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val)); \ + ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val)); \ +} while (0) /* CSR read macro */ #define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset) diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c index 268052294468..30abcd9e1283 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2023 Intel Corporation */ +#include <linux/array_size.h> +#include <linux/bitops.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/string.h> @@ -8,40 +10,165 @@ #include "adf_cfg_services.h" #include "adf_cfg_strings.h" -const char *const adf_cfg_services[] = { - [SVC_CY] = ADF_CFG_CY, - [SVC_CY2] = ADF_CFG_ASYM_SYM, +static const char *const adf_cfg_services[] = { + [SVC_ASYM] = ADF_CFG_ASYM, + [SVC_SYM] = ADF_CFG_SYM, [SVC_DC] = ADF_CFG_DC, [SVC_DCC] = ADF_CFG_DCC, - [SVC_SYM] = ADF_CFG_SYM, - [SVC_ASYM] = ADF_CFG_ASYM, - [SVC_DC_ASYM] = ADF_CFG_DC_ASYM, - [SVC_ASYM_DC] = ADF_CFG_ASYM_DC, - [SVC_DC_SYM] = ADF_CFG_DC_SYM, - [SVC_SYM_DC] = ADF_CFG_SYM_DC, }; -EXPORT_SYMBOL_GPL(adf_cfg_services); -int adf_get_service_enabled(struct adf_accel_dev *accel_dev) +/* + * Ensure that the size of the array matches the number of services, + * SVC_BASE_COUNT, that is used to size the bitmap. + */ +static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_BASE_COUNT); + +/* + * Ensure that the maximum number of concurrent services that can be + * enabled on a device is less than or equal to the number of total + * supported services. + */ +static_assert(ARRAY_SIZE(adf_cfg_services) >= MAX_NUM_CONCURR_SVC); + +/* + * Ensure that the number of services fit a single unsigned long, as each + * service is represented by a bit in the mask. + */ +static_assert(BITS_PER_LONG >= SVC_BASE_COUNT); + +/* + * Ensure that size of the concatenation of all service strings is smaller + * than the size of the buffer that will contain them. 
+ */ +static_assert(sizeof(ADF_CFG_SYM ADF_SERVICES_DELIMITER + ADF_CFG_ASYM ADF_SERVICES_DELIMITER + ADF_CFG_DC ADF_SERVICES_DELIMITER + ADF_CFG_DCC) < ADF_CFG_MAX_VAL_LEN_IN_BYTES); + +static int adf_service_string_to_mask(struct adf_accel_dev *accel_dev, const char *buf, + size_t len, unsigned long *out_mask) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; + unsigned long mask = 0; + char *substr, *token; + int id, num_svc = 0; + + if (len > ADF_CFG_MAX_VAL_LEN_IN_BYTES - 1) + return -EINVAL; + + strscpy(services, buf, ADF_CFG_MAX_VAL_LEN_IN_BYTES); + substr = services; + + while ((token = strsep(&substr, ADF_SERVICES_DELIMITER))) { + id = sysfs_match_string(adf_cfg_services, token); + if (id < 0) + return id; + + if (test_and_set_bit(id, &mask)) + return -EINVAL; + + if (num_svc++ == MAX_NUM_CONCURR_SVC) + return -EINVAL; + } + + if (hw_data->services_supported && !hw_data->services_supported(mask)) + return -EINVAL; + + *out_mask = mask; + + return 0; +} + +static int adf_service_mask_to_string(unsigned long mask, char *buf, size_t len) +{ + int offset = 0; + int bit; + + if (len < ADF_CFG_MAX_VAL_LEN_IN_BYTES) + return -ENOSPC; + + for_each_set_bit(bit, &mask, SVC_BASE_COUNT) { + if (offset) + offset += scnprintf(buf + offset, len - offset, + ADF_SERVICES_DELIMITER); + + offset += scnprintf(buf + offset, len - offset, "%s", + adf_cfg_services[bit]); + } + + return 0; +} + +int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, + size_t in_len, char *out, size_t out_len) { - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + unsigned long mask; + int ret; + + ret = adf_service_string_to_mask(accel_dev, in, in_len, &mask); + if (ret) + return ret; + + if (!mask) + return -EINVAL; + + return adf_service_mask_to_string(mask, out, out_len); +} + +static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; + size_t len; int ret; ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_SERVICES_ENABLED, services); if (ret) { - dev_err(&GET_DEV(accel_dev), - ADF_SERVICES_ENABLED " param not found\n"); + dev_err(&GET_DEV(accel_dev), "%s param not found\n", + ADF_SERVICES_ENABLED); return ret; } - ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services), - services); - if (ret < 0) - dev_err(&GET_DEV(accel_dev), - "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n", - services); + len = strnlen(services, ADF_CFG_MAX_VAL_LEN_IN_BYTES); + ret = adf_service_string_to_mask(accel_dev, services, len, mask); + if (ret) + dev_err(&GET_DEV(accel_dev), "Invalid value of %s param: %s\n", + ADF_SERVICES_ENABLED, services); return ret; } + +int adf_get_service_enabled(struct adf_accel_dev *accel_dev) +{ + unsigned long mask; + int ret; + + ret = adf_get_service_mask(accel_dev, &mask); + if (ret) + return ret; + + if (test_bit(SVC_SYM, &mask) && test_bit(SVC_ASYM, &mask)) + return SVC_SYM_ASYM; + + if (test_bit(SVC_SYM, &mask) && test_bit(SVC_DC, &mask)) + return SVC_SYM_DC; + + if (test_bit(SVC_ASYM, &mask) && test_bit(SVC_DC, &mask)) + return SVC_ASYM_DC; + + if (test_bit(SVC_SYM, &mask)) + return SVC_SYM; + + if (test_bit(SVC_ASYM, &mask)) + return SVC_ASYM; + + if (test_bit(SVC_DC, &mask)) + return SVC_DC; + + if (test_bit(SVC_DCC, &mask)) + return SVC_DCC; + + return -EINVAL; +} EXPORT_SYMBOL_GPL(adf_get_service_enabled); diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h 
b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h index c6b0328b0f5b..f6bafc15cbc6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h @@ -8,21 +8,29 @@ struct adf_accel_dev; enum adf_services { - SVC_CY = 0, - SVC_CY2, + SVC_ASYM = 0, + SVC_SYM, SVC_DC, SVC_DCC, - SVC_SYM, - SVC_ASYM, - SVC_DC_ASYM, - SVC_ASYM_DC, - SVC_DC_SYM, + SVC_BASE_COUNT +}; + +enum adf_composed_services { + SVC_SYM_ASYM = SVC_BASE_COUNT, SVC_SYM_DC, - SVC_COUNT + SVC_ASYM_DC, +}; + +enum { + ADF_ONE_SERVICE = 1, + ADF_TWO_SERVICES, + ADF_THREE_SERVICES, }; -extern const char *const adf_cfg_services[SVC_COUNT]; +#define MAX_NUM_CONCURR_SVC ADF_THREE_SERVICES +int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, + size_t in_len, char *out, size_t out_len); int adf_get_service_enabled(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index e015ad6cace2..b79982c4a856 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -27,13 +27,9 @@ #define ADF_CFG_CY "sym;asym" #define ADF_CFG_SYM "sym" #define ADF_CFG_ASYM "asym" -#define ADF_CFG_ASYM_SYM "asym;sym" -#define ADF_CFG_ASYM_DC "asym;dc" -#define ADF_CFG_DC_ASYM "dc;asym" -#define ADF_CFG_SYM_DC "sym;dc" -#define ADF_CFG_DC_SYM "dc;sym" #define ADF_CFG_DCC "dcc" #define ADF_SERVICES_ENABLED "ServicesEnabled" +#define ADF_SERVICES_DELIMITER ";" #define ADF_PM_IDLE_SUPPORT "PmIdleSupport" #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" #define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index 1f64bf49b221..2b263442c856 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -115,8 +115,8 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; + u32 fuses = hw_data->fuses[ADF_FUSECTL0]; u32 straps = hw_data->straps; - u32 fuses = hw_data->fuses; u32 legfuses; u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c index fe1f3d727dc5..f97e7a880f3a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -213,7 +213,6 @@ static int adf_no_dev_config(struct adf_accel_dev *accel_dev) */ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) { - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; int ret; ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); @@ -224,18 +223,8 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - if (ret) - goto err; - - ret = sysfs_match_string(adf_cfg_services, services); - if (ret < 0) - goto err; - - switch (ret) { - case SVC_CY: - case SVC_CY2: + switch (adf_get_service_enabled(accel_dev)) { + case SVC_SYM_ASYM: ret = adf_crypto_dev_config(accel_dev); break; case SVC_DC: diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c 
b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 41a0979e68c1..099949a2421c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ +#include <linux/bitops.h> #include <linux/iopoll.h> #include <asm/div64.h> #include "adf_accel_devices.h" @@ -134,36 +135,18 @@ int adf_gen4_init_device(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen4_init_device); -static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, - u32 *lower) -{ - *lower = lower_32_bits(value); - *upper = upper_32_bits(value); -} - void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) { void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE; - u32 ssm_wdt_pke_high = 0; - u32 ssm_wdt_pke_low = 0; - u32 ssm_wdt_high = 0; - u32 ssm_wdt_low = 0; - /* Convert 64bit WDT timer value into 32bit values for - * mmio write to 32bit CSRs. - */ - adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low); - adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high, - &ssm_wdt_pke_low); - - /* Enable WDT for sym and dc */ - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low); - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high); - /* Enable WDT for pke */ - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low); - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high); + /* Enable watchdog timer for sym and dc */ + ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTL_OFFSET, ADF_SSMWDTH_OFFSET, timer_val); + + /* Enable watchdog timer for pke */ + ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, + timer_val_pke); } EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); @@ -265,18 +248,29 @@ static bool is_single_service(int service_id) case SVC_SYM: case SVC_ASYM: return true; - case SVC_CY: - case SVC_CY2: - case SVC_DCC: - case SVC_ASYM_DC: - case SVC_DC_ASYM: - case SVC_SYM_DC: - case SVC_DC_SYM: default: return false; } } +bool adf_gen4_services_supported(unsigned long mask) +{ + unsigned long num_svc = hweight_long(mask); + + if (mask >= BIT(SVC_BASE_COUNT)) + return false; + + switch (num_svc) { + case ADF_ONE_SERVICE: + return true; + case ADF_TWO_SERVICES: + return !test_bit(SVC_DCC, &mask); + default: + return false; + } +} +EXPORT_SYMBOL_GPL(adf_gen4_services_supported); + int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index e8c53bd76f1b..51fc2eaa263e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -179,5 +179,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, struct bank_state *state); int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number, struct bank_state *state); +bool adf_gen4_services_supported(unsigned long service_mask); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 2dd3772bf58a..0f7f00a19e7d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -695,7 +695,7 
@@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, if (err_mask->parerr_wat_wcp_mask) adf_poll_slicehang_csr(accel_dev, csr, ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, - "ath_cph"); + "wat_wcp"); return false; } @@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, return reset_required; } -static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, +static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); - u32 reg; - if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) - return false; - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); - reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); - reg &= err_mask->parerr_ath_cph_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); - reg &= err_mask->parerr_cpr_xlt_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); - reg &= err_mask->parerr_dcpr_ucs_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); - reg &= err_mask->parerr_pke_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); - } - - if (err_mask->parerr_wat_wcp_mask) { - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); - reg &= err_mask->parerr_wat_wcp_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, - reg); - } - } + return; + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); - return false; + return; } static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, @@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); - reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + adf_handle_rf_parr_err(accel_dev, csr, iastatssm); ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 4fcd61ff70d1..6c39194647f0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -3,6 +3,7 @@ #include <linux/device.h> #include <linux/errno.h> #include <linux/pci.h> +#include <linux/string_choices.h> #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_cfg_services.h" @@ -19,14 +20,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct 
adf_accel_dev *accel_dev; - char *state; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; - state = adf_dev_started(accel_dev) ? "up" : "down"; - return sysfs_emit(buf, "%s\n", state); + return sysfs_emit(buf, "%s\n", str_up_down(adf_dev_started(accel_dev))); } static ssize_t state_store(struct device *dev, struct device_attribute *attr, @@ -117,25 +116,27 @@ static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev, static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; struct adf_hw_device_data *hw_data; struct adf_accel_dev *accel_dev; int ret; - ret = sysfs_match_string(adf_cfg_services, buf); - if (ret < 0) - return ret; - accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; + ret = adf_parse_service_string(accel_dev, buf, count, services, + ADF_CFG_MAX_VAL_LEN_IN_BYTES); + if (ret) + return ret; + if (adf_dev_started(accel_dev)) { dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n", accel_dev->accel_id); return -EINVAL; } - ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]); + ret = adf_sysfs_update_dev_config(accel_dev, services); if (ret < 0) return ret; @@ -207,16 +208,13 @@ static DEVICE_ATTR_RW(pm_idle_enabled); static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr, char *buf) { - char *auto_reset; struct adf_accel_dev *accel_dev; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; - auto_reset = accel_dev->autoreset_on_error ? "on" : "off"; - - return sysfs_emit(buf, "%s\n", auto_reset); + return sysfs_emit(buf, "%s\n", str_on_off(accel_dev->autoreset_on_error)); } static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h index a03d43fef2b3..04f645957e28 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h @@ -16,8 +16,8 @@ enum icp_qat_fw_comp_20_cmd_id { ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4, ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5, ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6, - ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7, - ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8, + ICP_QAT_FW_COMP_20_CMD_RESERVED_7 = 7, + ICP_QAT_FW_COMP_20_CMD_RESERVED_8 = 8, ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9, ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10, ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11, diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index e28241bdd0f4..1c7bcd8e4055 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -43,7 +43,6 @@ #define ICP_QAT_SUOF_OBJS "SUF_OBJS" #define ICP_QAT_SUOF_IMAG "SUF_IMAG" #define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) -#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) #define DSS_FWSK_MODULUS_LEN 384 /* RSA3K */ #define DSS_FWSK_EXPONENT_LEN 4 @@ -75,13 +74,6 @@ DSS_SIGNATURE_LEN : \ CSS_SIGNATURE_LEN) -#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \ - ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \ - ICP_QAT_SIMG_AE_INSTS_LEN) -#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \ - ICP_QAT_CSS_FWSK_PUB_LEN(handle) + 
\ - ICP_QAT_CSS_SIGNATURE_LEN(handle) + \ - ICP_QAT_CSS_AE_IMG_LEN) #define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \ @@ -404,8 +396,6 @@ struct icp_qat_suof_img_hdr { char *simg_buf; unsigned long simg_len; char *css_header; - char *css_key; - char *css_signature; char *css_simg; unsigned long simg_size; unsigned int ae_num; diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c index 338acf29c487..5e4dad4693ca 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.c +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c @@ -251,162 +251,3 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, extra_dst_buff, sz_extra_dst_buff, sskip, dskip, flags); } - -static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev, - struct qat_alg_buf_list *bl) -{ - struct device *dev = &GET_DEV(accel_dev); - int n = bl->num_bufs; - int i; - - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bl->buffers[i].addr)) - dma_unmap_single(dev, bl->buffers[i].addr, - bl->buffers[i].len, DMA_FROM_DEVICE); -} - -static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev, - struct scatterlist *sgl, - struct qat_alg_buf_list **bl) -{ - struct device *dev = &GET_DEV(accel_dev); - struct qat_alg_buf_list *bufl; - int node = dev_to_node(dev); - struct scatterlist *sg; - int n, i, sg_nctr; - size_t sz; - - n = sg_nents(sgl); - sz = struct_size(bufl, buffers, n); - bufl = kzalloc_node(sz, GFP_KERNEL, node); - if (unlikely(!bufl)) - return -ENOMEM; - - for (i = 0; i < n; i++) - bufl->buffers[i].addr = DMA_MAPPING_ERROR; - - sg_nctr = 0; - for_each_sg(sgl, sg, n, i) { - int y = sg_nctr; - - if (!sg->length) - continue; - - bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, - DMA_FROM_DEVICE); - bufl->buffers[y].len = sg->length; - if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr))) - goto err_map; - sg_nctr++; - } - bufl->num_bufs = sg_nctr; - bufl->num_mapped_bufs = sg_nctr; - - *bl = bufl; - - return 0; - -err_map: - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bufl->buffers[i].addr)) - dma_unmap_single(dev, bufl->buffers[i].addr, - bufl->buffers[i].len, - DMA_FROM_DEVICE); - kfree(bufl); - *bl = NULL; - - return -ENOMEM; -} - -static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev, - struct scatterlist *sgl, - struct qat_alg_buf_list *bl, - bool free_bl) -{ - if (bl) { - qat_bl_sgl_unmap(accel_dev, bl); - - if (free_bl) - kfree(bl); - } - if (sgl) - sgl_free(sgl); -} - -static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev, - struct scatterlist **sgl, - struct qat_alg_buf_list **bl, - unsigned int dlen, - gfp_t gfp) -{ - struct scatterlist *dst; - int ret; - - dst = sgl_alloc(dlen, gfp, NULL); - if (!dst) { - dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n"); - return -ENOMEM; - } - - ret = qat_bl_sgl_map(accel_dev, dst, bl); - if (ret) - goto err; - - *sgl = dst; - - return 0; - -err: - sgl_free(dst); - *sgl = NULL; - return ret; -} - -int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev, - struct scatterlist **sg, - unsigned int dlen, - struct qat_request_buffs *qat_bufs, - gfp_t gfp) -{ - struct device *dev = &GET_DEV(accel_dev); - dma_addr_t new_blp = DMA_MAPPING_ERROR; - struct qat_alg_buf_list *new_bl; - struct scatterlist *new_sg; - size_t new_bl_size; - int ret; - - ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp); - if (ret) - return ret; - - new_bl_size = 
struct_size(new_bl, buffers, new_bl->num_bufs); - - /* Map new firmware SGL descriptor */ - new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, new_blp))) - goto err; - - /* Unmap old firmware SGL descriptor */ - dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE); - - /* Free and unmap old scatterlist */ - qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout, - !qat_bufs->sgl_dst_valid); - - qat_bufs->sgl_dst_valid = false; - qat_bufs->blout = new_bl; - qat_bufs->bloutp = new_blp; - qat_bufs->sz_out = new_bl_size; - - *sg = new_sg; - - return 0; -err: - qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true); - - if (!dma_mapping_error(dev, new_blp)) - dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE); - - return -ENOMEM; -} diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index 3f5b79015400..2827d5055d3c 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -65,10 +65,4 @@ static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req) return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; } -int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev, - struct scatterlist **newd, - unsigned int dlen, - struct qat_request_buffs *qat_bufs, - gfp_t gfp); - #endif diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index 2ba4aa22e092..a6e02405d402 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -29,11 +29,6 @@ struct qat_compression_ctx { int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp); }; -struct qat_dst { - bool is_null; - int resubmitted; -}; - struct qat_compression_req { u8 req[QAT_COMP_REQ_SIZE]; struct qat_compression_ctx *qat_compression_ctx; @@ -42,8 +37,6 @@ struct qat_compression_req { enum direction dir; int actual_dlen; struct qat_alg_req alg_req; - struct work_struct resubmit; - struct qat_dst dst; }; static int qat_alg_send_dc_message(struct qat_compression_req *qat_req, @@ -60,46 +53,6 @@ static int qat_alg_send_dc_message(struct qat_compression_req *qat_req, return qat_alg_send_message(alg_req); } -static void qat_comp_resubmit(struct work_struct *work) -{ - struct qat_compression_req *qat_req = - container_of(work, struct qat_compression_req, resubmit); - struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx; - struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; - struct qat_request_buffs *qat_bufs = &qat_req->buf; - struct qat_compression_instance *inst = ctx->inst; - struct acomp_req *areq = qat_req->acompress_req; - struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq); - unsigned int dlen = CRYPTO_ACOMP_DST_MAX; - u8 *req = qat_req->req; - dma_addr_t dfbuf; - int ret; - - areq->dlen = dlen; - - dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n", - crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)), - qat_req->dir == COMPRESSION ? 
"comp" : "decomp", dlen); - - ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs, - qat_algs_alloc_flags(&areq->base)); - if (ret) - goto err; - - qat_req->dst.resubmitted = true; - - dfbuf = qat_req->buf.bloutp; - qat_comp_override_dst(req, dfbuf, dlen); - - ret = qat_alg_send_dc_message(qat_req, inst, &areq->base); - if (ret != -ENOSPC) - return; - -err: - qat_bl_free_bufl(accel_dev, qat_bufs); - acomp_request_complete(areq, ret); -} - static void qat_comp_generic_callback(struct qat_compression_req *qat_req, void *resp) { @@ -131,21 +84,6 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req, areq->dlen = 0; - if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) { - if (cmp_err == ERR_CODE_OVERFLOW_ERROR) { - if (qat_req->dst.resubmitted) { - dev_dbg(&GET_DEV(accel_dev), - "Output does not fit destination buffer\n"); - res = -EOVERFLOW; - goto end; - } - - INIT_WORK(&qat_req->resubmit, qat_comp_resubmit); - adf_misc_wq_queue_work(&qat_req->resubmit); - return; - } - } - if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) goto end; @@ -245,29 +183,9 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum directi if (!areq->src || !slen) return -EINVAL; - if (areq->dst && !dlen) + if (!areq->dst || !dlen) return -EINVAL; - qat_req->dst.is_null = false; - - /* Handle acomp requests that require the allocation of a destination - * buffer. The size of the destination buffer is double the source - * buffer (rounded up to the size of a page) to fit the decompressed - * output or an expansion on the data for compression. - */ - if (!areq->dst) { - qat_req->dst.is_null = true; - - dlen = round_up(2 * slen, PAGE_SIZE); - areq->dst = sgl_alloc(dlen, f, NULL); - if (!areq->dst) - return -ENOMEM; - - dlen -= dhdr + dftr; - areq->dlen = dlen; - qat_req->dst.resubmitted = false; - } - if (dir == COMPRESSION) { params.extra_dst_buff = inst->dc_data->ovf_buff_p; ovf_buff_sz = inst->dc_data->ovf_buff_sz; @@ -329,7 +247,6 @@ static struct acomp_alg qat_acomp[] = { { .exit = qat_comp_alg_exit_tfm, .compress = qat_comp_alg_compress, .decompress = qat_comp_alg_decompress, - .dst_free = sgl_free, .reqsize = sizeof(struct qat_compression_req), }}; diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h index 404e32c5e778..18a1f33a6db9 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h @@ -25,16 +25,6 @@ static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen, req_pars->out_buffer_sz = dlen; } -static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen) -{ - struct icp_qat_fw_comp_req *fw_req = req; - struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars; - - fw_req->comn_mid.dest_data_addr = dst; - fw_req->comn_mid.dst_length = dlen; - req_pars->out_buffer_sz = dlen; -} - static inline void qat_comp_create_compression_req(void *ctx, void *req, u64 src, u32 slen, u64 dst, u32 dlen, diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 7ea40b4f6e5b..7678a93c6853 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2020 Intel Corporation */ +#include <linux/align.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/kernel.h> @@ -1064,6 
+1065,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; + unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle); struct icp_qat_simg_ae_mode *ae_mode; struct icp_qat_suof_objhdr *suof_objhdr; @@ -1075,13 +1077,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, suof_chunk_hdr->offset))->img_length; suof_img_hdr->css_header = suof_img_hdr->simg_buf; - suof_img_hdr->css_key = (suof_img_hdr->css_header + - sizeof(struct icp_qat_css_hdr)); - suof_img_hdr->css_signature = suof_img_hdr->css_key + - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + - ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); - suof_img_hdr->css_simg = suof_img_hdr->css_signature + - ICP_QAT_CSS_SIGNATURE_LEN(handle); + suof_img_hdr->css_simg = suof_img_hdr->css_header + offset; ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); suof_img_hdr->ae_mask = ae_mode->ae_mask; @@ -1419,20 +1415,21 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *auth_desc; struct icp_qat_auth_chunk *auth_chunk; u64 virt_addr, bus_addr, virt_base; - unsigned int length, simg_offset = sizeof(*auth_chunk); + unsigned int simg_offset = sizeof(*auth_chunk); struct icp_qat_simg_ae_mode *simg_ae_mode; struct icp_firml_dram_desc img_desc; + int ret; - if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) { - pr_err("QAT: error, input image size overflow %d\n", size); - return -EINVAL; - } - length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? - ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset : - size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset; - if (qat_uclo_simg_alloc(handle, &img_desc, length)) { + ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN); + if (ret) { pr_err("QAT: error, allocate continuous dram fail\n"); - return -ENOMEM; + return ret; + } + + if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) { + pr_debug("QAT: invalid address\n"); + qat_uclo_simg_free(handle, &img_desc); + return -EINVAL; } auth_chunk = img_desc.dram_base_addr_v; @@ -1490,6 +1487,13 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_low = (unsigned int)bus_addr; auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle); + if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr + + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) { + pr_err("QAT: insufficient memory size for authentication data\n"); + qat_uclo_simg_free(handle, &img_desc); + return -ENOMEM; + } + memcpy((void *)(uintptr_t)virt_addr, (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)), auth_desc->img_len); diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile index cfd3bd757715..5bf5c890c362 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o -qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o +qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index c0661ff5e929..e48bcf1818cd 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c 
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -29,7 +29,7 @@ static struct adf_hw_device_class dh895xcc_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { - u32 fuses = self->fuses; + u32 fuses = self->fuses[ADF_FUSECTL0]; return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & ADF_DH895XCC_ACCELERATORS_MASK; @@ -37,7 +37,7 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 fuses = self->fuses; + u32 fuses = self->fuses[ADF_FUSECTL0]; return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK; } @@ -99,7 +99,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { - int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) + int sku = (self->fuses[ADF_FUSECTL0] & ADF_DH895XCC_FUSECTL_SKU_MASK) >> ADF_DH895XCC_FUSECTL_SKU_SHIFT; switch (sku) { diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 3137fc3b5cf6..07e9d7e52861 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_dh895xcc(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile index 64b54e92b2b4..93f9c81edf09 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o -qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o +qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o |
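For readability, the new GEN4 service-validation helper introduced in adf_gen4_hw_data.c above is reproduced here with its line breaks restored. The code is taken from the '+' lines of that hunk; the comments are editorial additions, not part of the patch.

	/* Accept a service selection only if every set bit is a known base
	 * service and the combination is either a single service or a pair
	 * of services that does not include "dcc".
	 */
	bool adf_gen4_services_supported(unsigned long mask)
	{
		unsigned long num_svc = hweight_long(mask);

		/* Reject any bit outside the known base services */
		if (mask >= BIT(SVC_BASE_COUNT))
			return false;

		switch (num_svc) {
		case ADF_ONE_SERVICE:
			return true;
		case ADF_TWO_SERVICES:
			/* Pairs are allowed unless "dcc" is one of them */
			return !test_bit(SVC_DCC, &mask);
		default:
			return false;
		}
	}
	EXPORT_SYMBOL_GPL(adf_gen4_services_supported);

Per the hunks above, the 4xxx and 420xx drivers register this helper through hw_data->services_supported, and adf_service_string_to_mask() in adf_cfg_services.c invokes it after parsing the ServicesEnabled string into a bitmask.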