From 108586eba094b318e6a831f977f4ddcc403a15da Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Mon, 25 Jul 2022 12:09:28 +0800 Subject: crypto: sahara - don't sleep when in softirq sahara_aes_crypt() may be called by crypto_skcipher_encrypt() from the rx softirq, so it is not allowed to take a mutex there. Use a spinlock with the _bh variants instead. Fixes: c0c3c89ae347 ("crypto: sahara - replace tasklets with...") Signed-off-by: Zhengchao Shao Signed-off-by: Herbert Xu --- drivers/crypto/sahara.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 457084b344c1..b07ae4ba165e 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -26,10 +26,10 @@ #include #include #include -#include #include #include #include +#include #define SHA_BUFFER_LEN PAGE_SIZE #define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE @@ -196,7 +196,7 @@ struct sahara_dev { void __iomem *regs_base; struct clk *clk_ipg; struct clk *clk_ahb; - struct mutex queue_mutex; + spinlock_t queue_spinlock; struct task_struct *kthread; struct completion dma_completion; @@ -642,9 +642,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) rctx->mode = mode; - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); err = crypto_enqueue_request(&dev->queue, &req->base); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); wake_up_process(dev->kthread); @@ -1043,10 +1043,10 @@ static int sahara_queue_manage(void *data) do { __set_current_state(TASK_INTERRUPTIBLE); - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); backlog = crypto_get_backlog(&dev->queue); async_req = crypto_dequeue_request(&dev->queue); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -1092,9 +1092,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last) rctx->first = 1; } - mutex_lock(&dev->queue_mutex); + spin_lock_bh(&dev->queue_spinlock); ret = crypto_enqueue_request(&dev->queue, &req->base); - mutex_unlock(&dev->queue_mutex); + spin_unlock_bh(&dev->queue_spinlock); wake_up_process(dev->kthread); @@ -1449,7 +1449,7 @@ static int sahara_probe(struct platform_device *pdev) crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH); - mutex_init(&dev->queue_mutex); + spin_lock_init(&dev->queue_spinlock); dev_ptr = dev; -- cgit v1.2.3 From 908f24270d9ccbe120b91e7029b372f3dcd18290 Mon Sep 17 00:00:00 2001 From: Srinivas Kerekare Date: Mon, 25 Jul 2022 11:40:09 +0100 Subject: crypto: qat - add check to validate firmware images The function qat_uclo_check_image() validates the MMP and AE firmware images. If the QAT device supports firmware authentication (indicated by the firmware loader handle), the input signed binary MMP and AE images are validated by parsing the following information: - Header length - Full size of the binary - Type of binary image (MMP or AE Firmware) Firmware binaries use RSA3K for signing and verification. The header length for RSA3K is 0x384 bytes. All the size field values in the binary are quantified as DWORDs (1 DWORD = 4 bytes). On an invalid value, the function prints an error message and returns -EINVAL.
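For illustration (not part of the patch): since every size field is counted in DWORDs, the RSA3K header of 0x384 bytes corresponds to a css_hdr->header_len value of 0x384 / 4 = 0xE1. A minimal sketch of the resulting checks, assuming css_hdr points at the start of the signed image:

        /* Sizes in the CSS header are counted in DWORDs (4 bytes each) */
        struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;

        if (css_hdr->header_len * sizeof(u32) != ICP_QAT_AE_IMG_OFFSET(handle))
                return -EINVAL; /* header length mismatch */
        if (css_hdr->size * sizeof(u32) != size)
                return -EINVAL; /* declared size does not match the binary */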
Signed-off-by: Srinivas Kerekare Reviewed-by: Giovanni Cabiddu Reviewed-by: Wojciech Ziemba Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/icp_qat_uclo.h | 3 +- drivers/crypto/qat/qat_common/qat_uclo.c | 56 +++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h index 4b36869bf460..69482abdb8b9 100644 --- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h @@ -86,7 +86,8 @@ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \ ICP_QAT_CSS_SIGNATURE_LEN(handle)) -#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000 +#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000 +#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 0fe5a474aa45..b7f7869ef8b2 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -1367,6 +1367,48 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle, } } +static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle, + char *image, unsigned int size, + unsigned int fw_type) +{ + char *fw_type_name = fw_type ? "MMP" : "AE"; + unsigned int css_dword_size = sizeof(u32); + + if (handle->chip_info->fw_auth) { + struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; + unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle); + + if ((css_hdr->header_len * css_dword_size) != header_len) + goto err; + if ((css_hdr->size * css_dword_size) != size) + goto err; + if (fw_type != css_hdr->fw_type) + goto err; + if (size <= header_len) + goto err; + size -= header_len; + } + + if (fw_type == CSS_AE_FIRMWARE) { + if (size < sizeof(struct icp_qat_simg_ae_mode *) + + ICP_QAT_SIMG_AE_INIT_SEQ_LEN) + goto err; + if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) + goto err; + } else if (fw_type == CSS_MMP_FIRMWARE) { + if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN) + goto err; + } else { + pr_err("QAT: Unsupported firmware type\n"); + return -EINVAL; + } + return 0; + +err: + pr_err("QAT: Invalid %s firmware image\n", fw_type_name); + return -EINVAL; +} + static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, char *image, unsigned int size, struct icp_qat_fw_auth_desc **desc) @@ -1379,7 +1421,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_simg_ae_mode *simg_ae_mode; struct icp_firml_dram_desc img_desc; - if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) { + if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) { pr_err("QAT: error, input image size overflow %d\n", size); return -EINVAL; } @@ -1547,6 +1589,11 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, { struct icp_qat_fw_auth_desc *desc = NULL; int status = 0; + int ret; + + ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE); + if (ret) + return ret; if (handle->chip_info->fw_auth) { status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc); @@ -2018,8 +2065,15 @@ static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) struct icp_qat_fw_auth_desc *desc = NULL; struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; struct 
icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; + int ret; for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { + ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf, + simg_hdr[i].simg_len, + CSS_AE_FIRMWARE); + if (ret) + return ret; + if (qat_uclo_map_auth_fw(handle, (char *)simg_hdr[i].simg_buf, (unsigned int) -- cgit v1.2.3 From d74f9340097a881869c4c22ca376654cc2516ecc Mon Sep 17 00:00:00 2001 From: Ye Weihua Date: Thu, 28 Jul 2022 10:07:58 +0800 Subject: crypto: hisilicon/zip - fix mismatch in get/set sgl_sge_nr KASAN reported this Bug: [17619.659757] BUG: KASAN: global-out-of-bounds in param_get_int+0x34/0x60 [17619.673193] Read of size 4 at addr fffff01332d7ed00 by task read_all/1507958 ... [17619.698934] The buggy address belongs to the variable: [17619.708371] sgl_sge_nr+0x0/0xffffffffffffa300 [hisi_zip] There is a type mismatch in hisi_zip when getting/setting the variable sgl_sge_nr: its type is u16, but it is read and written with param_get/set_int. Replacing param_get/set_int with param_get/set_ushort fixes this bug. Fixes: f081fda293ffb ("crypto: hisilicon - add sgl_sge_nr module param for zip") Signed-off-by: Ye Weihua Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index ad35434a3fdb..06a2d6e81ae9 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -123,12 +123,12 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX) return -EINVAL; - return param_set_int(val, kp); + return param_set_ushort(val, kp); } static const struct kernel_param_ops sgl_sge_nr_ops = { .set = sgl_sge_nr_set, - .get = param_get_int, + .get = param_get_ushort, }; static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; -- cgit v1.2.3 From 882aa6525cabcfa0cea61e1a19c9af4c543118ac Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 29 Jul 2022 17:35:31 +0800 Subject: crypto: qcom-rng - Fix qcom_rng_of_match unused warning Module device tables need to be declared as maybe_unused because they will be unused when the driver is built-in and the corresponding option is disabled. This patch adds the maybe_unused attributes to OF and ACPI. This also allows us to remove the ifdef around the ACPI data structure.
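For illustration of the pattern on a hypothetical driver (the names below are invented for the example): when the driver is built-in and CONFIG_OF is disabled, nothing references the table, and __maybe_unused silences the resulting "defined but not used" warning without an #ifdef:

        /* Unreferenced when !CONFIG_OF; __maybe_unused avoids the warning */
        static const struct of_device_id __maybe_unused foo_of_match[] = {
                { .compatible = "vendor,foo" },
                {}
        };
        MODULE_DEVICE_TABLE(of, foo_of_match);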
Reported-by: kernel test robot Signed-off-by: Herbert Xu Reviewed-by: Vinod Koul Signed-off-by: Herbert Xu --- drivers/crypto/qcom-rng.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 031b5f701a0a..72dd1a4ebac4 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -201,15 +202,13 @@ static int qcom_rng_remove(struct platform_device *pdev) return 0; } -#if IS_ENABLED(CONFIG_ACPI) -static const struct acpi_device_id qcom_rng_acpi_match[] = { +static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = { { .id = "QCOM8160", .driver_data = 1 }, {} }; MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match); -#endif -static const struct of_device_id qcom_rng_of_match[] = { +static const struct of_device_id __maybe_unused qcom_rng_of_match[] = { { .compatible = "qcom,prng", .data = (void *)0}, { .compatible = "qcom,prng-ee", .data = (void *)1}, {} -- cgit v1.2.3 From 00278564a60e11df8bcca0ececd8b2f55434e406 Mon Sep 17 00:00:00 2001 From: Zhuo Chen Date: Tue, 2 Aug 2022 11:29:37 +0800 Subject: crypto: hisilicon - Remove pci_aer_clear_nonfatal_status() call Calls to pci_cleanup_aer_uncorrect_error_status() have already been removed after commit 62b36c3ea664 ("PCI/AER: Remove pci_cleanup_aer_uncorrect_error_status() calls"). But in commit 6c6dd5802c2d ("crypto: hisilicon/qm - add controller reset interface") pci_aer_clear_nonfatal_status() was used again, so remove it in this patch. Note: pci_cleanup_aer_uncorrect_error_status() was renamed to pci_aer_clear_nonfatal_status() in commit 894020fdd88c ("PCI/AER: Rationalize error status register clearing") Signed-off-by: Zhuo Chen Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index ad83c194d664..9701434a8ef3 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -5466,8 +5466,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) if (pdev->is_virtfn) return PCI_ERS_RESULT_RECOVERED; - pci_aer_clear_nonfatal_status(pdev); - /* reset pcie device controller */ ret = qm_controller_reset(qm); if (ret) { -- cgit v1.2.3 From 7433d2fda2f0d1fcbfc72d3b36f908ad94aebf6d Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Tue, 2 Aug 2022 07:48:20 +0000 Subject: crypto: sun8i-ce - use pm_runtime_resume_and_get() to simplify the code Use pm_runtime_resume_and_get() instead of pm_runtime_get_sync() and pm_runtime_put_noidle().
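For reference, pm_runtime_resume_and_get() folds the error-path reference drop into a single call; a minimal sketch of the before/after pattern on a generic device pointer:

        /* Before: the caller must drop the usage count on failure */
        err = pm_runtime_get_sync(dev);
        if (err < 0) {
                pm_runtime_put_noidle(dev);
                return err;
        }

        /* After: the helper drops the usage count itself on failure */
        err = pm_runtime_resume_and_get(dev);
        if (err < 0)
                return err;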
Reported-by: Zeal Robot Signed-off-by: ye xingchen Acked-by: Jernej Skrabec Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c index 19cd2e52f89d..c4b0a8b58842 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c @@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa goto err_dst; } - err = pm_runtime_get_sync(ce->dev); - if (err < 0) { - pm_runtime_put_noidle(ce->dev); + err = pm_runtime_resume_and_get(ce->dev); + if (err < 0) goto err_pm; - } mutex_lock(&ce->rnglock); chan = &ce->chanlist[flow]; -- cgit v1.2.3 From 56fae4304c8eea2dfb9037ded1aa774acb110d69 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Wed, 3 Aug 2022 22:47:55 +0200 Subject: crypto: keembay-ocs - Drop obsolete dependency on COMPILE_TEST Since commit 0166dc11be91 ("of: make CONFIG_OF user selectable"), it is possible to test-build any driver which depends on OF on any architecture by explicitly selecting OF. Therefore depending on COMPILE_TEST as an alternative is no longer needed. It is actually better to always build such drivers with OF enabled, so that the test builds are closer to how each driver will actually be built on its intended target. Building them without OF may not test much as the compiler will optimize out potentially large parts of the code. In the worst case, this could even pop false positive warnings. Dropping COMPILE_TEST here improves the quality of our testing and avoids wasting time on non-existent issues. Signed-off-by: Jean Delvare Cc: Declan Murphy Cc: Daniele Alessandrelli Cc: Mark Gross Cc: Herbert Xu Cc: Prabhjot Khurana Cc: "David S. Miller" Signed-off-by: Herbert Xu --- drivers/crypto/keembay/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig index 7942b48dd55a..1cd62f9c3e3a 100644 --- a/drivers/crypto/keembay/Kconfig +++ b/drivers/crypto/keembay/Kconfig @@ -42,7 +42,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS config CRYPTO_DEV_KEEMBAY_OCS_ECC tristate "Support for Intel Keem Bay OCS ECC HW acceleration" depends on ARCH_KEEMBAY || COMPILE_TEST - depends on OF || COMPILE_TEST + depends on OF depends on HAS_IOMEM select CRYPTO_ECDH select CRYPTO_ENGINE @@ -64,7 +64,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_HCU select CRYPTO_ENGINE depends on HAS_IOMEM depends on ARCH_KEEMBAY || COMPILE_TEST - depends on OF || COMPILE_TEST + depends on OF help Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash Control Unit (HCU) hardware acceleration for use with Crypto API. -- cgit v1.2.3 From b3b9fdf1a9be4266b01a2063b1f37cdc20806e3b Mon Sep 17 00:00:00 2001 From: Jarkko Sakkinen Date: Wed, 10 Aug 2022 01:49:15 +0300 Subject: crypto: ccp - Add a quirk to firmware update Add a quirk to fix the committed TCB version when upgrading from a firmware version earlier than 1.50. This is a known issue, and the documented workaround is to load the firmware twice. Currently, this issue requires the following workaround: sudo modprobe -r kvm_amd sudo modprobe -r ccp sudo modprobe ccp sudo modprobe kvm_amd Implement this workaround inside the kernel by checking whether the API version is less than 1.50, and if so, download the firmware twice.
This addresses the TCB version issue. Link: https://lore.kernel.org/all/de02389f-249d-f565-1136-4af3655fab2a@profian.com/ Reported-by: Harald Hoyer Signed-off-by: Jarkko Sakkinen Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 9f588c9728f8..b292641c8a99 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -744,6 +744,11 @@ static int sev_update_firmware(struct device *dev) struct page *p; u64 data_size; + if (!sev_version_greater_or_equal(0, 15)) { + dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); + return -1; + } + if (sev_get_firmware(dev, &firmware) == -ENOENT) { dev_dbg(dev, "No SEV firmware file present\n"); return -1; @@ -776,6 +781,14 @@ static int sev_update_firmware(struct device *dev) data->len = firmware->size; ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); + + /* + * A quirk for fixing the committed TCB version, when upgrading from + * earlier firmware version than 1.50. + */ + if (!ret && !sev_version_greater_or_equal(1, 50)) + ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); + if (ret) dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); else @@ -1285,8 +1298,7 @@ void sev_pci_init(void) if (sev_get_api_version()) goto err; - if (sev_version_greater_or_equal(0, 15) && - sev_update_firmware(sev->dev) == 0) + if (sev_update_firmware(sev->dev) == 0) sev_get_api_version(); /* If an init_ex_path is provided rely on INIT_EX for PSP initialization -- cgit v1.2.3 From 450df3ecef4df9d94f7ca0d68527944418b2d1ed Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Thu, 11 Aug 2022 20:07:05 +0800 Subject: crypto: cavium - Fix comment typo The word `the' is duplicated in the comment; remove one. Signed-off-by: Jason Wang Signed-off-by: Herbert Xu --- drivers/crypto/cavium/cpt/cpt_hw_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h index 8ec6edc69f3f..ae4791a8ec4a 100644 --- a/drivers/crypto/cavium/cpt/cpt_hw_types.h +++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h @@ -396,7 +396,7 @@ union cptx_vqx_misc_ena_w1s { * Word0 * reserved_20_63:44 [63:20] Reserved. * dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add - * to the CPT instruction doorbell count. Readback value is the the + * to the CPT instruction doorbell count. Readback value is the * current number of pending doorbell requests. If counter overflows * CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to * zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF], -- cgit v1.2.3 From 4f336045276b26c1620d0cb64d4af39ec508f436 Mon Sep 17 00:00:00 2001 From: Yang Shen Date: Sat, 13 Aug 2022 17:57:52 +0800 Subject: crypto: hisilicon/zip - optimization for performance 1. Remove some useless steps when processing requests. 2. Improve branch prediction by annotating unlikely error paths (see the sketch below).
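For context on point 2 (an illustration, not part of the diff): likely()/unlikely() expand to __builtin_expect() and let the compiler lay cold error paths out of the hot instruction sequence, e.g.:

        /* Error paths that almost never trigger are annotated unlikely() */
        ret = hisi_qp_send(qp, &zip_sqe);
        if (unlikely(ret < 0))
                return -EAGAIN; /* cold path, moved off the hot sequence */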
Signed-off-by: Yang Shen Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_crypto.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 06a2d6e81ae9..6b3f8da150ad 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -183,7 +183,7 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type) int ret; ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); - if (ret != head_size) { + if (unlikely(ret != head_size)) { pr_err("the head size of buffer is wrong (%d)!\n", ret); return -ENOMEM; } @@ -193,11 +193,11 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type) static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) { - if (!acomp_req->src || !acomp_req->slen) + if (unlikely(!acomp_req->src || !acomp_req->slen)) return -EINVAL; - if (req_type == HZIP_ALG_TYPE_GZIP && - acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT) + if (unlikely(req_type == HZIP_ALG_TYPE_GZIP && + acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)) return -EINVAL; switch (req_type) { @@ -230,6 +230,8 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, } set_bit(req_id, req_q->req_bitmap); + write_unlock(&req_q->req_lock); + req_cache = q + req_id; req_cache->req_id = req_id; req_cache->req = req; @@ -242,8 +244,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, req_cache->dskip = 0; } - write_unlock(&req_q->req_lock); - return req_cache; } @@ -254,7 +254,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, write_lock(&req_q->req_lock); clear_bit(req->req_id, req_q->req_bitmap); - memset(req, 0, sizeof(struct hisi_zip_req)); write_unlock(&req_q->req_lock); } @@ -339,7 +338,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_zip_sqe zip_sqe; int ret; - if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) + if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)) return -EINVAL; req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, @@ -365,7 +364,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, /* send command to start a task */ atomic64_inc(&dfx->send_cnt); ret = hisi_qp_send(qp, &zip_sqe); - if (ret < 0) { + if (unlikely(ret < 0)) { atomic64_inc(&dfx->send_busy_cnt); ret = -EAGAIN; dev_dbg_ratelimited(dev, "failed to send request!\n"); @@ -417,7 +416,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) atomic64_inc(&dfx->recv_cnt); status = ops->get_status(sqe); - if (status != 0 && status != HZIP_NC_ERR) { + if (unlikely(status != 0 && status != HZIP_NC_ERR)) { dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", (qp->alg_type == 0) ? 
"" : "de", qp->qp_id, status, sqe->produced); @@ -450,7 +449,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req) /* let's output compression head now */ head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); - if (head_size < 0) { + if (unlikely(head_size < 0)) { dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", head_size); return head_size; @@ -461,7 +460,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req) return PTR_ERR(req); ret = hisi_zip_do_work(req, qp_ctx); - if (ret != -EINPROGRESS) { + if (unlikely(ret != -EINPROGRESS)) { dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); hisi_zip_remove_req(qp_ctx, req); } @@ -478,7 +477,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req) int head_size, ret; head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); - if (head_size < 0) { + if (unlikely(head_size < 0)) { dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", head_size); return head_size; @@ -489,7 +488,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req) return PTR_ERR(req); ret = hisi_zip_do_work(req, qp_ctx); - if (ret != -EINPROGRESS) { + if (unlikely(ret != -EINPROGRESS)) { dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", ret); hisi_zip_remove_req(qp_ctx, req); -- cgit v1.2.3 From 6d9a899557c8751372da99d137eaaa9cdbe81f41 Mon Sep 17 00:00:00 2001 From: Yang Shen Date: Sat, 13 Aug 2022 18:19:39 +0800 Subject: crypto: hisilicon/zip - some misc cleanup Some cleanup for code: 1. Change names for easy to understand. 2. Unify the variables type. 3. Use the right return value. Signed-off-by: Yang Shen Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip.h | 2 +- drivers/crypto/hisilicon/zip/zip_crypto.c | 30 +++++++++++++++--------------- drivers/crypto/hisilicon/zip/zip_main.c | 10 +++++++--- 3 files changed, 23 insertions(+), 19 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 3dfd3bac5a33..f289656e9ac0 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -81,7 +81,7 @@ struct hisi_zip_sqe { u32 rsvd1[4]; }; -int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); +int zip_create_qps(struct hisi_qp **qps, int qp_num, int node); int hisi_zip_register_to_crypto(struct hisi_qm *qm); void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 6b3f8da150ad..a6c914d527eb 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -135,7 +135,7 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); -static u16 get_extra_field_size(const u8 *start) +static u32 get_extra_field_size(const u8 *start) { return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; } @@ -167,7 +167,7 @@ static u32 __get_gzip_head_size(const u8 *src) return size; } -static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl) +static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl) { char buf[HZIP_GZIP_HEAD_BUF]; @@ -497,7 +497,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req) return ret; } -static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, +static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx, int alg_type, int 
req_type) { struct device *dev = &qp->qm->pdev->dev; @@ -505,7 +505,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, qp->req_type = req_type; qp->alg_type = alg_type; - qp->qp_ctx = ctx; + qp->qp_ctx = qp_ctx; ret = hisi_qm_start_qp(qp, 0); if (ret < 0) { @@ -513,15 +513,15 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, return ret; } - ctx->qp = qp; + qp_ctx->qp = qp; return 0; } -static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) +static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx) { - hisi_qm_stop_qp(ctx->qp); - hisi_qm_free_qps(&ctx->qp, 1); + hisi_qm_stop_qp(qp_ctx->qp); + hisi_qm_free_qps(&qp_ctx->qp, 1); } static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = { @@ -593,7 +593,7 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) { int i; - for (i = 1; i >= 0; i--) + for (i = 0; i < HZIP_CTX_Q_NUM; i++) hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); } @@ -612,7 +612,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) if (i == 0) return ret; - goto err_free_loop0; + goto err_free_comp_q; } rwlock_init(&req_q->req_lock); @@ -621,19 +621,19 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) if (!req_q->q) { ret = -ENOMEM; if (i == 0) - goto err_free_bitmap; + goto err_free_comp_bitmap; else - goto err_free_loop1; + goto err_free_decomp_bitmap; } } return 0; -err_free_loop1: +err_free_decomp_bitmap: bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap); -err_free_loop0: +err_free_comp_q: kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q); -err_free_bitmap: +err_free_comp_bitmap: bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap); return ret; } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index c3303d99acac..04c8a4c65d77 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -586,8 +586,9 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp, return len; tbuf[len] = '\0'; - if (kstrtoul(tbuf, 0, &val)) - return -EFAULT; + ret = kstrtoul(tbuf, 0, &val); + if (ret) + return ret; ret = hisi_qm_get_dfx_access(qm); if (ret) @@ -976,7 +977,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) qm->err_ini = &hisi_zip_err_ini; qm->err_ini->err_info_init(qm); - hisi_zip_set_user_domain_and_cache(qm); + ret = hisi_zip_set_user_domain_and_cache(qm); + if (ret) + return ret; + hisi_zip_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); -- cgit v1.2.3 From 582b05bba481d5798ef884f1396285ab47e426e1 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 13 Aug 2022 18:34:21 +0800 Subject: crypto: hisilicon/hpre - change return type of hpre_cluster_inqry_write() hpre_cluster_inqry_write() always returns 0. So change the type of hpre_cluster_inqry_write() to void. 
Signed-off-by: Weili Qian Signed-off-by: Yang Shen Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 9d529df0eab9..8e0e87cede6b 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -708,7 +708,7 @@ static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file) return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); } -static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) +static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) { struct hisi_qm *qm = hpre_file_to_qm(file); int cluster_index = file->index - HPRE_CLUSTER_CTRL; @@ -716,8 +716,6 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) HPRE_CLSTR_ADDR_INTRVL; writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); - - return 0; } static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, @@ -792,9 +790,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, goto err_input; break; case HPRE_CLUSTER_CTRL: - ret = hpre_cluster_inqry_write(file, val); - if (ret) - goto err_input; + hpre_cluster_inqry_write(file, val); break; default: ret = -EINVAL; -- cgit v1.2.3 From 116be08f6e4e385733d42360a33c3d883d2dd702 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 13 Aug 2022 18:34:52 +0800 Subject: crypto: hisilicon/qm - fix missing destroy qp_idr In the function hisi_qm_memory_init(), if resource alloc fails after idr_init, the initialized qp_idr needs to be destroyed. Signed-off-by: Weili Qian Signed-off-by: Yang Shen Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 9701434a8ef3..728dcee0b1ff 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -6139,8 +6139,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) GFP_ATOMIC); dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); if (!qm->qdma.va) { - ret = -ENOMEM; - goto err_alloc_qdma; + ret = -ENOMEM; + goto err_destroy_idr; } QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); @@ -6156,7 +6156,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) err_alloc_qp_array: dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); -err_alloc_qdma: +err_destroy_idr: + idr_destroy(&qm->qp_idr); kfree(qm->factor); return ret; -- cgit v1.2.3 From 1129d2d533195993aa0b0d0cd1c868950e01770d Mon Sep 17 00:00:00 2001 From: Junchong Pan Date: Sat, 13 Aug 2022 18:35:15 +0800 Subject: crypto: hisilicon/qm - remove unneeded data storage The dump_show() function is used to output hardware information for error locating. There is no need to allocate memory to temporarily store the converted data; it can be output directly. Therefore, remove some unnecessary code.
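For illustration, the rewritten dump_show() below gets the same most-significant-byte-first layout by indexing within each DWORD instead of staging a byte-swapped copy; a sketch of the output loop, with info_curr walking the raw data:

        /* Print each 4-byte group MSB-first without a temporary buffer */
        for (i = 0; i < info_size; i += 4, info_curr += 4)
                pr_info("DW%u: %02X%02X %02X%02X\n", i / 4,
                        info_curr[3], info_curr[2], info_curr[1], info_curr[0]);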
Signed-off-by: Junchong Pan Signed-off-by: Yang Shen Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 80 +++++++++++-------------------------------- 1 file changed, 20 insertions(+), 60 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 728dcee0b1ff..927d9fa7b6b2 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1857,39 +1857,19 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, kfree(ctx_addr); } -static int dump_show(struct hisi_qm *qm, void *info, +static void dump_show(struct hisi_qm *qm, void *info, unsigned int info_size, char *info_name) { struct device *dev = &qm->pdev->dev; - u8 *info_buf, *info_curr = info; + u8 *info_curr = info; u32 i; #define BYTE_PER_DW 4 - info_buf = kzalloc(info_size, GFP_KERNEL); - if (!info_buf) - return -ENOMEM; - - for (i = 0; i < info_size; i++, info_curr++) { - if (i % BYTE_PER_DW == 0) - info_buf[i + 3UL] = *info_curr; - else if (i % BYTE_PER_DW == 1) - info_buf[i + 1UL] = *info_curr; - else if (i % BYTE_PER_DW == 2) - info_buf[i - 1] = *info_curr; - else if (i % BYTE_PER_DW == 3) - info_buf[i - 3] = *info_curr; - } - dev_info(dev, "%s DUMP\n", info_name); - for (i = 0; i < info_size; i += BYTE_PER_DW) { + for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) { pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW, - info_buf[i], info_buf[i + 1UL], - info_buf[i + 2UL], info_buf[i + 3UL]); + *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr)); } - - kfree(info_buf); - - return 0; } static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) @@ -1929,23 +1909,18 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s) if (qm->sqc) { sqc_curr = qm->sqc + qp_id; - ret = dump_show(qm, sqc_curr, sizeof(*sqc), - "SOFT SQC"); - if (ret) - dev_info(dev, "Show soft sqc failed!\n"); + dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC"); } up_read(&qm->qps_lock); - goto err_free_ctx; + goto free_ctx; } - ret = dump_show(qm, sqc, sizeof(*sqc), "SQC"); - if (ret) - dev_info(dev, "Show hw sqc failed!\n"); + dump_show(qm, sqc, sizeof(*sqc), "SQC"); -err_free_ctx: +free_ctx: qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); - return ret; + return 0; } static int qm_cqc_dump(struct hisi_qm *qm, const char *s) @@ -1975,23 +1950,18 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s) if (qm->cqc) { cqc_curr = qm->cqc + qp_id; - ret = dump_show(qm, cqc_curr, sizeof(*cqc), - "SOFT CQC"); - if (ret) - dev_info(dev, "Show soft cqc failed!\n"); + dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC"); } up_read(&qm->qps_lock); - goto err_free_ctx; + goto free_ctx; } - ret = dump_show(qm, cqc, sizeof(*cqc), "CQC"); - if (ret) - dev_info(dev, "Show hw cqc failed!\n"); + dump_show(qm, cqc, sizeof(*cqc), "CQC"); -err_free_ctx: +free_ctx: qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); - return ret; + return 0; } static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, @@ -2015,9 +1985,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, if (ret) goto err_free_ctx; - ret = dump_show(qm, xeqc, size, name); - if (ret) - dev_info(dev, "Show hw %s failed!\n", name); + dump_show(qm, xeqc, size, name); err_free_ctx: qm_ctx_free(qm, size, xeqc, &xeqc_dma); @@ -2066,7 +2034,6 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s, static int qm_sq_dump(struct hisi_qm *qm, char *s) { - struct device *dev = &qm->pdev->dev; void *sqe, *sqe_curr; struct hisi_qp *qp; u32 
qp_id, sqe_id; @@ -2086,18 +2053,15 @@ static int qm_sq_dump(struct hisi_qm *qm, char *s) memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, qm->debug.sqe_mask_len); - ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); - if (ret) - dev_info(dev, "Show sqe failed!\n"); + dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); kfree(sqe); - return ret; + return 0; } static int qm_cq_dump(struct hisi_qm *qm, char *s) { - struct device *dev = &qm->pdev->dev; struct qm_cqe *cqe_curr; struct hisi_qp *qp; u32 qp_id, cqe_id; @@ -2109,11 +2073,9 @@ static int qm_cq_dump(struct hisi_qm *qm, char *s) qp = &qm->qp_array[qp_id]; cqe_curr = qp->cqe + cqe_id; - ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); - if (ret) - dev_info(dev, "Show cqe failed!\n"); + dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); - return ret; + return 0; } static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, @@ -2150,9 +2112,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, goto err_unlock; } - ret = dump_show(qm, xeqe, size, name); - if (ret) - dev_info(dev, "Show %s failed!\n", name); + dump_show(qm, xeqe, size, name); err_unlock: up_read(&qm->qps_lock); -- cgit v1.2.3 From 6a088a2cbcaf40747cf2881df47f4f5d65acd7ab Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 13 Aug 2022 18:35:45 +0800 Subject: crypto: hisilicon/qm - remove unneeded hardware cache write back Data in the hardware cache needs to be written back to the memory before the queue memory is released. Currently, the queue memory is allocated when the driver is loaded and released when the driver is removed. Therefore, the hardware cache does not need to be written back when a process puts a queue. Signed-off-by: Weili Qian Signed-off-by: Yang Shen Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 927d9fa7b6b2..b2e7abff1b8a 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3246,7 +3246,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q) { struct hisi_qp *qp = q->priv; - hisi_qm_cache_wb(qp->qm); hisi_qm_release_qp(qp); } -- cgit v1.2.3 From 90cb3ca2fa4f5c4c3fbeb9014584b69b1ba26242 Mon Sep 17 00:00:00 2001 From: Tuo Cao <91tuocao@gmail.com> Date: Sun, 14 Aug 2022 21:00:54 +0800 Subject: crypto: artpec6 - move spin_lock_bh to spin_lock in tasklet It is unnecessary to call spin_lock_bh() in a tasklet: softirqs are already disabled there, so plain spin_lock() suffices. Signed-off-by: Tuo Cao <91tuocao@gmail.com> Signed-off-by: Herbert Xu --- drivers/crypto/axis/artpec6_crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 9ad188cffd0d..b4820594ab80 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2091,7 +2091,7 @@ static void artpec6_crypto_task(unsigned long data) return; } - spin_lock_bh(&ac->queue_lock); + spin_lock(&ac->queue_lock); list_for_each_entry_safe(req, n, &ac->pending, list) { struct artpec6_crypto_dma_descriptors *dma = req->dma; @@ -2128,7 +2128,7 @@ static void artpec6_crypto_task(unsigned long data) artpec6_crypto_process_queue(ac, &complete_in_progress); - spin_unlock_bh(&ac->queue_lock); + spin_unlock(&ac->queue_lock); /* Perform the completion callbacks without holding the queue lock * to allow new request submissions from the callbacks.
-- cgit v1.2.3 From d8da2da21fdb1f5964c11c00f0cc84fb0edf31d0 Mon Sep 17 00:00:00 2001 From: Jacky Li Date: Tue, 16 Aug 2022 19:32:08 +0000 Subject: crypto: ccp - Initialize PSP when reading psp data file failed Currently the OS fails the PSP initialization when the file specified at 'init_ex_path' does not exist or has invalid content. However the SEV spec just requires users to allocate 32KB of 0xFF in the file, which can be taken care of by the OS easily. To improve the robustness during the PSP init, leverage the retry mechanism and continue the init process: Before the first INIT_EX call, if the content is invalid or missing, continue the process by feeding those contents into PSP instead of aborting. PSP will then override it with 32KB 0xFF and return SEV_RET_SECURE_DATA_INVALID status code. In the second INIT_EX call, this 32KB 0xFF content will then be fed and PSP will write the valid data to the file. In order to do this, sev_read_init_ex_file should only be called once for the first INIT_EX call. Calling it again for the second INIT_EX call will cause the invalid file content overwriting the valid 32KB 0xFF data provided by PSP in the first INIT_EX call. Co-developed-by: Peter Gonda Signed-off-by: Peter Gonda Signed-off-by: Jacky Li Reported-by: Alper Gun Acked-by: David Rientjes Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index b292641c8a99..8512101f0bdf 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -211,18 +211,24 @@ static int sev_read_init_ex_file(void) if (IS_ERR(fp)) { int ret = PTR_ERR(fp); - dev_err(sev->dev, - "SEV: could not open %s for read, error %d\n", - init_ex_path, ret); + if (ret == -ENOENT) { + dev_info(sev->dev, + "SEV: %s does not exist and will be created later.\n", + init_ex_path); + ret = 0; + } else { + dev_err(sev->dev, + "SEV: could not open %s for read, error %d\n", + init_ex_path, ret); + } return ret; } nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL); if (nread != NV_LENGTH) { - dev_err(sev->dev, - "SEV: failed to read %u bytes to non volatile memory area, ret %ld\n", + dev_info(sev->dev, + "SEV: could not read %u bytes to non volatile memory area, ret %ld\n", NV_LENGTH, nread); - return -EIO; } dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread); @@ -410,17 +416,12 @@ static int __sev_init_locked(int *error) static int __sev_init_ex_locked(int *error) { struct sev_data_init_ex data; - int ret; memset(&data, 0, sizeof(data)); data.length = sizeof(data); data.nv_address = __psp_pa(sev_init_ex_buffer); data.nv_len = NV_LENGTH; - ret = sev_read_init_ex_file(); - if (ret) - return ret; - if (sev_es_tmr) { /* * Do not include the encryption mask on the physical @@ -439,7 +440,7 @@ static int __sev_platform_init_locked(int *error) { struct psp_device *psp = psp_master; struct sev_device *sev; - int rc, psp_ret = -1; + int rc = 0, psp_ret = -1; int (*init_function)(int *error); if (!psp || !psp->sev_data) @@ -450,8 +451,15 @@ static int __sev_platform_init_locked(int *error) if (sev->state == SEV_STATE_INIT) return 0; - init_function = sev_init_ex_buffer ? 
__sev_init_ex_locked : - __sev_init_locked; + if (sev_init_ex_buffer) { + init_function = __sev_init_ex_locked; + rc = sev_read_init_ex_file(); + if (rc) + return rc; + } else { + init_function = __sev_init_locked; + } + rc = init_function(&psp_ret); if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { /* -- cgit v1.2.3 From efb4b01c1c993d245e6608076684ff2162cf9dc6 Mon Sep 17 00:00:00 2001 From: Jacky Li Date: Tue, 16 Aug 2022 19:32:09 +0000 Subject: crypto: ccp - Fail the PSP initialization when writing psp data file failed Currently the OS continues the PSP initialization when there is a write failure to the init_ex_file. Therefore, the userspace would be told that SEV is properly INIT'd even though the psp data file is not updated. This is problematic because later when asked for the SEV data, the OS won't be able to provide it. Fixes: 3d725965f836 ("crypto: ccp - Add SEV_INIT_EX support") Reported-by: Peter Gonda Reported-by: kernel test robot Signed-off-by: Jacky Li Acked-by: David Rientjes Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 8512101f0bdf..06fc7156c04f 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -237,7 +237,7 @@ static int sev_read_init_ex_file(void) return 0; } -static void sev_write_init_ex_file(void) +static int sev_write_init_ex_file(void) { struct sev_device *sev = psp_master->sev_data; struct file *fp; @@ -247,14 +247,16 @@ static void sev_write_init_ex_file(void) lockdep_assert_held(&sev_cmd_mutex); if (!sev_init_ex_buffer) - return; + return 0; fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600); if (IS_ERR(fp)) { + int ret = PTR_ERR(fp); + dev_err(sev->dev, - "SEV: could not open file for write, error %ld\n", - PTR_ERR(fp)); - return; + "SEV: could not open file for write, error %d\n", + ret); + return ret; } nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset); @@ -265,18 +267,20 @@ static void sev_write_init_ex_file(void) dev_err(sev->dev, "SEV: failed to write %u bytes to non volatile memory area, ret %ld\n", NV_LENGTH, nwrite); - return; + return -EIO; } dev_dbg(sev->dev, "SEV: write successful to NV file\n"); + + return 0; } -static void sev_write_init_ex_file_if_required(int cmd_id) +static int sev_write_init_ex_file_if_required(int cmd_id) { lockdep_assert_held(&sev_cmd_mutex); if (!sev_init_ex_buffer) - return; + return 0; /* * Only a few platform commands modify the SPI/NV area, but none of the @@ -291,10 +295,10 @@ static void sev_write_init_ex_file_if_required(int cmd_id) case SEV_CMD_PEK_GEN: break; default: - return; + return 0; } - sev_write_init_ex_file(); + return sev_write_init_ex_file(); } static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) @@ -367,7 +371,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) cmd, reg & PSP_CMDRESP_ERR_MASK); ret = -EIO; } else { - sev_write_init_ex_file_if_required(cmd); + ret = sev_write_init_ex_file_if_required(cmd); } print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, -- cgit v1.2.3 From 108713a713c7e4b7d07e6cd9b808503d5bb7089b Mon Sep 17 00:00:00 2001 From: Neal Liu Date: Thu, 18 Aug 2022 11:59:52 +0800 Subject: crypto: aspeed - Add HACE hash driver Hash and Crypto Engine (HACE) is designed to accelerate the throughput of hash data digest, encryption, and decryption. 
Basically, HACE can be divided into two independent engines - Hash Engine and Crypto Engine. This patch adds the HACE hash engine driver for the hash accelerator. Signed-off-by: Neal Liu Signed-off-by: Johnny Huang Reviewed-by: Dhananjay Phadke Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 + drivers/crypto/Makefile | 1 + drivers/crypto/aspeed/Kconfig | 32 + drivers/crypto/aspeed/Makefile | 6 + drivers/crypto/aspeed/aspeed-hace-hash.c | 1389 ++++++++++++++++++++++++++++++ drivers/crypto/aspeed/aspeed-hace.c | 206 +++++ drivers/crypto/aspeed/aspeed-hace.h | 186 ++++ 7 files changed, 1821 insertions(+) create mode 100644 drivers/crypto/aspeed/Kconfig create mode 100644 drivers/crypto/aspeed/Makefile create mode 100644 drivers/crypto/aspeed/aspeed-hace-hash.c create mode 100644 drivers/crypto/aspeed/aspeed-hace.c create mode 100644 drivers/crypto/aspeed/aspeed-hace.h (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 3e6aa319920b..f6fe1945edd6 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -818,5 +818,6 @@ config CRYPTO_DEV_SA2UL acceleration for cryptographic algorithms on these devices. source "drivers/crypto/keembay/Kconfig" +source "drivers/crypto/aspeed/Kconfig" endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index f81703a86b98..116de173a66c 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/ +obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed/ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig new file mode 100644 index 000000000000..03e3fc61498e --- /dev/null +++ b/drivers/crypto/aspeed/Kconfig @@ -0,0 +1,32 @@ +config CRYPTO_DEV_ASPEED + tristate "Support for Aspeed cryptographic engine driver" + depends on ARCH_ASPEED + help + Hash and Crypto Engine (HACE) is designed to accelerate the + throughput of hash data digest, encryption and decryption. + + Select y here to have support for the cryptographic driver + available on Aspeed SoC. + +config CRYPTO_DEV_ASPEED_DEBUG + bool "Enable Aspeed crypto debug messages" + depends on CRYPTO_DEV_ASPEED + help + Print Aspeed crypto debugging messages if you use this + option to ask for those messages. + Avoid enabling this option for production build to + minimize driver timing. + +config CRYPTO_DEV_ASPEED_HACE_HASH + bool "Enable Aspeed Hash & Crypto Engine (HACE) hash" + depends on CRYPTO_DEV_ASPEED + select CRYPTO_ENGINE + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_HMAC + help + Select here to enable Aspeed Hash & Crypto Engine (HACE) + hash driver. + Supports multiple message digest standards, including + SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, and so on.
diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile new file mode 100644 index 000000000000..8bc8d4fed5a9 --- /dev/null +++ b/drivers/crypto/aspeed/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o +aspeed_crypto-objs := aspeed-hace.o \ + $(hace-hash-y) + +obj-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) += aspeed-hace-hash.o +hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c new file mode 100644 index 000000000000..0a44ffc0e13b --- /dev/null +++ b/drivers/crypto/aspeed/aspeed-hace-hash.c @@ -0,0 +1,1389 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2021 Aspeed Technology Inc. + */ + +#include "aspeed-hace.h" + +#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG +#define AHASH_DBG(h, fmt, ...) \ + dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#else +#define AHASH_DBG(h, fmt, ...) \ + dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#endif + +/* Initialization Vectors for SHA-family */ +static const __be32 sha1_iv[8] = { + cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), + cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), + cpu_to_be32(SHA1_H4), 0, 0, 0 +}; + +static const __be32 sha224_iv[8] = { + cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), + cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), + cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), + cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), +}; + +static const __be32 sha256_iv[8] = { + cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), + cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), + cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), + cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), +}; + +static const __be64 sha384_iv[8] = { + cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1), + cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3), + cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5), + cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7) +}; + +static const __be64 sha512_iv[8] = { + cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1), + cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3), + cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5), + cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7) +}; + +static const __be32 sha512_224_iv[16] = { + cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL), + cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL), + cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL), + cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL), + cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL), + cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL), + cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL), + cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL) +}; + +static const __be32 sha512_256_iv[16] = { + cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL), + cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL), + cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL), + cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL), + cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL), + cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL), + cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL), + cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL) +}; + +/* The purpose of this padding is to ensure that the padded message is a + * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512). + * The bit "1" is appended at the end of the message followed by + * "padlen-1" zero bits. 
Then a 64 bits block (SHA1/SHA224/SHA256) or + * 128 bits block (SHA384/SHA512) equals to the message length in bits + * is appended. + * + * For SHA1/SHA224/SHA256, padlen is calculated as followed: + * - if message length < 56 bytes then padlen = 56 - message length + * - else padlen = 64 + 56 - message length + * + * For SHA384/SHA512, padlen is calculated as followed: + * - if message length < 112 bytes then padlen = 112 - message length + * - else padlen = 128 + 112 - message length + */ +static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev, + struct aspeed_sham_reqctx *rctx) +{ + unsigned int index, padlen; + __be64 bits[2]; + + AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags); + + switch (rctx->flags & SHA_FLAGS_MASK) { + case SHA_FLAGS_SHA1: + case SHA_FLAGS_SHA224: + case SHA_FLAGS_SHA256: + bits[0] = cpu_to_be64(rctx->digcnt[0] << 3); + index = rctx->bufcnt & 0x3f; + padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); + *(rctx->buffer + rctx->bufcnt) = 0x80; + memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1); + memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8); + rctx->bufcnt += padlen + 8; + break; + default: + bits[1] = cpu_to_be64(rctx->digcnt[0] << 3); + bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 | + rctx->digcnt[0] >> 61); + index = rctx->bufcnt & 0x7f; + padlen = (index < 112) ? (112 - index) : ((128 + 112) - index); + *(rctx->buffer + rctx->bufcnt) = 0x80; + memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1); + memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16); + rctx->bufcnt += padlen + 16; + break; + } +} + +/* + * Prepare DMA buffer before hardware engine + * processing. + */ +static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + int length, remain; + + length = rctx->total + rctx->bufcnt; + remain = length % rctx->block_size; + + AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain); + + if (rctx->bufcnt) + memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt); + + if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) { + scatterwalk_map_and_copy(hash_engine->ahash_src_addr + + rctx->bufcnt, rctx->src_sg, + rctx->offset, rctx->total - remain, 0); + rctx->offset += rctx->total - remain; + + } else { + dev_warn(hace_dev->dev, "Hash data length is too large\n"); + return -EINVAL; + } + + scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, + rctx->offset, remain, 0); + + rctx->bufcnt = remain; + rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, + SHA512_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { + dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); + return -ENOMEM; + } + + hash_engine->src_length = length - remain; + hash_engine->src_dma = hash_engine->ahash_src_dma_addr; + hash_engine->digest_dma = rctx->digest_dma_addr; + + return 0; +} + +/* + * Prepare DMA buffer as SG list buffer before + * hardware engine processing. 
+ */ +static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + struct aspeed_sg_list *src_list; + struct scatterlist *s; + int length, remain, sg_len, i; + int rc = 0; + + remain = (rctx->total + rctx->bufcnt) % rctx->block_size; + length = rctx->total + rctx->bufcnt - remain; + + AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x, %s:0x%x\n", + "rctx total", rctx->total, "bufcnt", rctx->bufcnt, + "length", length, "remain", remain); + + sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, + DMA_TO_DEVICE); + if (!sg_len) { + dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); + rc = -ENOMEM; + goto end; + } + + src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr; + rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, + SHA512_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { + dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); + rc = -ENOMEM; + goto free_src_sg; + } + + if (rctx->bufcnt != 0) { + rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, + rctx->buffer, + rctx->block_size * 2, + DMA_TO_DEVICE); + if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { + dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); + rc = -ENOMEM; + goto free_rctx_digest; + } + + src_list[0].phy_addr = rctx->buffer_dma_addr; + src_list[0].len = rctx->bufcnt; + length -= src_list[0].len; + + /* Last sg list */ + if (length == 0) + src_list[0].len |= HASH_SG_LAST_LIST; + + src_list[0].phy_addr = cpu_to_le32(src_list[0].phy_addr); + src_list[0].len = cpu_to_le32(src_list[0].len); + src_list++; + } + + if (length != 0) { + for_each_sg(rctx->src_sg, s, sg_len, i) { + src_list[i].phy_addr = sg_dma_address(s); + + if (length > sg_dma_len(s)) { + src_list[i].len = sg_dma_len(s); + length -= sg_dma_len(s); + + } else { + /* Last sg list */ + src_list[i].len = length; + src_list[i].len |= HASH_SG_LAST_LIST; + length = 0; + } + + src_list[i].phy_addr = cpu_to_le32(src_list[i].phy_addr); + src_list[i].len = cpu_to_le32(src_list[i].len); + } + } + + if (length != 0) { + rc = -EINVAL; + goto free_rctx_buffer; + } + + rctx->offset = rctx->total - remain; + hash_engine->src_length = rctx->total + rctx->bufcnt - remain; + hash_engine->src_dma = hash_engine->ahash_src_dma_addr; + hash_engine->digest_dma = rctx->digest_dma_addr; + + return 0; + +free_rctx_buffer: + if (rctx->bufcnt != 0) + dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, + rctx->block_size * 2, DMA_TO_DEVICE); +free_rctx_digest: + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); +free_src_sg: + dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, + DMA_TO_DEVICE); +end: + return rc; +} + +static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + + AHASH_DBG(hace_dev, "\n"); + + hash_engine->flags &= ~CRYPTO_FLAGS_BUSY; + + crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0); + + return 0; +} + +/* + * Copy digest to the corresponding request result. + * This function will be called at final() stage. 
+ */ +static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + + AHASH_DBG(hace_dev, "\n"); + + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); + + dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, + rctx->block_size * 2, DMA_TO_DEVICE); + + memcpy(req->result, rctx->digest, rctx->digsize); + + return aspeed_ahash_complete(hace_dev); +} + +/* + * Trigger the hardware engine to do the math. + */ +static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev, + aspeed_hace_fn_t resume) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + + AHASH_DBG(hace_dev, "src_dma:0x%x, digest_dma:0x%x, length:0x%x\n", + hash_engine->src_dma, hash_engine->digest_dma, + hash_engine->src_length); + + rctx->cmd |= HASH_CMD_INT_ENABLE; + hash_engine->resume = resume; + + ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC); + ast_hace_write(hace_dev, hash_engine->digest_dma, + ASPEED_HACE_HASH_DIGEST_BUFF); + ast_hace_write(hace_dev, hash_engine->digest_dma, + ASPEED_HACE_HASH_KEY_BUFF); + ast_hace_write(hace_dev, hash_engine->src_length, + ASPEED_HACE_HASH_DATA_LEN); + + /* Memory barrier to ensure all data setup before engine starts */ + mb(); + + ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD); + + return -EINPROGRESS; +} + +/* + * HMAC resume performs the second hash pass, which + * produces the final HMAC code from the outer key + * pad and the inner hash result. + */ +static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_sha_hmac_ctx *bctx = tctx->base; + int rc = 0; + + AHASH_DBG(hace_dev, "\n"); + + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); + + dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, + rctx->block_size * 2, DMA_TO_DEVICE); + + /* outer key pad (opad) followed by the inner hash result */ + memcpy(rctx->buffer, bctx->opad, rctx->block_size); + memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize); + + rctx->bufcnt = rctx->block_size + rctx->digsize; + rctx->digcnt[0] = rctx->block_size + rctx->digsize; + + aspeed_ahash_fill_padding(hace_dev, rctx); + memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize); + + rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, + SHA512_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { + dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); + rc = -ENOMEM; + goto end; + } + + rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer, + rctx->block_size * 2, + DMA_TO_DEVICE); + if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { + dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); + rc = -ENOMEM; + goto free_rctx_digest; + } + + hash_engine->src_dma = rctx->buffer_dma_addr; + hash_engine->src_length = rctx->bufcnt; + hash_engine->digest_dma = rctx->digest_dma_addr; + + return aspeed_hace_ahash_trigger(hace_dev,
aspeed_ahash_transfer); + +free_rctx_digest: + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); +end: + return rc; +} + +static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + int rc = 0; + + AHASH_DBG(hace_dev, "\n"); + + aspeed_ahash_fill_padding(hace_dev, rctx); + + rctx->digest_dma_addr = dma_map_single(hace_dev->dev, + rctx->digest, + SHA512_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { + dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); + rc = -ENOMEM; + goto end; + } + + rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, + rctx->buffer, + rctx->block_size * 2, + DMA_TO_DEVICE); + if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { + dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); + rc = -ENOMEM; + goto free_rctx_digest; + } + + hash_engine->src_dma = rctx->buffer_dma_addr; + hash_engine->src_length = rctx->bufcnt; + hash_engine->digest_dma = rctx->digest_dma_addr; + + if (rctx->flags & SHA_FLAGS_HMAC) + return aspeed_hace_ahash_trigger(hace_dev, + aspeed_ahash_hmac_resume); + + return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer); + +free_rctx_digest: + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); +end: + return rc; +} + +static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + + AHASH_DBG(hace_dev, "\n"); + + dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, + DMA_TO_DEVICE); + + if (rctx->bufcnt != 0) + dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, + rctx->block_size * 2, + DMA_TO_DEVICE); + + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); + + scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset, + rctx->total - rctx->offset, 0); + + rctx->bufcnt = rctx->total - rctx->offset; + rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL; + + if (rctx->flags & SHA_FLAGS_FINUP) + return aspeed_ahash_req_final(hace_dev); + + return aspeed_ahash_complete(hace_dev); +} + +static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + + AHASH_DBG(hace_dev, "\n"); + + dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, + SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); + + if (rctx->flags & SHA_FLAGS_FINUP) + return aspeed_ahash_req_final(hace_dev); + + return aspeed_ahash_complete(hace_dev); +} + +static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + aspeed_hace_fn_t resume; + int ret; + + AHASH_DBG(hace_dev, "\n"); + + if (hace_dev->version == AST2600_VERSION) { + rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL; + resume = aspeed_ahash_update_resume_sg; + + } else { + resume = aspeed_ahash_update_resume; + } + + ret = hash_engine->dma_prepare(hace_dev); + if (ret) + return ret; + + 
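/* Data is staged for DMA; the interrupt's done tasklet will call the + * chosen resume handler once the engine completes. + */ +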
return aspeed_hace_ahash_trigger(hace_dev, resume); +} + +static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev, + struct ahash_request *req) +{ + return crypto_transfer_hash_request_to_engine( + hace_dev->crypt_engine_hash, req); +} + +static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq) +{ + struct ahash_request *req = ahash_request_cast(areq); + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + struct aspeed_engine_hash *hash_engine; + int ret = 0; + + hash_engine = &hace_dev->hash_engine; + hash_engine->flags |= CRYPTO_FLAGS_BUSY; + + if (rctx->op == SHA_OP_UPDATE) + ret = aspeed_ahash_req_update(hace_dev); + else if (rctx->op == SHA_OP_FINAL) + ret = aspeed_ahash_req_final(hace_dev); + + if (ret != -EINPROGRESS) + return ret; + + return 0; +} + +static int aspeed_ahash_prepare_request(struct crypto_engine *engine, + void *areq) +{ + struct ahash_request *req = ahash_request_cast(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + struct aspeed_engine_hash *hash_engine; + + hash_engine = &hace_dev->hash_engine; + hash_engine->req = req; + + if (hace_dev->version == AST2600_VERSION) + hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg; + else + hash_engine->dma_prepare = aspeed_ahash_dma_prepare; + + return 0; +} + +static int aspeed_sham_update(struct ahash_request *req) +{ + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + + AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes); + + rctx->total = req->nbytes; + rctx->src_sg = req->src; + rctx->offset = 0; + rctx->src_nents = sg_nents(req->src); + rctx->op = SHA_OP_UPDATE; + + rctx->digcnt[0] += rctx->total; + if (rctx->digcnt[0] < rctx->total) + rctx->digcnt[1]++; + + if (rctx->bufcnt + rctx->total < rctx->block_size) { + scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, + rctx->src_sg, rctx->offset, + rctx->total, 0); + rctx->bufcnt += rctx->total; + + return 0; + } + + return aspeed_hace_hash_handle_queue(hace_dev, req); +} + +static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags, + const u8 *data, unsigned int len, u8 *out) +{ + SHASH_DESC_ON_STACK(shash, tfm); + + shash->tfm = tfm; + + return crypto_shash_digest(shash, data, len, out); +} + +static int aspeed_sham_final(struct ahash_request *req) +{ + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + + AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n", + req->nbytes, rctx->total); + rctx->op = SHA_OP_FINAL; + + return aspeed_hace_hash_handle_queue(hace_dev, req); +} + +static int aspeed_sham_finup(struct ahash_request *req) +{ + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + int rc1, rc2; + + AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes); + + rctx->flags |= SHA_FLAGS_FINUP; + + rc1 = 
aspeed_sham_update(req); + if (rc1 == -EINPROGRESS || rc1 == -EBUSY) + return rc1; + + /* + * final() always has to be called to clean up resources, + * even if update() failed, except when -EINPROGRESS or + * -EBUSY was returned + */ + rc2 = aspeed_sham_final(req); + + return rc1 ? : rc2; +} + +static int aspeed_sham_init(struct ahash_request *req) +{ + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + struct aspeed_sha_hmac_ctx *bctx = tctx->base; + + AHASH_DBG(hace_dev, "%s: digest size:%d\n", + crypto_tfm_alg_name(&tfm->base), + crypto_ahash_digestsize(tfm)); + + rctx->cmd = HASH_CMD_ACC_MODE; + rctx->flags = 0; + + switch (crypto_ahash_digestsize(tfm)) { + case SHA1_DIGEST_SIZE: + rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP; + rctx->flags |= SHA_FLAGS_SHA1; + rctx->digsize = SHA1_DIGEST_SIZE; + rctx->block_size = SHA1_BLOCK_SIZE; + rctx->sha_iv = sha1_iv; + rctx->ivsize = 32; + memcpy(rctx->digest, sha1_iv, rctx->ivsize); + break; + case SHA224_DIGEST_SIZE: + rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP; + rctx->flags |= SHA_FLAGS_SHA224; + rctx->digsize = SHA224_DIGEST_SIZE; + rctx->block_size = SHA224_BLOCK_SIZE; + rctx->sha_iv = sha224_iv; + rctx->ivsize = 32; + memcpy(rctx->digest, sha224_iv, rctx->ivsize); + break; + case SHA256_DIGEST_SIZE: + rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP; + rctx->flags |= SHA_FLAGS_SHA256; + rctx->digsize = SHA256_DIGEST_SIZE; + rctx->block_size = SHA256_BLOCK_SIZE; + rctx->sha_iv = sha256_iv; + rctx->ivsize = 32; + memcpy(rctx->digest, sha256_iv, rctx->ivsize); + break; + case SHA384_DIGEST_SIZE: + rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 | + HASH_CMD_SHA_SWAP; + rctx->flags |= SHA_FLAGS_SHA384; + rctx->digsize = SHA384_DIGEST_SIZE; + rctx->block_size = SHA384_BLOCK_SIZE; + rctx->sha_iv = (const __be32 *)sha384_iv; + rctx->ivsize = 64; + memcpy(rctx->digest, sha384_iv, rctx->ivsize); + break; + case SHA512_DIGEST_SIZE: + rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 | + HASH_CMD_SHA_SWAP; + rctx->flags |= SHA_FLAGS_SHA512; + rctx->digsize = SHA512_DIGEST_SIZE; + rctx->block_size = SHA512_BLOCK_SIZE; + rctx->sha_iv = (const __be32 *)sha512_iv; + rctx->ivsize = 64; + memcpy(rctx->digest, sha512_iv, rctx->ivsize); + break; + default: + dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n", + crypto_ahash_digestsize(tfm)); + return -EINVAL; + } + + rctx->bufcnt = 0; + rctx->total = 0; + rctx->digcnt[0] = 0; + rctx->digcnt[1] = 0; + + /* HMAC init */ + if (tctx->flags & SHA_FLAGS_HMAC) { + rctx->digcnt[0] = rctx->block_size; + rctx->bufcnt = rctx->block_size; + memcpy(rctx->buffer, bctx->ipad, rctx->block_size); + rctx->flags |= SHA_FLAGS_HMAC; + } + + return 0; +} + +static int aspeed_sha512s_init(struct ahash_request *req) +{ + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + struct aspeed_sha_hmac_ctx *bctx = tctx->base; + + AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm)); + + rctx->cmd = HASH_CMD_ACC_MODE; + rctx->flags = 0; + + switch (crypto_ahash_digestsize(tfm)) { + case SHA224_DIGEST_SIZE: + rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 | + HASH_CMD_SHA_SWAP; + rctx->flags |= SHA_FLAGS_SHA512_224; + rctx->digsize = SHA224_DIGEST_SIZE; + rctx->block_size =
SHA512_BLOCK_SIZE; + rctx->sha_iv = sha512_224_iv; + rctx->ivsize = 64; + memcpy(rctx->digest, sha512_224_iv, rctx->ivsize); + break; + case SHA256_DIGEST_SIZE: + rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 | + HASH_CMD_SHA_SWAP; + rctx->flags |= SHA_FLAGS_SHA512_256; + rctx->digsize = SHA256_DIGEST_SIZE; + rctx->block_size = SHA512_BLOCK_SIZE; + rctx->sha_iv = sha512_256_iv; + rctx->ivsize = 64; + memcpy(rctx->digest, sha512_256_iv, rctx->ivsize); + break; + default: + dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n", + crypto_ahash_digestsize(tfm)); + return -EINVAL; + } + + rctx->bufcnt = 0; + rctx->total = 0; + rctx->digcnt[0] = 0; + rctx->digcnt[1] = 0; + + /* HMAC init */ + if (tctx->flags & SHA_FLAGS_HMAC) { + rctx->digcnt[0] = rctx->block_size; + rctx->bufcnt = rctx->block_size; + memcpy(rctx->buffer, bctx->ipad, rctx->block_size); + rctx->flags |= SHA_FLAGS_HMAC; + } + + return 0; +} + +static int aspeed_sham_digest(struct ahash_request *req) +{ + return aspeed_sham_init(req) ? : aspeed_sham_finup(req); +} + +static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + struct aspeed_sha_hmac_ctx *bctx = tctx->base; + int ds = crypto_shash_digestsize(bctx->shash); + int bs = crypto_shash_blocksize(bctx->shash); + int err = 0; + int i; + + AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base), + keylen); + + if (keylen > bs) { + err = aspeed_sham_shash_digest(bctx->shash, + crypto_shash_get_flags(bctx->shash), + key, keylen, bctx->ipad); + if (err) + return err; + keylen = ds; + + } else { + memcpy(bctx->ipad, key, keylen); + } + + memset(bctx->ipad + keylen, 0, bs - keylen); + memcpy(bctx->opad, bctx->ipad, bs); + + for (i = 0; i < bs; i++) { + bctx->ipad[i] ^= HMAC_IPAD_VALUE; + bctx->opad[i] ^= HMAC_OPAD_VALUE; + } + + return err; +} + +static int aspeed_sham_cra_init(struct crypto_tfm *tfm) +{ + struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); + struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm); + struct aspeed_hace_alg *ast_alg; + + ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash); + tctx->hace_dev = ast_alg->hace_dev; + tctx->flags = 0; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct aspeed_sham_reqctx)); + + if (ast_alg->alg_base) { + /* HMAC related */ + struct aspeed_sha_hmac_ctx *bctx = tctx->base; + + tctx->flags |= SHA_FLAGS_HMAC; + bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(bctx->shash)) { + dev_warn(ast_alg->hace_dev->dev, + "base driver '%s' could not be loaded.\n", + ast_alg->alg_base); + return PTR_ERR(bctx->shash); + } + } + + tctx->enginectx.op.do_one_request = aspeed_ahash_do_request; + tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request; + tctx->enginectx.op.unprepare_request = NULL; + + return 0; +} + +static void aspeed_sham_cra_exit(struct crypto_tfm *tfm) +{ + struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm); + struct aspeed_hace_dev *hace_dev = tctx->hace_dev; + + AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm)); + + if (tctx->flags & SHA_FLAGS_HMAC) { + struct aspeed_sha_hmac_ctx *bctx = tctx->base; + + crypto_free_shash(bctx->shash); + } +} + +static int aspeed_sham_export(struct ahash_request *req, void *out) +{ + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + + memcpy(out, rctx, sizeof(*rctx)); + + return 0; +} + +static int
aspeed_sham_import(struct ahash_request *req, const void *in) +{ + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + + memcpy(rctx, in, sizeof(*rctx)); + + return 0; +} + +struct aspeed_hace_alg aspeed_ahash_algs[] = { + { + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "sha1", + .cra_driver_name = "aspeed-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "sha256", + .cra_driver_name = "aspeed-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "sha224", + .cra_driver_name = "aspeed-sha224", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg_base = "sha1", + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .setkey = aspeed_sham_setkey, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "aspeed-hmac-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + + sizeof(struct aspeed_sha_hmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg_base = "sha224", + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .setkey = aspeed_sham_setkey, + 
.export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "aspeed-hmac-sha224", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + + sizeof(struct aspeed_sha_hmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg_base = "sha256", + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .setkey = aspeed_sham_setkey, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "aspeed-hmac-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + + sizeof(struct aspeed_sha_hmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, +}; + +struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { + { + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "sha384", + .cra_driver_name = "aspeed-sha384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "sha512", + .cra_driver_name = "aspeed-sha512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg.ahash = { + .init = aspeed_sha512s_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "sha512_224", + .cra_driver_name = "aspeed-sha512_224", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + 
CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg.ahash = { + .init = aspeed_sha512s_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "sha512_256", + .cra_driver_name = "aspeed-sha512_256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg_base = "sha384", + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .setkey = aspeed_sham_setkey, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA384_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "aspeed-hmac-sha384", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + + sizeof(struct aspeed_sha_hmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg_base = "sha512", + .alg.ahash = { + .init = aspeed_sham_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .setkey = aspeed_sham_setkey, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA512_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "aspeed-hmac-sha512", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + + sizeof(struct aspeed_sha_hmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg_base = "sha512_224", + .alg.ahash = { + .init = aspeed_sha512s_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .setkey = aspeed_sham_setkey, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "hmac(sha512_224)", + .cra_driver_name = "aspeed-hmac-sha512_224", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + + sizeof(struct aspeed_sha_hmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + 
.cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, + { + .alg_base = "sha512_256", + .alg.ahash = { + .init = aspeed_sha512s_init, + .update = aspeed_sham_update, + .final = aspeed_sham_final, + .finup = aspeed_sham_finup, + .digest = aspeed_sham_digest, + .setkey = aspeed_sham_setkey, + .export = aspeed_sham_export, + .import = aspeed_sham_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct aspeed_sham_reqctx), + .base = { + .cra_name = "hmac(sha512_256)", + .cra_driver_name = "aspeed-hmac-sha512_256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + + sizeof(struct aspeed_sha_hmac_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = aspeed_sham_cra_init, + .cra_exit = aspeed_sham_cra_exit, + } + } + }, + }, +}; + +void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) + crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash); + + if (hace_dev->version != AST2600_VERSION) + return; + + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) + crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); +} + +void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev) +{ + int rc, i; + + AHASH_DBG(hace_dev, "\n"); + + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) { + aspeed_ahash_algs[i].hace_dev = hace_dev; + rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash); + if (rc) { + AHASH_DBG(hace_dev, "Failed to register %s\n", + aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name); + } + } + + if (hace_dev->version != AST2600_VERSION) + return; + + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) { + aspeed_ahash_algs_g6[i].hace_dev = hace_dev; + rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); + if (rc) { + AHASH_DBG(hace_dev, "Failed to register %s\n", + aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name); + } + } +} diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c new file mode 100644 index 000000000000..23a66f481b62 --- /dev/null +++ b/drivers/crypto/aspeed/aspeed-hace.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2021 Aspeed Technology Inc. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "aspeed-hace.h" + +#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG +#define HACE_DBG(d, fmt, ...) \ + dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#else +#define HACE_DBG(d, fmt, ...) 
\ + dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#endif + +/* HACE interrupt service routine */ +static irqreturn_t aspeed_hace_irq(int irq, void *dev) +{ + struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev; + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + u32 sts; + + sts = ast_hace_read(hace_dev, ASPEED_HACE_STS); + ast_hace_write(hace_dev, sts, ASPEED_HACE_STS); + + HACE_DBG(hace_dev, "irq status: 0x%x\n", sts); + + if (sts & HACE_HASH_ISR) { + if (hash_engine->flags & CRYPTO_FLAGS_BUSY) + tasklet_schedule(&hash_engine->done_task); + else + dev_warn(hace_dev->dev, "HASH irq: no active request\n"); + } + + return IRQ_HANDLED; +} + +static void aspeed_hace_hash_done_task(unsigned long data) +{ + struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + + hash_engine->resume(hace_dev); +} + +static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev) +{ +#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH + aspeed_register_hace_hash_algs(hace_dev); +#endif +} + +static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev) +{ +#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH + aspeed_unregister_hace_hash_algs(hace_dev); +#endif +} + +static const struct of_device_id aspeed_hace_of_matches[] = { + { .compatible = "aspeed,ast2500-hace", .data = (void *)5, }, + { .compatible = "aspeed,ast2600-hace", .data = (void *)6, }, + {}, +}; + +static int aspeed_hace_probe(struct platform_device *pdev) +{ + const struct of_device_id *hace_dev_id; + struct aspeed_engine_hash *hash_engine; + struct aspeed_hace_dev *hace_dev; + struct resource *res; + int rc; + + hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev), + GFP_KERNEL); + if (!hace_dev) + return -ENOMEM; + + hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev); + if (!hace_dev_id) { + dev_err(&pdev->dev, "Failed to match hace dev id\n"); + return -EINVAL; + } + + hace_dev->dev = &pdev->dev; + hace_dev->version = (unsigned long)hace_dev_id->data; + hash_engine = &hace_dev->hash_engine; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + platform_set_drvdata(pdev, hace_dev); + + hace_dev->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(hace_dev->regs)) { + dev_err(&pdev->dev, "Failed to map resources\n"); + return PTR_ERR(hace_dev->regs); + } + + /* Get irq number and register it */ + hace_dev->irq = platform_get_irq(pdev, 0); + if (hace_dev->irq < 0) { + dev_err(&pdev->dev, "Failed to get interrupt\n"); + return hace_dev->irq; + } + + rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0, + dev_name(&pdev->dev), hace_dev); + if (rc) { + dev_err(&pdev->dev, "Failed to request interrupt\n"); + return rc; + } + + /* Get clk and enable it */ + hace_dev->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hace_dev->clk)) { + dev_err(&pdev->dev, "Failed to get clk\n"); + return -ENODEV; + } + + rc = clk_prepare_enable(hace_dev->clk); + if (rc) { + dev_err(&pdev->dev, "Failed to enable clock 0x%x\n", rc); + return rc; + } + + /* Initialize crypto hardware engine structure for hash */ + hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev, + true); + if (!hace_dev->crypt_engine_hash) { + rc = -ENOMEM; + goto clk_exit; + } + + rc = crypto_engine_start(hace_dev->crypt_engine_hash); + if (rc) + goto err_engine_hash_start; + + tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task, + (unsigned long)hace_dev); + + /* Allocate DMA buffer for hash engine input */ +
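/* + * This coherent region is ASPEED_HASH_SRC_DMA_BUF_LEN (0xa000) bytes + * and backs either the bounce buffer (AST2500 direct mode) or the + * scatter-gather descriptor table (AST2600 SG mode). + */ +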
hash_engine->ahash_src_addr = + dmam_alloc_coherent(&pdev->dev, + ASPEED_HASH_SRC_DMA_BUF_LEN, + &hash_engine->ahash_src_dma_addr, + GFP_KERNEL); + if (!hash_engine->ahash_src_addr) { + dev_err(&pdev->dev, "Failed to allocate dma buffer\n"); + rc = -ENOMEM; + goto err_engine_hash_start; + } + + aspeed_hace_register(hace_dev); + + dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n"); + + return 0; + +err_engine_hash_start: + crypto_engine_exit(hace_dev->crypt_engine_hash); +clk_exit: + clk_disable_unprepare(hace_dev->clk); + + return rc; +} + +static int aspeed_hace_remove(struct platform_device *pdev) +{ + struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev); + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + + aspeed_hace_unregister(hace_dev); + + crypto_engine_exit(hace_dev->crypt_engine_hash); + + tasklet_kill(&hash_engine->done_task); + + clk_disable_unprepare(hace_dev->clk); + + return 0; +} + +MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches); + +static struct platform_driver aspeed_hace_driver = { + .probe = aspeed_hace_probe, + .remove = aspeed_hace_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = aspeed_hace_of_matches, + }, +}; + +module_platform_driver(aspeed_hace_driver); + +MODULE_AUTHOR("Neal Liu "); +MODULE_DESCRIPTION("Aspeed HACE driver Crypto Accelerator"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h new file mode 100644 index 000000000000..3494ff22f69d --- /dev/null +++ b/drivers/crypto/aspeed/aspeed-hace.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef __ASPEED_HACE_H__ +#define __ASPEED_HACE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/***************************** + * * + * HACE register definitions * + * * + * ***************************/ + +#define ASPEED_HACE_STS 0x1C /* HACE Status Register */ +#define ASPEED_HACE_HASH_SRC 0x20 /* Hash Data Source Base Address Register */ +#define ASPEED_HACE_HASH_DIGEST_BUFF 0x24 /* Hash Digest Write Buffer Base Address Register */ +#define ASPEED_HACE_HASH_KEY_BUFF 0x28 /* Hash HMAC Key Buffer Base Address Register */ +#define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */ +#define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */ + +/* interrupt status reg */ +#define HACE_HASH_ISR BIT(9) +#define HACE_HASH_BUSY BIT(0) + +/* hash cmd reg */ +#define HASH_CMD_MBUS_REQ_SYNC_EN BIT(20) +#define HASH_CMD_HASH_SRC_SG_CTRL BIT(18) +#define HASH_CMD_SHA512_224 (0x3 << 10) +#define HASH_CMD_SHA512_256 (0x2 << 10) +#define HASH_CMD_SHA384 (0x1 << 10) +#define HASH_CMD_SHA512 (0) +#define HASH_CMD_INT_ENABLE BIT(9) +#define HASH_CMD_HMAC (0x1 << 7) +#define HASH_CMD_ACC_MODE (0x2 << 7) +#define HASH_CMD_HMAC_KEY (0x3 << 7) +#define HASH_CMD_SHA1 (0x2 << 4) +#define HASH_CMD_SHA224 (0x4 << 4) +#define HASH_CMD_SHA256 (0x5 << 4) +#define HASH_CMD_SHA512_SER (0x6 << 4) +#define HASH_CMD_SHA_SWAP (0x2 << 2) + +#define HASH_SG_LAST_LIST BIT(31) + +#define CRYPTO_FLAGS_BUSY BIT(1) + +#define SHA_OP_UPDATE 1 +#define SHA_OP_FINAL 2 + +#define SHA_FLAGS_SHA1 BIT(0) +#define SHA_FLAGS_SHA224 BIT(1) +#define SHA_FLAGS_SHA256 BIT(2) +#define SHA_FLAGS_SHA384 BIT(3) +#define SHA_FLAGS_SHA512 BIT(4) +#define SHA_FLAGS_SHA512_224 BIT(5) +#define SHA_FLAGS_SHA512_256 BIT(6) +#define SHA_FLAGS_HMAC BIT(8) +#define SHA_FLAGS_FINUP BIT(9) +#define 
SHA_FLAGS_MASK (0xff) + +#define ASPEED_CRYPTO_SRC_DMA_BUF_LEN 0xa000 +#define ASPEED_CRYPTO_DST_DMA_BUF_LEN 0xa000 +#define ASPEED_CRYPTO_GCM_TAG_OFFSET 0x9ff0 +#define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000 +#define ASPEED_HASH_QUEUE_LENGTH 50 + +struct aspeed_hace_dev; + +typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *); + +struct aspeed_sg_list { + __le32 len; + __le32 phy_addr; +}; + +struct aspeed_engine_hash { + struct tasklet_struct done_task; + unsigned long flags; + struct ahash_request *req; + + /* input buffer */ + void *ahash_src_addr; + dma_addr_t ahash_src_dma_addr; + + dma_addr_t src_dma; + dma_addr_t digest_dma; + + size_t src_length; + + /* callback func */ + aspeed_hace_fn_t resume; + aspeed_hace_fn_t dma_prepare; +}; + +struct aspeed_sha_hmac_ctx { + struct crypto_shash *shash; + u8 ipad[SHA512_BLOCK_SIZE]; + u8 opad[SHA512_BLOCK_SIZE]; +}; + +struct aspeed_sham_ctx { + struct crypto_engine_ctx enginectx; + + struct aspeed_hace_dev *hace_dev; + unsigned long flags; /* hmac flag */ + + struct aspeed_sha_hmac_ctx base[0]; +}; + +struct aspeed_sham_reqctx { + unsigned long flags; /* SHA_FLAGS_* state for this request */ + unsigned long op; /* final or update */ + u32 cmd; /* trigger cmd */ + + /* walk state */ + struct scatterlist *src_sg; + int src_nents; + unsigned int offset; /* offset in current sg */ + unsigned int total; /* per update length */ + + size_t digsize; + size_t block_size; + size_t ivsize; + const __be32 *sha_iv; + + /* remaining data buffer */ + u8 buffer[SHA512_BLOCK_SIZE * 2]; + dma_addr_t buffer_dma_addr; + size_t bufcnt; /* buffer counter */ + + /* output buffer */ + u8 digest[SHA512_DIGEST_SIZE] __aligned(64); + dma_addr_t digest_dma_addr; + u64 digcnt[2]; +}; + +struct aspeed_hace_dev { + void __iomem *regs; + struct device *dev; + int irq; + struct clk *clk; + unsigned long version; + + struct crypto_engine *crypt_engine_hash; + + struct aspeed_engine_hash hash_engine; +}; + +struct aspeed_hace_alg { + struct aspeed_hace_dev *hace_dev; + + const char *alg_base; + + union { + struct skcipher_alg skcipher; + struct ahash_alg ahash; + } alg; +}; + +enum aspeed_version { + AST2500_VERSION = 5, + AST2600_VERSION +}; + +#define ast_hace_write(hace, val, offset) \ + writel((val), (hace)->regs + (offset)) +#define ast_hace_read(hace, offset) \ + readl((hace)->regs + (offset)) + +void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev); +void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev); + +#endif -- cgit v1.2.3 From 62f58b1637b7c8df34afaa548cb4b03d31c0764b Mon Sep 17 00:00:00 2001 From: Neal Liu Date: Thu, 18 Aug 2022 11:59:56 +0800 Subject: crypto: aspeed - add HACE crypto driver Add HACE crypto driver to support symmetric-key encryption and decryption with multiple modes of operation.
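For illustration only (not part of this patch): a kernel-side caller reaches
these algorithms through the generic skcipher API, e.g. "cbc(aes)" below.
The helper name and buffer handling are hypothetical, and the request is only
served by aspeed-cbc-aes when that implementation wins the priority-based
algorithm lookup. A minimal sketch:

    #include <crypto/skcipher.h>
    #include <linux/crypto.h>
    #include <linux/scatterlist.h>
    #include <linux/err.h>

    /* Hypothetical example: encrypt one buffer in place via cbc(aes).
     * 'len' must be a multiple of AES_BLOCK_SIZE for CBC. */
    static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
                                    const u8 *key, unsigned int keylen,
                                    u8 *iv)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int err;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (err)
                    goto out_free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            sg_init_one(&sg, buf, len);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                          CRYPTO_TFM_REQ_MAY_SLEEP,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);

            /* the driver returns -EINPROGRESS; wait for the completion
             * signalled from the HACE interrupt path */
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
    out_free_tfm:
            crypto_free_skcipher(tfm);
            return err;
    }

Because the driver completes requests asynchronously, callers must be prepared
for -EINPROGRESS; crypto_wait_req() above handles that wait.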
Signed-off-by: Neal Liu Signed-off-by: Johnny Huang Reviewed-by: Dhananjay Phadke Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/Kconfig | 17 + drivers/crypto/aspeed/Makefile | 7 +- drivers/crypto/aspeed/aspeed-hace-crypto.c | 1135 ++++++++++++++++++++++++++++ drivers/crypto/aspeed/aspeed-hace.c | 84 +- drivers/crypto/aspeed/aspeed-hace.h | 112 +++ 5 files changed, 1352 insertions(+), 3 deletions(-) create mode 100644 drivers/crypto/aspeed/aspeed-hace-crypto.c (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig index 03e3fc61498e..001bf2e09a72 100644 --- a/drivers/crypto/aspeed/Kconfig +++ b/drivers/crypto/aspeed/Kconfig @@ -30,3 +30,20 @@ config CRYPTO_DEV_ASPEED_HACE_HASH hash driver. Supports multiple message digest standards, including SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, and so on. + +config CRYPTO_DEV_ASPEED_HACE_CRYPTO + bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto" + depends on CRYPTO_DEV_ASPEED + select CRYPTO_ENGINE + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_ECB + select CRYPTO_CBC + select CRYPTO_CFB + select CRYPTO_OFB + select CRYPTO_CTR + help + Select here to enable Aspeed Hash & Crypto Engine (HACE) + crypto driver. + Supports AES/DES symmetric-key encryption and decryption + with ECB/CBC/CFB/OFB/CTR options. diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile index 8bc8d4fed5a9..421e2ca9c53e 100644 --- a/drivers/crypto/aspeed/Makefile +++ b/drivers/crypto/aspeed/Makefile @@ -1,6 +1,9 @@ obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o -aspeed_crypto-objs := aspeed-hace.o \ - $(hace-hash-y) +aspeed_crypto-objs := aspeed-hace.o \ + $(hace-hash-y) \ + $(hace-crypto-y) obj-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) += aspeed-hace-hash.o hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o +obj-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) += aspeed-hace-crypto.o +hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c new file mode 100644 index 000000000000..ba6158f5cf18 --- /dev/null +++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c @@ -0,0 +1,1135 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2021 Aspeed Technology Inc. + */ + +#include "aspeed-hace.h" + +#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG +#define CIPHER_DBG(h, fmt, ...) \ + dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#else +#define CIPHER_DBG(h, fmt, ...) 
\ + dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) +#endif + +static int aspeed_crypto_do_fallback(struct skcipher_request *areq) +{ + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); + skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, + areq->base.complete, areq->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, + areq->cryptlen, areq->iv); + + if (rctx->enc_cmd & HACE_CMD_ENCRYPT) + err = crypto_skcipher_encrypt(&rctx->fallback_req); + else + err = crypto_skcipher_decrypt(&rctx->fallback_req); + + return err; +} + +static bool aspeed_crypto_need_fallback(struct skcipher_request *areq) +{ + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq); + + if (areq->cryptlen == 0) + return true; + + if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) && + !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE)) + return true; + + if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) && + !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE)) + return true; + + return false; +} + +static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev, + struct skcipher_request *req) +{ + if (hace_dev->version == AST2500_VERSION && + aspeed_crypto_need_fallback(req)) { + CIPHER_DBG(hace_dev, "SW fallback\n"); + return aspeed_crypto_do_fallback(req); + } + + return crypto_transfer_skcipher_request_to_engine( + hace_dev->crypt_engine_crypto, req); +} + +static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq) +{ + struct skcipher_request *req = skcipher_request_cast(areq); + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; + struct aspeed_engine_crypto *crypto_engine; + int rc; + + crypto_engine = &hace_dev->crypto_engine; + crypto_engine->req = req; + crypto_engine->flags |= CRYPTO_FLAGS_BUSY; + + rc = ctx->start(hace_dev); + + if (rc != -EINPROGRESS) + return -EIO; + + return 0; +} + +static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err) +{ + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; + struct aspeed_cipher_reqctx *rctx; + struct skcipher_request *req; + + CIPHER_DBG(hace_dev, "\n"); + + req = crypto_engine->req; + rctx = skcipher_request_ctx(req); + + if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) { + if (rctx->enc_cmd & HACE_CMD_DES_SELECT) + memcpy(req->iv, crypto_engine->cipher_ctx + + DES_KEY_SIZE, DES_KEY_SIZE); + else + memcpy(req->iv, crypto_engine->cipher_ctx, + AES_BLOCK_SIZE); + } + + crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY; + + crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, req, + err); + + return err; +} + +static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; + struct device *dev = hace_dev->dev; + struct aspeed_cipher_reqctx *rctx; + struct skcipher_request *req; + + CIPHER_DBG(hace_dev, "\n"); + + req = crypto_engine->req; + rctx = skcipher_request_ctx(req); + + if (req->src == req->dst) { + dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE); + dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE); + } + + return aspeed_sk_complete(hace_dev, 0); +} 
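+ +/* + * Non-SG transfer path (AST2500): the engine has written the result into + * the contiguous cipher bounce buffer, so copy it back to the destination + * scatterlist before completing the request. + */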
+ +static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; + struct aspeed_cipher_reqctx *rctx; + struct skcipher_request *req; + struct scatterlist *out_sg; + int nbytes = 0; + int rc = 0; + + req = crypto_engine->req; + rctx = skcipher_request_ctx(req); + out_sg = req->dst; + + /* Copy output buffer to dst scatter-gather lists */ + nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents, + crypto_engine->cipher_addr, req->cryptlen); + if (!nbytes) { + dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n", + "nbytes", nbytes, "cryptlen", req->cryptlen); + rc = -EINVAL; + } + + CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n", + "nbytes", nbytes, "req->cryptlen", req->cryptlen, + "nb_out_sg", rctx->dst_nents, + "cipher addr", crypto_engine->cipher_addr); + + return aspeed_sk_complete(hace_dev, rc); +} + +static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; + struct aspeed_cipher_reqctx *rctx; + struct skcipher_request *req; + struct scatterlist *in_sg; + int nbytes; + + req = crypto_engine->req; + rctx = skcipher_request_ctx(req); + in_sg = req->src; + + nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents, + crypto_engine->cipher_addr, req->cryptlen); + + CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n", + "nbytes", nbytes, "req->cryptlen", req->cryptlen, + "nb_in_sg", rctx->src_nents, + "cipher addr", crypto_engine->cipher_addr); + + if (!nbytes) { + dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n", + "nbytes", nbytes, "cryptlen", req->cryptlen); + return -EINVAL; + } + + crypto_engine->resume = aspeed_sk_transfer; + + /* Trigger engines */ + ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr, + ASPEED_HACE_SRC); + ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr, + ASPEED_HACE_DEST); + ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN); + ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD); + + return -EINPROGRESS; +} + +static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; + struct aspeed_sg_list *src_list, *dst_list; + dma_addr_t src_dma_addr, dst_dma_addr; + struct aspeed_cipher_reqctx *rctx; + struct skcipher_request *req; + struct scatterlist *s; + int src_sg_len; + int dst_sg_len; + int total, i; + int rc; + + CIPHER_DBG(hace_dev, "\n"); + + req = crypto_engine->req; + rctx = skcipher_request_ctx(req); + + rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL | + HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN; + + /* BIDIRECTIONAL */ + if (req->dst == req->src) { + src_sg_len = dma_map_sg(hace_dev->dev, req->src, + rctx->src_nents, DMA_BIDIRECTIONAL); + dst_sg_len = src_sg_len; + if (!src_sg_len) { + dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); + return -EINVAL; + } + + } else { + src_sg_len = dma_map_sg(hace_dev->dev, req->src, + rctx->src_nents, DMA_TO_DEVICE); + if (!src_sg_len) { + dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); + return -EINVAL; + } + + dst_sg_len = dma_map_sg(hace_dev->dev, req->dst, + rctx->dst_nents, DMA_FROM_DEVICE); + if (!dst_sg_len) { + dev_warn(hace_dev->dev, "dma_map_sg() dst error\n"); + rc = -EINVAL; + goto free_req_src; + } + } + + src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr; + src_dma_addr = crypto_engine->cipher_dma_addr; + total = req->cryptlen; + + for_each_sg(req->src, s, src_sg_len, i) { + 
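/* One descriptor per mapped segment; BIT(31) tags the final entry. */ +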
src_list[i].phy_addr = sg_dma_address(s); + + if (total > sg_dma_len(s)) { + src_list[i].len = sg_dma_len(s); + total -= src_list[i].len; + + } else { + /* last sg list */ + src_list[i].len = total; + src_list[i].len |= BIT(31); + total = 0; + } + + src_list[i].phy_addr = cpu_to_le32(src_list[i].phy_addr); + src_list[i].len = cpu_to_le32(src_list[i].len); + } + + if (total != 0) { + rc = -EINVAL; + goto free_req; + } + + if (req->dst == req->src) { + dst_list = src_list; + dst_dma_addr = src_dma_addr; + + } else { + dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr; + dst_dma_addr = crypto_engine->dst_sg_dma_addr; + total = req->cryptlen; + + for_each_sg(req->dst, s, dst_sg_len, i) { + dst_list[i].phy_addr = sg_dma_address(s); + + if (total > sg_dma_len(s)) { + dst_list[i].len = sg_dma_len(s); + total -= dst_list[i].len; + + } else { + /* last sg list */ + dst_list[i].len = total; + dst_list[i].len |= BIT(31); + total = 0; + } + + dst_list[i].phy_addr = cpu_to_le32(dst_list[i].phy_addr); + dst_list[i].len = cpu_to_le32(dst_list[i].len); + + } + + dst_list[dst_sg_len].phy_addr = 0; + dst_list[dst_sg_len].len = 0; + } + + if (total != 0) { + rc = -EINVAL; + goto free_req; + } + + crypto_engine->resume = aspeed_sk_transfer_sg; + + /* Memory barrier to ensure all data setup before engine starts */ + mb(); + + /* Trigger engines */ + ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC); + ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST); + ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN); + ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD); + + return -EINPROGRESS; + +free_req: + if (req->dst == req->src) { + dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, + DMA_BIDIRECTIONAL); + + } else { + dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents, + DMA_FROM_DEVICE); + dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, + DMA_TO_DEVICE); + } + + return rc; + +free_req_src: + dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); + + return rc; +} + +static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev) +{ + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; + struct aspeed_cipher_reqctx *rctx; + struct crypto_skcipher *cipher; + struct aspeed_cipher_ctx *ctx; + struct skcipher_request *req; + + CIPHER_DBG(hace_dev, "\n"); + + req = crypto_engine->req; + rctx = skcipher_request_ctx(req); + cipher = crypto_skcipher_reqtfm(req); + ctx = crypto_skcipher_ctx(cipher); + + /* enable interrupt */ + rctx->enc_cmd |= HACE_CMD_ISR_EN; + + rctx->dst_nents = sg_nents(req->dst); + rctx->src_nents = sg_nents(req->src); + + ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma, + ASPEED_HACE_CONTEXT); + + if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) { + if (rctx->enc_cmd & HACE_CMD_DES_SELECT) + memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE, + req->iv, DES_BLOCK_SIZE); + else + memcpy(crypto_engine->cipher_ctx, req->iv, + AES_BLOCK_SIZE); + } + + if (hace_dev->version == AST2600_VERSION) { + memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len); + + return aspeed_sk_start_sg(hace_dev); + } + + memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH); + + return aspeed_sk_start(hace_dev); +} + +static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd) +{ + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); + struct aspeed_hace_dev *hace_dev
= ctx->hace_dev; + u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK; + + CIPHER_DBG(hace_dev, "\n"); + + if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) { + if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) + return -EINVAL; + } + + rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE | + HACE_CMD_DES | HACE_CMD_CONTEXT_LOAD_ENABLE | + HACE_CMD_CONTEXT_SAVE_ENABLE; + + return aspeed_hace_crypto_handle_queue(hace_dev, req); +} + +static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key, + unsigned int keylen) +{ + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; + int rc; + + CIPHER_DBG(hace_dev, "keylen: %d bytes\n", keylen); + + if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) { + dev_warn(hace_dev->dev, "invalid keylen: %d bytes\n", keylen); + return -EINVAL; + } + + if (keylen == DES_KEY_SIZE) { + rc = crypto_des_verify_key(tfm, key); + if (rc) + return rc; + + } else if (keylen == DES3_EDE_KEY_SIZE) { + rc = crypto_des3_ede_verify_key(tfm, key); + if (rc) + return rc; + } + + memcpy(ctx->key, key, keylen); + ctx->key_len = keylen; + + crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags & + CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); +} + +static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB | + HACE_CMD_TRIPLE_DES); +} + +static int aspeed_des_ctr_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_ctr_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_ofb_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB | +
HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_ofb_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_cfb_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_cfb_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_cbc_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_cbc_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_ecb_decrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_des_ecb_encrypt(struct skcipher_request *req) +{ + return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB | + HACE_CMD_SINGLE_DES); +} + +static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd) +{ + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; + u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK; + + if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) { + if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) + return -EINVAL; + } + + CIPHER_DBG(hace_dev, "%s\n", + (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt"); + + cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE | + HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE; + + switch (ctx->key_len) { + case AES_KEYSIZE_128: + cmd |= HACE_CMD_AES128; + break; + case AES_KEYSIZE_192: + cmd |= HACE_CMD_AES192; + break; + case AES_KEYSIZE_256: + cmd |= HACE_CMD_AES256; + break; + default: + return -EINVAL; + } + + rctx->enc_cmd = cmd; + + return aspeed_hace_crypto_handle_queue(hace_dev, req); +} + +static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, + unsigned int keylen) +{ + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; + struct crypto_aes_ctx gen_aes_key; + + CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8)); + + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; + + if (ctx->hace_dev->version == AST2500_VERSION) { + aes_expandkey(&gen_aes_key, key, keylen); + memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH); + + } else { + memcpy(ctx->key, key, keylen); + } + + ctx->key_len = keylen; + + crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags & + CRYPTO_TFM_REQ_MASK); + + return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); +} + +static int aspeed_aes_ctr_decrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR); +} + +static int aspeed_aes_ctr_encrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR); +} + +static int aspeed_aes_ofb_decrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB); +} + +static int 
aspeed_aes_ofb_encrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB); +} + +static int aspeed_aes_cfb_decrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB); +} + +static int aspeed_aes_cfb_encrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB); +} + +static int aspeed_aes_cbc_decrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC); +} + +static int aspeed_aes_cbc_encrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC); +} + +static int aspeed_aes_ecb_decrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB); +} + +static int aspeed_aes_ecb_encrypt(struct skcipher_request *req) +{ + return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB); +} + +static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm) +{ + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + const char *name = crypto_tfm_alg_name(&tfm->base); + struct aspeed_hace_alg *crypto_alg; + + + crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher); + ctx->hace_dev = crypto_alg->hace_dev; + ctx->start = aspeed_hace_skcipher_trigger; + + CIPHER_DBG(ctx->hace_dev, "%s\n", name); + + ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->fallback_tfm)) { + dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", + name, PTR_ERR(ctx->fallback_tfm)); + return PTR_ERR(ctx->fallback_tfm); + } + + crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) + + crypto_skcipher_reqsize(ctx->fallback_tfm)); + + ctx->enginectx.op.do_one_request = aspeed_crypto_do_request; + ctx->enginectx.op.prepare_request = NULL; + ctx->enginectx.op.unprepare_request = NULL; + + return 0; +} + +static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm) +{ + struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct aspeed_hace_dev *hace_dev = ctx->hace_dev; + + CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base)); + crypto_free_skcipher(ctx->fallback_tfm); +} + +struct aspeed_hace_alg aspeed_crypto_algs[] = { + { + .alg.skcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aspeed_aes_setkey, + .encrypt = aspeed_aes_ecb_encrypt, + .decrypt = aspeed_aes_ecb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "aspeed-ecb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aspeed_aes_setkey, + .encrypt = aspeed_aes_cbc_encrypt, + .decrypt = aspeed_aes_cbc_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "aspeed-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct 
aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aspeed_aes_setkey, + .encrypt = aspeed_aes_cfb_encrypt, + .decrypt = aspeed_aes_cfb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "cfb(aes)", + .cra_driver_name = "aspeed-cfb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aspeed_aes_setkey, + .encrypt = aspeed_aes_ofb_encrypt, + .decrypt = aspeed_aes_ofb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ofb(aes)", + .cra_driver_name = "aspeed-ofb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_des_ecb_encrypt, + .decrypt = aspeed_des_ecb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "aspeed-ecb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_des_cbc_encrypt, + .decrypt = aspeed_des_cbc_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "aspeed-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_des_cfb_encrypt, + .decrypt = aspeed_des_cfb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "cfb(des)", + .cra_driver_name = "aspeed-cfb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_des_ofb_encrypt, + .decrypt = aspeed_des_ofb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ofb(des)", + 
.cra_driver_name = "aspeed-ofb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_tdes_ecb_encrypt, + .decrypt = aspeed_tdes_ecb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "aspeed-ecb-tdes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_tdes_cbc_encrypt, + .decrypt = aspeed_tdes_cbc_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "aspeed-cbc-tdes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_tdes_cfb_encrypt, + .decrypt = aspeed_tdes_cfb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "cfb(des3_ede)", + .cra_driver_name = "aspeed-cfb-tdes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_tdes_ofb_encrypt, + .decrypt = aspeed_tdes_ofb_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ofb(des3_ede)", + .cra_driver_name = "aspeed-ofb-tdes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, +}; + +struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { + { + .alg.skcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aspeed_aes_setkey, + .encrypt = aspeed_aes_ctr_encrypt, + .decrypt = aspeed_aes_ctr_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "aspeed-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + 
.alg.skcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_des_ctr_encrypt, + .decrypt = aspeed_des_ctr_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ctr(des)", + .cra_driver_name = "aspeed-ctr-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + { + .alg.skcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = aspeed_des_setkey, + .encrypt = aspeed_tdes_ctr_encrypt, + .decrypt = aspeed_tdes_ctr_decrypt, + .init = aspeed_crypto_cra_init, + .exit = aspeed_crypto_cra_exit, + .base = { + .cra_name = "ctr(des3_ede)", + .cra_driver_name = "aspeed-ctr-tdes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), + .cra_alignmask = 0x0f, + .cra_module = THIS_MODULE, + } + } + }, + +}; + +void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) + crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher); + + if (hace_dev->version != AST2600_VERSION) + return; + + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) + crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); +} + +void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) +{ + int rc, i; + + CIPHER_DBG(hace_dev, "\n"); + + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) { + aspeed_crypto_algs[i].hace_dev = hace_dev; + rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher); + if (rc) { + CIPHER_DBG(hace_dev, "Failed to register %s\n", + aspeed_crypto_algs[i].alg.skcipher.base.cra_name); + } + } + + if (hace_dev->version != AST2600_VERSION) + return; + + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) { + aspeed_crypto_algs_g6[i].hace_dev = hace_dev; + rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); + if (rc) { + CIPHER_DBG(hace_dev, "Failed to register %s\n", + aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name); + } + } +} diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c index 23a66f481b62..4fefc9e69d72 100644 --- a/drivers/crypto/aspeed/aspeed-hace.c +++ b/drivers/crypto/aspeed/aspeed-hace.c @@ -25,6 +25,7 @@ static irqreturn_t aspeed_hace_irq(int irq, void *dev) { struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev; + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; u32 sts; @@ -40,9 +41,24 @@ static irqreturn_t aspeed_hace_irq(int irq, void *dev) dev_warn(hace_dev->dev, "HASH no active requests.\n"); } + if (sts & HACE_CRYPTO_ISR) { + if (crypto_engine->flags & CRYPTO_FLAGS_BUSY) + tasklet_schedule(&crypto_engine->done_task); + else + dev_warn(hace_dev->dev, "CRYPTO no active requests.\n"); + } + return IRQ_HANDLED; } +static void aspeed_hace_crypto_done_task(unsigned long data) +{ + struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; + + crypto_engine->resume(hace_dev); +} + static void aspeed_hace_hash_done_task(unsigned long data) { 
struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; @@ -56,6 +72,9 @@ static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev) #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH aspeed_register_hace_hash_algs(hace_dev); #endif +#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO + aspeed_register_hace_crypto_algs(hace_dev); +#endif } static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev) @@ -63,6 +82,9 @@ static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev) #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH aspeed_unregister_hace_hash_algs(hace_dev); #endif +#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO + aspeed_unregister_hace_crypto_algs(hace_dev); +#endif } static const struct of_device_id aspeed_hace_of_matches[] = { @@ -73,6 +95,7 @@ static const struct of_device_id aspeed_hace_of_matches[] = { static int aspeed_hace_probe(struct platform_device *pdev) { + struct aspeed_engine_crypto *crypto_engine; const struct of_device_id *hace_dev_id; struct aspeed_engine_hash *hash_engine; struct aspeed_hace_dev *hace_dev; @@ -93,6 +116,7 @@ static int aspeed_hace_probe(struct platform_device *pdev) hace_dev->dev = &pdev->dev; hace_dev->version = (unsigned long)hace_dev_id->data; hash_engine = &hace_dev->hash_engine; + crypto_engine = &hace_dev->crypto_engine; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -146,6 +170,21 @@ static int aspeed_hace_probe(struct platform_device *pdev) tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task, (unsigned long)hace_dev); + /* Initialize crypto hardware engine structure for crypto */ + hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev, + true); + if (!hace_dev->crypt_engine_crypto) { + rc = -ENOMEM; + goto err_engine_hash_start; + } + + rc = crypto_engine_start(hace_dev->crypt_engine_crypto); + if (rc) + goto err_engine_crypto_start; + + tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task, + (unsigned long)hace_dev); + /* Allocate DMA buffer for hash engine input used */ hash_engine->ahash_src_addr = dmam_alloc_coherent(&pdev->dev, @@ -155,7 +194,45 @@ static int aspeed_hace_probe(struct platform_device *pdev) if (!hash_engine->ahash_src_addr) { dev_err(&pdev->dev, "Failed to allocate dma buffer\n"); rc = -ENOMEM; - goto err_engine_hash_start; + goto err_engine_crypto_start; + } + + /* Allocate DMA buffer for crypto engine context used */ + crypto_engine->cipher_ctx = + dmam_alloc_coherent(&pdev->dev, + PAGE_SIZE, + &crypto_engine->cipher_ctx_dma, + GFP_KERNEL); + if (!crypto_engine->cipher_ctx) { + dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n"); + rc = -ENOMEM; + goto err_engine_crypto_start; + } + + /* Allocate DMA buffer for crypto engine input used */ + crypto_engine->cipher_addr = + dmam_alloc_coherent(&pdev->dev, + ASPEED_CRYPTO_SRC_DMA_BUF_LEN, + &crypto_engine->cipher_dma_addr, + GFP_KERNEL); + if (!crypto_engine->cipher_addr) { + dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n"); + rc = -ENOMEM; + goto err_engine_crypto_start; + } + + /* Allocate DMA buffer for crypto engine output used */ + if (hace_dev->version == AST2600_VERSION) { + crypto_engine->dst_sg_addr = + dmam_alloc_coherent(&pdev->dev, + ASPEED_CRYPTO_DST_DMA_BUF_LEN, + &crypto_engine->dst_sg_dma_addr, + GFP_KERNEL); + if (!crypto_engine->dst_sg_addr) { + dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n"); + rc = -ENOMEM; + goto err_engine_crypto_start; + } } aspeed_hace_register(hace_dev); @@ -164,6 +241,8 @@ static int aspeed_hace_probe(struct platform_device *pdev) return 
0; +err_engine_crypto_start: + crypto_engine_exit(hace_dev->crypt_engine_crypto); err_engine_hash_start: crypto_engine_exit(hace_dev->crypt_engine_hash); clk_exit: @@ -175,13 +254,16 @@ clk_exit: static int aspeed_hace_remove(struct platform_device *pdev) { struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev); + struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; aspeed_hace_unregister(hace_dev); crypto_engine_exit(hace_dev->crypt_engine_hash); + crypto_engine_exit(hace_dev->crypt_engine_crypto); tasklet_kill(&hash_engine->done_task); + tasklet_kill(&crypto_engine->done_task); clk_disable_unprepare(hace_dev->clk); diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h index 3494ff22f69d..f2cde23b56ae 100644 --- a/drivers/crypto/aspeed/aspeed-hace.h +++ b/drivers/crypto/aspeed/aspeed-hace.h @@ -7,9 +7,12 @@ #include #include #include +#include +#include #include #include #include +#include #include #include #include @@ -24,15 +27,75 @@ * HACE register definitions * * * * ***************************/ +#define ASPEED_HACE_SRC 0x00 /* Crypto Data Source Base Address Register */ +#define ASPEED_HACE_DEST 0x04 /* Crypto Data Destination Base Address Register */ +#define ASPEED_HACE_CONTEXT 0x08 /* Crypto Context Buffer Base Address Register */ +#define ASPEED_HACE_DATA_LEN 0x0C /* Crypto Data Length Register */ +#define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */ + +/* G5 */ +#define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */ +/* G6 */ +#define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length Register */ +#define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */ #define ASPEED_HACE_STS 0x1C /* HACE Status Register */ + #define ASPEED_HACE_HASH_SRC 0x20 /* Hash Data Source Base Address Register */ #define ASPEED_HACE_HASH_DIGEST_BUFF 0x24 /* Hash Digest Write Buffer Base Address Register */ #define ASPEED_HACE_HASH_KEY_BUFF 0x28 /* Hash HMAC Key Buffer Base Address Register */ #define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */ #define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */ +/* crypto cmd */ +#define HACE_CMD_SINGLE_DES 0 +#define HACE_CMD_TRIPLE_DES BIT(17) +#define HACE_CMD_AES_SELECT 0 +#define HACE_CMD_DES_SELECT BIT(16) +#define HACE_CMD_ISR_EN BIT(12) +#define HACE_CMD_CONTEXT_SAVE_ENABLE (0) +#define HACE_CMD_CONTEXT_SAVE_DISABLE BIT(9) +#define HACE_CMD_AES (0) +#define HACE_CMD_DES (0) +#define HACE_CMD_RC4 BIT(8) +#define HACE_CMD_DECRYPT (0) +#define HACE_CMD_ENCRYPT BIT(7) + +#define HACE_CMD_ECB (0x0 << 4) +#define HACE_CMD_CBC (0x1 << 4) +#define HACE_CMD_CFB (0x2 << 4) +#define HACE_CMD_OFB (0x3 << 4) +#define HACE_CMD_CTR (0x4 << 4) +#define HACE_CMD_OP_MODE_MASK (0x7 << 4) + +#define HACE_CMD_AES128 (0x0 << 2) +#define HACE_CMD_AES192 (0x1 << 2) +#define HACE_CMD_AES256 (0x2 << 2) +#define HACE_CMD_OP_CASCADE (0x3) +#define HACE_CMD_OP_INDEPENDENT (0x1) + +/* G5 */ +#define HACE_CMD_RI_WO_DATA_ENABLE (0) +#define HACE_CMD_RI_WO_DATA_DISABLE BIT(11) +#define HACE_CMD_CONTEXT_LOAD_ENABLE (0) +#define HACE_CMD_CONTEXT_LOAD_DISABLE BIT(10) +/* G6 */ +#define HACE_CMD_AES_KEY_FROM_OTP BIT(24) +#define HACE_CMD_GHASH_TAG_XOR_EN BIT(23) +#define HACE_CMD_GHASH_PAD_LEN_INV BIT(22) +#define HACE_CMD_GCM_TAG_ADDR_SEL BIT(21) +#define HACE_CMD_MBUS_REQ_SYNC_EN BIT(20) +#define HACE_CMD_DES_SG_CTRL BIT(19) +#define HACE_CMD_SRC_SG_CTRL BIT(18) +#define 
HACE_CMD_CTR_IV_AES_96 (0x1 << 14) +#define HACE_CMD_CTR_IV_DES_32 (0x1 << 14) +#define HACE_CMD_CTR_IV_AES_64 (0x2 << 14) +#define HACE_CMD_CTR_IV_AES_32 (0x3 << 14) +#define HACE_CMD_AES_KEY_HW_EXP BIT(13) +#define HACE_CMD_GCM (0x5 << 4) + /* interrupt status reg */ +#define HACE_CRYPTO_ISR BIT(12) #define HACE_HASH_ISR BIT(9) #define HACE_HASH_BUSY BIT(0) @@ -77,6 +140,9 @@ #define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000 #define ASPEED_HASH_QUEUE_LENGTH 50 +#define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \ + HACE_CMD_OFB | HACE_CMD_CTR) + struct aspeed_hace_dev; typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *); @@ -147,6 +213,48 @@ struct aspeed_sham_reqctx { u64 digcnt[2]; }; +struct aspeed_engine_crypto { + struct tasklet_struct done_task; + unsigned long flags; + struct skcipher_request *req; + + /* context buffer */ + void *cipher_ctx; + dma_addr_t cipher_ctx_dma; + + /* input buffer, could be single/scatter-gather lists */ + void *cipher_addr; + dma_addr_t cipher_dma_addr; + + /* output buffer, only used in scatter-gather lists */ + void *dst_sg_addr; + dma_addr_t dst_sg_dma_addr; + + /* callback func */ + aspeed_hace_fn_t resume; +}; + +struct aspeed_cipher_ctx { + struct crypto_engine_ctx enginectx; + + struct aspeed_hace_dev *hace_dev; + int key_len; + u8 key[AES_MAX_KEYLENGTH]; + + /* callback func */ + aspeed_hace_fn_t start; + + struct crypto_skcipher *fallback_tfm; +}; + +struct aspeed_cipher_reqctx { + int enc_cmd; + int src_nents; + int dst_nents; + + struct skcipher_request fallback_req; /* keep at the end */ +}; + struct aspeed_hace_dev { void __iomem *regs; struct device *dev; @@ -155,8 +263,10 @@ struct aspeed_hace_dev { unsigned long version; struct crypto_engine *crypt_engine_hash; + struct crypto_engine *crypt_engine_crypto; struct aspeed_engine_hash hash_engine; + struct aspeed_engine_crypto crypto_engine; }; struct aspeed_hace_alg { @@ -182,5 +292,7 @@ enum aspeed_version { void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev); void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev); +void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev); +void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev); #endif -- cgit v1.2.3 From 28855860057ac0d51607a0b1386cb6c2cd1ad0d4 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 18 Aug 2022 23:00:03 +0200 Subject: crypto: drivers - move from strlcpy with unused retval to strscpy Follow the advice of the below link and prefer 'strscpy' in this subsystem. Conversion is 1:1 because the return value is not used. Generated by a coccinelle script. 
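A minimal sketch of the semantic difference, for illustration only (the buffer and string here are hypothetical, not from any of the converted call sites): strscpy() always NUL-terminates and returns the number of bytes copied, or -E2BIG on truncation, whereas strlcpy() returns the length of the source string and so must traverse the entire source to compute that value, over-reading a source that is not NUL-terminated.

    char buf[8];
    ssize_t n;

    /* strscpy() guarantees buf is NUL-terminated, even on truncation. */
    n = strscpy(buf, "some-long-name", sizeof(buf));
    if (n == -E2BIG)
            pr_warn("name truncated to \"%s\"\n", buf);

Since none of the call sites converted here use the return value, the substitution is behaviour-preserving.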
Link: https://lore.kernel.org/r/CAHk-=wgfRnXz0W3D37d01q3JFkr_i_uTL=V6A6G1oUZcprmknw@mail.gmail.com/ Signed-off-by: Wolfram Sang Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 6 +++--- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 4 ++-- drivers/crypto/qat/qat_common/adf_cfg.c | 6 +++--- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 2 +- drivers/crypto/qat/qat_common/adf_transport_debug.c | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index 40b482198ebc..23c6edc70914 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -97,7 +97,7 @@ static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps, static void set_ucode_filename(struct otx_cpt_ucode *ucode, const char *filename) { - strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH); + strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH); } static char *get_eng_type_str(int eng_type) @@ -138,7 +138,7 @@ static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type) u32 i, val = 0; u8 nn; - strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ); + strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ); for (i = 0; i < strlen(tmp_ver_str); i++) tmp_ver_str[i] = tolower(tmp_ver_str[i]); @@ -1328,7 +1328,7 @@ static ssize_t ucode_load_store(struct device *dev, eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr); err_msg = "Invalid engine group format"; - strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH); + strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH); start = tmp_buf; has_se = has_ie = has_ae = false; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index f10050fead16..1577986677f6 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -68,7 +68,7 @@ static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp) static void set_ucode_filename(struct otx2_cpt_ucode *ucode, const char *filename) { - strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH); + strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH); } static char *get_eng_type_str(int eng_type) @@ -126,7 +126,7 @@ static int get_ucode_type(struct device *dev, int i, val = 0; u8 nn; - strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ); + strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ); for (i = 0; i < strlen(tmp_ver_str); i++) tmp_ver_str[i] = tolower(tmp_ver_str[i]); diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index e61b3e13db3b..1931e5b37f2b 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -251,13 +251,13 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev, return -ENOMEM; INIT_LIST_HEAD(&key_val->list); - strlcpy(key_val->key, key, sizeof(key_val->key)); + strscpy(key_val->key, key, sizeof(key_val->key)); if (type == ADF_DEC) { snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES, "%ld", (*((long *)val))); } else if (type == ADF_STR) { - strlcpy(key_val->val, (char *)val, sizeof(key_val->val)); + strscpy(key_val->val, (char *)val, sizeof(key_val->val)); } else if (type == ADF_HEX) { snprintf(key_val->val, 
ADF_CFG_MAX_VAL_LEN_IN_BYTES, "0x%lx", (unsigned long)val); @@ -315,7 +315,7 @@ int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name) if (!sec) return -ENOMEM; - strlcpy(sec->name, name, sizeof(sec->name)); + strscpy(sec->name, name, sizeof(sec->name)); INIT_LIST_HEAD(&sec->param_head); down_write(&cfg->lock); list_add_tail(&sec->list, &cfg->sec_list); diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index e8ac932bbaab..508c18edd692 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -363,7 +363,7 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, dev_info.num_logical_accel = hw_data->num_logical_accel; dev_info.banks_per_accel = hw_data->num_banks / hw_data->num_logical_accel; - strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); + strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); dev_info.instance_id = hw_data->instance_id; dev_info.type = hw_data->dev_class->type; dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number; diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c index e69e5907f595..08bca1c506c0 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -96,7 +96,7 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) if (!ring_debug) return -ENOMEM; - strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); + strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name)); snprintf(entry_name, sizeof(entry_name), "ring_%02d", ring->ring_number); -- cgit v1.2.3 From 545665ad1e84eb8f047018a2f607e78cef29c7fa Mon Sep 17 00:00:00 2001 From: Jack Wang Date: Fri, 19 Aug 2022 08:07:49 +0200 Subject: crypto: gemini - Fix error check for dma_map_sg dma_map_sg return 0 on error. Cc: Corentin Labbe Cc: Hans Ulli Kroll Cc: Linus Walleij Cc: Herbert Xu Cc: "David S. 
Miller" Cc: linux-crypto@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Jack Wang Reviewed-by: Linus Walleij Signed-off-by: Herbert Xu --- drivers/crypto/gemini/sl3516-ce-cipher.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c index 14d0d83d388d..34fea8aa91b6 100644 --- a/drivers/crypto/gemini/sl3516-ce-cipher.c +++ b/drivers/crypto/gemini/sl3516-ce-cipher.c @@ -149,7 +149,7 @@ static int sl3516_ce_cipher(struct skcipher_request *areq) if (areq->src == areq->dst) { nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL); - if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) { + if (!nr_sgs || nr_sgs > MAXDESC / 2) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; @@ -158,14 +158,14 @@ static int sl3516_ce_cipher(struct skcipher_request *areq) } else { nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) { + if (!nr_sgs || nr_sgs > MAXDESC / 2) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; } nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE); - if (nr_sgd <= 0 || nr_sgd > MAXDESC) { + if (!nr_sgd || nr_sgd > MAXDESC) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; goto theend_sgs; -- cgit v1.2.3 From 66f0b6b7d839b49e19a856a90b70656dccc4b844 Mon Sep 17 00:00:00 2001 From: Jack Wang Date: Fri, 19 Aug 2022 08:07:50 +0200 Subject: crypto: sahara - Fix error check for dma_map_sg dma_map_sg return 0 on error, it returns the number of DMA address segments mapped (this may be shorter than passed in if some elements of the scatter/gather list are physically or virtually adjacent and an IOMMU maps them with a single entry). Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Jack Wang Signed-off-by: Herbert Xu --- drivers/crypto/sahara.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index b07ae4ba165e..7ab20fb95166 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -487,13 +487,13 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE); - if (ret != dev->nb_in_sg) { + if (!ret) { dev_err(dev->device, "couldn't map in sg\n"); goto unmap_in; } ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg, DMA_FROM_DEVICE); - if (ret != dev->nb_out_sg) { + if (!ret) { dev_err(dev->device, "couldn't map out sg\n"); goto unmap_out; } -- cgit v1.2.3 From 417f62f6402c985dde661c6b09734fddb33d9b1a Mon Sep 17 00:00:00 2001 From: Jack Wang Date: Fri, 19 Aug 2022 08:07:51 +0200 Subject: crypto: qce - Fix dma_map_sg error check dma_map_sg return 0 on error, fix the error check and return -EIO to caller. Cc: Thara Gopinath Cc: Herbert Xu Cc: "David S. 
Miller" Cc: linux-crypto@vger.kernel.org Cc: linux-arm-msm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Jack Wang Signed-off-by: Herbert Xu --- drivers/crypto/qce/aead.c | 4 ++-- drivers/crypto/qce/sha.c | 8 +++++--- drivers/crypto/qce/skcipher.c | 8 ++++---- 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c index 97a530171f07..6eb4d2e35629 100644 --- a/drivers/crypto/qce/aead.c +++ b/drivers/crypto/qce/aead.c @@ -450,8 +450,8 @@ qce_aead_async_req_handle(struct crypto_async_request *async_req) if (ret) return ret; dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); - if (dst_nents < 0) { - ret = dst_nents; + if (!dst_nents) { + ret = -EIO; goto error_free; } diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 59159f5e64e5..37bafd7aeb79 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -97,14 +97,16 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) } ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); - if (ret < 0) - return ret; + if (!ret) + return -EIO; sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); - if (ret < 0) + if (!ret) { + ret = -EIO; goto error_unmap_src; + } ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, &rctx->result_sg, 1, qce_ahash_done, async_req); diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index 3d27cd5210ef..5b493fdc1e74 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -124,15 +124,15 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) rctx->dst_sg = rctx->dst_tbl.sgl; dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); - if (dst_nents < 0) { - ret = dst_nents; + if (!dst_nents) { + ret = -EIO; goto error_free; } if (diff_dst) { src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src); - if (src_nents < 0) { - ret = src_nents; + if (!src_nents) { + ret = -EIO; goto error_unmap_dst; } rctx->src_sg = req->src; -- cgit v1.2.3 From 45fa321e7de604146603e24ee5bf3c0b766efe46 Mon Sep 17 00:00:00 2001 From: Jack Wang Date: Fri, 19 Aug 2022 08:07:52 +0200 Subject: crypto: amlogic - Fix dma_map_sg error check dma_map_sg return 0 on error. Cc: Corentin Labbe Cc: Herbert Xu Cc: "David S. 
Miller" Cc: linux-crypto@vger.kernel.org Cc: linux-amlogic@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Jack Wang Signed-off-by: Herbert Xu --- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index e79514fce731..af017a087ebf 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -177,7 +177,7 @@ static int meson_cipher(struct skcipher_request *areq) if (areq->src == areq->dst) { nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL); - if (nr_sgs < 0) { + if (!nr_sgs) { dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs); err = -EINVAL; goto theend; @@ -186,14 +186,14 @@ static int meson_cipher(struct skcipher_request *areq) } else { nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); - if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) { + if (!nr_sgs || nr_sgs > MAXDESC - 3) { dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs); err = -EINVAL; goto theend; } nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE); - if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) { + if (!nr_sgd || nr_sgd > MAXDESC - 3) { dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd); err = -EINVAL; goto theend; -- cgit v1.2.3 From 2b02187bdb0bb75000850bd0309e70eb8664159e Mon Sep 17 00:00:00 2001 From: Jack Wang Date: Fri, 19 Aug 2022 08:07:53 +0200 Subject: crypto: allwinner - Fix dma_map_sg error check dma_map_sg return 0 on error. Cc: Corentin Labbe Cc: Herbert Xu Cc: "David S. Miller" Cc: Chen-Yu Tsai Cc: Jernej Skrabec Cc: Samuel Holland Cc: Dan Carpenter Cc: Minghao Chi Cc: Peng Wu Cc: Alexey Khoroshilov Cc: linux-crypto@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-sunxi@lists.linux.dev Cc: linux-kernel@vger.kernel.org Signed-off-by: Jack Wang Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 6 +++--- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 2 +- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 4 ++-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 74b4e910a38d..be7f46faef7e 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -208,7 +208,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req if (areq->src == areq->dst) { nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL); - if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + if (!nr_sgs || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; @@ -216,13 +216,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req nr_sgd = nr_sgs; } else { nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + if (!nr_sgs || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); - if (nr_sgd <= 0 || nr_sgd > MAX_SG) { + if (!nr_sgd || nr_sgd > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; goto theend_sgs; diff --git 
a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 8b5b9b9d04c3..0e6843ec197f 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -389,7 +389,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) cet->t_asym_ctl = 0; nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + if (!nr_sgs || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 910d6751644c..fdcc98cdecaa 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -232,13 +232,13 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) nr_sgd = nr_sgs; } else { nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 8) { + if (!nr_sgs || nr_sgs > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); - if (nr_sgd <= 0 || nr_sgd > 8) { + if (!nr_sgd || nr_sgd > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; goto theend_sgs; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 36a82b22953c..fcb8c41cc957 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -527,7 +527,7 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) rctx->method = ss->variant->alg_hash[algt->ss_algo_id]; nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > MAX_SG) { + if (!nr_sgs || nr_sgs > MAX_SG) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; -- cgit v1.2.3 From 9b32fed8d6715207a0a2cd42f48a147d203a7576 Mon Sep 17 00:00:00 2001 From: Jack Wang Date: Fri, 19 Aug 2022 08:07:54 +0200 Subject: crypto: ccree - Fix dma_map_sg error check dma_map_sg return 0 on error, and dma_map_error is not supposed to use here. Cc: Gilad Ben-Yossef Cc: Herbert Xu Cc: "David S. Miller" Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Jack Wang Signed-off-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_buffer_mgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index 6140e4927322..9efd88f871d1 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -274,7 +274,7 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, } ret = dma_map_sg(dev, sg, *nents, direction); - if (dma_mapping_error(dev, ret)) { + if (!ret) { *nents = 0; dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret); return -ENOMEM; -- cgit v1.2.3 From d03e89b3eba46121e8cbf2753b02be407810991b Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Fri, 19 Aug 2022 07:46:18 +0000 Subject: crypto: hisilicon/qm - no judgment in the back process Judgment should not be added in the back process. So clean it. 
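A minimal sketch of the unwind pattern this change settles on, for illustration only (all names here are hypothetical): cleanup in an error path is best effort, and its result must not be allowed to overwrite the error that triggered the unwind.

    static int setup_both(struct ctx *c)
    {
            int ret;

            ret = enable_a(c);
            if (ret)
                    return ret;

            ret = enable_b(c);
            if (ret)
                    goto undo_a;

            return 0;

    undo_a:
            /* Best-effort rollback; do not let its result mask 'ret'. */
            disable_a(c);
            return ret;
    }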
Signed-off-by: Kai Ye Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index b2e7abff1b8a..c180ad33d2f8 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1357,11 +1357,9 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, return 0; back_sqc_cqc: - for (i = SQC_VFT; i <= CQC_VFT; i++) { - ret = qm_set_vft_common(qm, i, fun_num, 0, 0); - if (ret) - return ret; - } + for (i = SQC_VFT; i <= CQC_VFT; i++) + qm_set_vft_common(qm, i, fun_num, 0, 0); + return ret; } -- cgit v1.2.3 From 5530acc8b9bfcfa5f90909b940be39a84c350033 Mon Sep 17 00:00:00 2001 From: Robert Elliott Date: Sat, 20 Aug 2022 13:41:40 -0500 Subject: crypto: Kconfig - remove AES_ARM64 ref by SA2UL Remove the CRYPTO_AES_ARM64 selection by the TI security accelerator driver (SA2UL), which leads to this problem when running make allmodconfig for arm (32-bit): WARNING: unmet direct dependencies detected for CRYPTO_AES_ARM64 Depends on [n]: CRYPTO [=y] && ARM64 Selected by [m]: - CRYPTO_DEV_SA2UL [=m] && CRYPTO [=y] && CRYPTO_HW [=y] && (ARCH_K3 || COMPILE_TEST [=y]) Fixes: 7694b6ca649fe ("crypto: sa2ul - Add crypto driver") Signed-off-by: Robert Elliott Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index f6fe1945edd6..72c58b21ba62 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -804,7 +804,6 @@ config CRYPTO_DEV_SA2UL depends on ARCH_K3 || COMPILE_TEST select ARM64_CRYPTO select CRYPTO_AES - select CRYPTO_AES_ARM64 select CRYPTO_ALGAPI select CRYPTO_AUTHENC select CRYPTO_SHA1 -- cgit v1.2.3 From 4a329fecc9aaebb27a53fa7abfa53bbc2ee42f3f Mon Sep 17 00:00:00 2001 From: Robert Elliott Date: Sat, 20 Aug 2022 13:41:41 -0500 Subject: crypto: Kconfig - submenus for arm and arm64 Move ARM- and ARM64-accelerated menus into a submenu under the Crypto API menu (paralleling all the architectures). Make each submenu always appear if the corresponding architecture is supported. Get rid of the ARM_CRYPTO and ARM64_CRYPTO symbols. The "ARM Accelerated" or "ARM64 Accelerated" entry disappears from: General setup ---> Platform selection ---> Kernel Features ---> Boot options ---> Power management options ---> CPU Power Management ---> [*] ACPI (Advanced Configuration and Power Interface) Support ---> [*] Virtualization ---> [*] ARM Accelerated Cryptographic Algorithms ---> (or) [*] ARM64 Accelerated Cryptographic Algorithms ---> ... -*- Cryptographic API ---> Library routines ---> Kernel hacking ---> and moves into the Cryptographic API menu, which now contains: ... Accelerated Cryptographic Algorithms for CPU (arm) ---> (or) Accelerated Cryptographic Algorithms for CPU (arm64) ---> [*] Hardware crypto devices ---> ... 
Suggested-by: Eric Biggers Signed-off-by: Robert Elliott Signed-off-by: Herbert Xu --- drivers/crypto/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 72c58b21ba62..55e75fbb658e 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -802,7 +802,6 @@ source "drivers/crypto/amlogic/Kconfig" config CRYPTO_DEV_SA2UL tristate "Support for TI security accelerator" depends on ARCH_K3 || COMPILE_TEST - select ARM64_CRYPTO select CRYPTO_AES select CRYPTO_ALGAPI select CRYPTO_AUTHENC -- cgit v1.2.3 From fb1e1257b0cb93328f2909755b0fa2dc582cc508 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 2 Sep 2022 18:15:53 +0800 Subject: Revert "crypto: gemini - Fix error check for dma_map_sg" This reverts commit 545665ad1e84eb8f047018a2f607e78cef29c7fa. The original code was correct and arguably more robust than the patched version. Signed-off-by: Herbert Xu --- drivers/crypto/gemini/sl3516-ce-cipher.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c index 34fea8aa91b6..14d0d83d388d 100644 --- a/drivers/crypto/gemini/sl3516-ce-cipher.c +++ b/drivers/crypto/gemini/sl3516-ce-cipher.c @@ -149,7 +149,7 @@ static int sl3516_ce_cipher(struct skcipher_request *areq) if (areq->src == areq->dst) { nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL); - if (!nr_sgs || nr_sgs > MAXDESC / 2) { + if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; @@ -158,14 +158,14 @@ static int sl3516_ce_cipher(struct skcipher_request *areq) } else { nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); - if (!nr_sgs || nr_sgs > MAXDESC / 2) { + if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; } nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE); - if (!nr_sgd || nr_sgd > MAXDESC) { + if (nr_sgd <= 0 || nr_sgd > MAXDESC) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; goto theend_sgs; -- cgit v1.2.3 From 2ad548ebb85cc416587ba68c0e530a2f00b2273a Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 2 Sep 2022 18:19:27 +0800 Subject: Revert "crypto: allwinner - Fix dma_map_sg error check" This reverts commit 2b02187bdb0bb75000850bd0309e70eb8664159e. The original code was correct and arguably more robust than the patched version. 
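A minimal sketch of why the reverted form is considered more defensive, for illustration only (dev, sg, nents and MAX_SG stand in for the drivers' own variables): dma_map_sg() returns an int, so a '<= 0' comparison covers both the documented zero-on-error case and any hypothetical negative return, while '!nr_sgs' only covers zero.

    int nr_sgs = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

    /* Rejects 0 (mapping failed) as well as any negative value. */
    if (nr_sgs <= 0 || nr_sgs > MAX_SG)
            return -EINVAL;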
Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 6 +++--- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 2 +- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 4 ++-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index be7f46faef7e..74b4e910a38d 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -208,7 +208,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req if (areq->src == areq->dst) { nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL); - if (!nr_sgs || nr_sgs > MAX_SG) { + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; @@ -216,13 +216,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req nr_sgd = nr_sgs; } else { nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - if (!nr_sgs || nr_sgs > MAX_SG) { + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); - if (!nr_sgd || nr_sgd > MAX_SG) { + if (nr_sgd <= 0 || nr_sgd > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; goto theend_sgs; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 0e6843ec197f..8b5b9b9d04c3 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -389,7 +389,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) cet->t_asym_ctl = 0; nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); - if (!nr_sgs || nr_sgs > MAX_SG) { + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index fdcc98cdecaa..910d6751644c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -232,13 +232,13 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) nr_sgd = nr_sgs; } else { nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); - if (!nr_sgs || nr_sgs > 8) { + if (nr_sgs <= 0 || nr_sgs > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); - if (!nr_sgd || nr_sgd > 8) { + if (nr_sgd <= 0 || nr_sgd > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; goto theend_sgs; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index fcb8c41cc957..36a82b22953c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -527,7 +527,7 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) rctx->method = ss->variant->alg_hash[algt->ss_algo_id]; nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); - if (!nr_sgs || nr_sgs > MAX_SG) { + if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ss->dev, "Invalid sg number %d\n", 
nr_sgs); err = -EINVAL; goto theend; -- cgit v1.2.3 From 8c8e5b6ae43a55d0f6098606c311123f1a39d112 Mon Sep 17 00:00:00 2001 From: wangjianli Date: Tue, 23 Aug 2022 21:52:23 +0800 Subject: crypto: n2 - fix repeated words in comments Delete the redundant word 'to'. Signed-off-by: wangjianli Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 3b0bf6fea491..31e24df18877 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1494,7 +1494,7 @@ static void n2_unregister_algs(void) * * So we have to back-translate, going through the 'intr' and 'ino' * property tables of the n2cp MDESC node, matching it with the OF - * 'interrupts' property entries, in order to to figure out which + * 'interrupts' property entries, in order to figure out which * devino goes to which already-translated IRQ. */ static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, -- cgit v1.2.3 From 8e971e06b00bf995dd5f2c6fa9c068bc23cb5e56 Mon Sep 17 00:00:00 2001 From: wangjianli Date: Tue, 23 Aug 2022 21:53:53 +0800 Subject: crypto: marvell/octeontx - fix repeated words in comments Delete the redundant word 'is'. Signed-off-by: wangjianli Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h index b8bdb9f134f3..205eacac4a34 100644 --- a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h +++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h @@ -403,7 +403,7 @@ union otx_cptx_pf_exe_bist_status { * big-endian format in memory. * iqb_ldwb:1 [7:7](R/W) Instruction load don't write back. * 0 = The hardware issues NCB transient load (LDT) towards the cache, - * which if the line hits and is is dirty will cause the line to be + * which if the line hits and is dirty will cause the line to be * written back before being replaced. * 1 = The hardware issues NCB LDWB read-and-invalidate command towards * the cache when fetching the last word of instructions; as a result the -- cgit v1.2.3 From 0e831f3d2fd973d5f6645ef3911c594af9ddd485 Mon Sep 17 00:00:00 2001 From: wangjianli Date: Tue, 23 Aug 2022 21:57:30 +0800 Subject: crypto: bcm - fix repeated words in comments Delete the redundant word 'in'. Signed-off-by: wangjianli Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index 71281a3bdbdc..d6d87332140a 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -231,7 +231,7 @@ struct iproc_ctx_s { /* * shash descriptor - needed to perform incremental hashing in - * in software, when hw doesn't support it. + * software, when hw doesn't support it. */ struct shash_desc *shash; -- cgit v1.2.3 From cc40b04c08400d86d2d6ea0159e0617e717f729c Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Thu, 25 Aug 2022 12:32:16 +0200 Subject: crypto: qat - fix default value of WDT timer The QAT HW supports an hardware mechanism to detect an accelerator hang. The reporting of a hang occurs after a watchdog timer (WDT) expires. The value of the WDT set previously was too small and was causing false positives. 
Change the default value of the WDT to 0x7000000ULL to avoid this. Fixes: 1c4d9d5bbb5a ("crypto: qat - enable detection of accelerators hang") Reviewed-by: Giovanni Cabiddu Signed-off-by: Lucas Segarra Fernandez Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_gen4_hw_data.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h index 43b8f864806b..4fb4b3df5a18 100644 --- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h @@ -107,7 +107,7 @@ do { \ * Timeout is in cycles. Clock speed may vary across products but this * value should be a few milli-seconds. */ -#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000 +#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL #define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000 #define ADF_SSMWDTL_OFFSET 0x54 #define ADF_SSMWDTH_OFFSET 0x5C -- cgit v1.2.3 From 31b39755e32568b43c80814c5e13d7b1ab796d73 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 26 Aug 2022 19:05:31 +0800 Subject: crypto: aspeed - Enable compile testing This driver compile tests just fine. Signed-off-by: Herbert Xu Reviewed-by: Neal Liu Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig index 001bf2e09a72..ae3eb0eb57f6 100644 --- a/drivers/crypto/aspeed/Kconfig +++ b/drivers/crypto/aspeed/Kconfig @@ -1,6 +1,6 @@ config CRYPTO_DEV_ASPEED tristate "Support for Aspeed cryptographic engine driver" - depends on ARCH_ASPEED + depends on ARCH_ASPEED || COMPILE_TEST help Hash and Crypto Engine (HACE) is designed to accelerate the throughput of hash data digest, encryption and decryption. -- cgit v1.2.3 From 95b66bc4e789c5698b973e92235c2e901c7546e0 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 27 Aug 2022 18:26:57 +0800 Subject: crypto: hisilicon/qm - check mailbox operation result After the mailbox operation is complete, the result may still be unsuccessful. The driver needs to check the status bits of the mailbox register; if they indicate failure, -EIO is returned.
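In outline, the added check follows this pattern (a minimal sketch with illustrative names, not the driver's actual API; the bit range mirrors the QM_MB_STATUS_MASK the patch introduces):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int example_mb_check_result(void __iomem *io_base, unsigned long cmd_reg)
{
	/* Read the command register back once the mailbox send completes */
	u32 val = readl(io_base + cmd_reg);

	/* Any set bit in the status field (12:9 here) means the operation failed */
	return (val & GENMASK(12, 9)) ? -EIO : 0;
}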
Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c180ad33d2f8..1c661c89308d 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -36,6 +36,7 @@ #define QM_MB_PING_ALL_VFS 0xffff #define QM_MB_CMD_DATA_SHIFT 32 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) +#define QM_MB_STATUS_MASK GENMASK(12, 9) /* sqc shift */ #define QM_SQ_HOP_NUM_SHIFT 0 @@ -728,8 +729,12 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) { + int ret; + u32 val; + if (unlikely(hisi_qm_wait_mb_ready(qm))) { dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); + ret = -EBUSY; goto mb_busy; } @@ -737,6 +742,14 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) if (unlikely(hisi_qm_wait_mb_ready(qm))) { dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); + ret = -ETIMEDOUT; + goto mb_busy; + } + + val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); + if (val & QM_MB_STATUS_MASK) { + dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); + ret = -EIO; goto mb_busy; } @@ -744,7 +757,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) mb_busy: atomic64_inc(&qm->debug.dfx.mb_err_cnt); - return -EBUSY; + return ret; } int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, -- cgit v1.2.3 From 5afc904f443de2afd31c4e0686ba178beede86fe Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 27 Aug 2022 18:27:37 +0800 Subject: crypto: hisilicon/qm - fix missing put dfx access In qm_cmd_write(), if the function returns from the branch 'atomic_read(&qm->status.flags) == QM_STOP', the dfx access acquired earlier is never put. Fixes: 607c191b371d ("crypto: hisilicon - support runtime PM for accelerator device") Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 1c661c89308d..fd9fb159048f 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2216,8 +2216,10 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, return ret; /* Judge if the instance is being reset. */ - if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) - return 0; + if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) { + ret = 0; + goto put_dfx_access; + } if (count > QM_DBG_WRITE_LEN) { ret = -ENOSPC; -- cgit v1.2.3 From fa2bf6e35091e66fc83af1aebea06a78a5a2fde4 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 27 Aug 2022 18:27:56 +0800 Subject: crypto: hisilicon/qm - return failure if vfs_num exceeds total VFs The accelerator drivers allow users to enable VFs through the module parameter 'vfs_num'. If the number of VFs to be enabled exceeds the total VFs, all VFs are enabled. Change this to match enabling VFs through the 'sriov_numvfs' file: return -ERANGE if the number of VFs to be enabled exceeds the total VFs.
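A minimal sketch of the resulting policy (generic names, outside the driver): validate the requested VF count against the device total before enabling, rather than clamping it.

#include <linux/errno.h>
#include <linux/pci.h>

static int example_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
	int total_vfs = pci_sriov_get_totalvfs(pdev);

	/* Reject out-of-range requests, matching sriov_numvfs semantics */
	if (max_vfs > total_vfs)
		return -ERANGE;

	return pci_enable_sriov(pdev, max_vfs);
}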
Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index fd9fb159048f..54bbd7fa57cc 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4766,7 +4766,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) goto err_put_sync; } - num_vfs = min_t(int, max_vfs, total_vfs); + if (max_vfs > total_vfs) { + pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs); + ret = -ERANGE; + goto err_put_sync; + } + + num_vfs = max_vfs; ret = qm_vf_q_assign(qm, num_vfs); if (ret) { pci_err(pdev, "Can't assign queues for VF!\n"); -- cgit v1.2.3 From 2be570849efc4e1ceda6a8384184f493f558f188 Mon Sep 17 00:00:00 2001 From: Neal Liu Date: Mon, 5 Sep 2022 10:54:33 +0800 Subject: crypto: aspeed - fix build module error If CONFIG_MODULES=y and CONFIG_CRYPTO_DEV_ASPEED=m, the modpost build step fails. Error messages: ERROR: modpost: "aspeed_register_hace_hash_algs" [drivers/crypto/aspeed/aspeed_crypto.ko] undefined! ERROR: modpost: "aspeed_unregister_hace_hash_algs" [drivers/crypto/aspeed/aspeed_crypto.ko] undefined! Change the build sequence to fix this. Reported-by: kernel test robot Signed-off-by: Neal Liu Tested-by: Sudip Mukherjee Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/Makefile | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile index 421e2ca9c53e..3be78cec0ecb 100644 --- a/drivers/crypto/aspeed/Makefile +++ b/drivers/crypto/aspeed/Makefile @@ -1,9 +1,6 @@ +hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace.o aspeed-hace-hash.o +hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace.o aspeed-hace-crypto.o + obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o -aspeed_crypto-objs := aspeed-hace.o \ - $(hace-hash-y) \ +aspeed_crypto-objs := $(hace-hash-y) \ $(hace-crypto-y) - -obj-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) += aspeed-hace-hash.o -hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o -obj-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) += aspeed-hace-crypto.o -hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o -- cgit v1.2.3 From aa450316c662f599189e0910444cf064c4f31602 Mon Sep 17 00:00:00 2001 From: Neal Liu Date: Wed, 7 Sep 2022 11:34:31 +0800 Subject: crypto: aspeed: fix format unexpected build warning This fixes the following build warning (and similar ones) when compile testing is enabled: aspeed-hace-hash.c:188:9: warning: format '%x' expects argument of type 'unsigned int', but argument 7 has type 'size_t' {aka 'long unsigned int'} [-Wformat=] Reported-by: kernel test robot Signed-off-by: Neal Liu Acked-by: Randy Dunlap Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-hace-hash.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c index 0a44ffc0e13b..5a8a3a611dd4 100644 --- a/drivers/crypto/aspeed/aspeed-hace-hash.c +++ b/drivers/crypto/aspeed/aspeed-hace-hash.c @@ -185,7 +185,7 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev) remain = (rctx->total + rctx->bufcnt) % rctx->block_size; length = rctx->total + rctx->bufcnt - remain; - AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x, %s:0x%x\n", +
AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n", "rctx total", rctx->total, "bufcnt", rctx->bufcnt, "length", length, "remain", remain); @@ -324,8 +324,8 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev, struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); - AHASH_DBG(hace_dev, "src_dma:0x%x, digest_dma:0x%x, length:0x%x\n", - hash_engine->src_dma, hash_engine->digest_dma, + AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n", + &hash_engine->src_dma, &hash_engine->digest_dma, hash_engine->src_length); rctx->cmd |= HASH_CMD_INT_ENABLE; -- cgit v1.2.3 From efc96d43ec38381dacbd51679c2d01438511371d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 6 Sep 2022 13:05:35 +0800 Subject: crypto: aspeed - Fix sparse warnings This patch fixes a bunch of bit endianness warnings and two missing static modifiers. Signed-off-by: Herbert Xu Reviewed-by: Neal Liu Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-hace-crypto.c | 42 ++++++++++++++---------------- drivers/crypto/aspeed/aspeed-hace-hash.c | 38 ++++++++++++++------------- 2 files changed, 40 insertions(+), 40 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c index ba6158f5cf18..ef73b0028b4d 100644 --- a/drivers/crypto/aspeed/aspeed-hace-crypto.c +++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c @@ -258,21 +258,20 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev) total = req->cryptlen; for_each_sg(req->src, s, src_sg_len, i) { - src_list[i].phy_addr = sg_dma_address(s); + u32 phy_addr = sg_dma_address(s); + u32 len = sg_dma_len(s); - if (total > sg_dma_len(s)) { - src_list[i].len = sg_dma_len(s); - total -= src_list[i].len; - - } else { + if (total > len) + total -= len; + else { /* last sg list */ - src_list[i].len = total; - src_list[i].len |= BIT(31); + len = total; + len |= BIT(31); total = 0; } - src_list[i].phy_addr = cpu_to_le32(src_list[i].phy_addr); - src_list[i].len = cpu_to_le32(src_list[i].len); + src_list[i].phy_addr = cpu_to_le32(phy_addr); + src_list[i].len = cpu_to_le32(len); } if (total != 0) { @@ -290,21 +289,20 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev) total = req->cryptlen; for_each_sg(req->dst, s, dst_sg_len, i) { - dst_list[i].phy_addr = sg_dma_address(s); - - if (total > sg_dma_len(s)) { - dst_list[i].len = sg_dma_len(s); - total -= dst_list[i].len; + u32 phy_addr = sg_dma_address(s); + u32 len = sg_dma_len(s); - } else { + if (total > len) + total -= len; + else { /* last sg list */ - dst_list[i].len = total; - dst_list[i].len |= BIT(31); + len = total; + len |= BIT(31); total = 0; } - dst_list[i].phy_addr = cpu_to_le32(dst_list[i].phy_addr); - dst_list[i].len = cpu_to_le32(dst_list[i].len); + dst_list[i].phy_addr = cpu_to_le32(phy_addr); + dst_list[i].len = cpu_to_le32(len); } @@ -731,7 +729,7 @@ static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm) crypto_free_skcipher(ctx->fallback_tfm); } -struct aspeed_hace_alg aspeed_crypto_algs[] = { +static struct aspeed_hace_alg aspeed_crypto_algs[] = { { .alg.skcipher = { .min_keysize = AES_MIN_KEY_SIZE, @@ -1019,7 +1017,7 @@ struct aspeed_hace_alg aspeed_crypto_algs[] = { }, }; -struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { +static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { { .alg.skcipher = { .ivsize = AES_BLOCK_SIZE, diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c 
index 5a8a3a611dd4..935135229ebd 100644 --- a/drivers/crypto/aspeed/aspeed-hace-hash.c +++ b/drivers/crypto/aspeed/aspeed-hace-hash.c @@ -208,6 +208,9 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev) } if (rctx->bufcnt != 0) { + u32 phy_addr; + u32 len; + rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer, rctx->block_size * 2, @@ -218,36 +221,35 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev) goto free_rctx_digest; } - src_list[0].phy_addr = rctx->buffer_dma_addr; - src_list[0].len = rctx->bufcnt; - length -= src_list[0].len; + phy_addr = rctx->buffer_dma_addr; + len = rctx->bufcnt; + length -= len; /* Last sg list */ if (length == 0) - src_list[0].len |= HASH_SG_LAST_LIST; + len |= HASH_SG_LAST_LIST; - src_list[0].phy_addr = cpu_to_le32(src_list[0].phy_addr); - src_list[0].len = cpu_to_le32(src_list[0].len); + src_list[0].phy_addr = cpu_to_le32(phy_addr); + src_list[0].len = cpu_to_le32(len); src_list++; } if (length != 0) { for_each_sg(rctx->src_sg, s, sg_len, i) { - src_list[i].phy_addr = sg_dma_address(s); - - if (length > sg_dma_len(s)) { - src_list[i].len = sg_dma_len(s); - length -= sg_dma_len(s); + u32 phy_addr = sg_dma_address(s); + u32 len = sg_dma_len(s); - } else { + if (length > len) + length -= len; + else { /* Last sg list */ - src_list[i].len = length; - src_list[i].len |= HASH_SG_LAST_LIST; + len = length; + len |= HASH_SG_LAST_LIST; length = 0; } - src_list[i].phy_addr = cpu_to_le32(src_list[i].phy_addr); - src_list[i].len = cpu_to_le32(src_list[i].len); + src_list[i].phy_addr = cpu_to_le32(phy_addr); + src_list[i].len = cpu_to_le32(len); } } @@ -913,7 +915,7 @@ static int aspeed_sham_import(struct ahash_request *req, const void *in) return 0; } -struct aspeed_hace_alg aspeed_ahash_algs[] = { +static struct aspeed_hace_alg aspeed_ahash_algs[] = { { .alg.ahash = { .init = aspeed_sham_init, @@ -1099,7 +1101,7 @@ struct aspeed_hace_alg aspeed_ahash_algs[] = { }, }; -struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { +static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { { .alg.ahash = { .init = aspeed_sham_init, -- cgit v1.2.3 From dc377e013bec20dfedaec5d1f6327e4d57da359b Mon Sep 17 00:00:00 2001 From: Sun Ke Date: Tue, 30 Aug 2022 11:13:47 +0800 Subject: crypto: aspeed - fix return value check in aspeed_hace_probe() In case of error, the function devm_ioremap_resource() returns ERR_PTR() not NULL. The NULL test in the return value check must be replaced with IS_ERR(). 
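For reference, the canonical probe-time pattern (a generic sketch, not the driver's code): devm_ioremap_resource() reports failure through an ERR_PTR()-encoded value, never NULL, so the result must be tested with IS_ERR() and propagated with PTR_ERR().

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))	/* never NULL on failure */
		return PTR_ERR(regs);

	return 0;
}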
Fixes: 108713a713c7 ("crypto: aspeed - Add HACE hash driver") Signed-off-by: Sun Ke Reviewed-by: Neal Liu Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-hace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c index 4fefc9e69d72..3f880aafb6a2 100644 --- a/drivers/crypto/aspeed/aspeed-hace.c +++ b/drivers/crypto/aspeed/aspeed-hace.c @@ -123,9 +123,9 @@ static int aspeed_hace_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hace_dev); hace_dev->regs = devm_ioremap_resource(&pdev->dev, res); - if (!hace_dev->regs) { + if (IS_ERR(hace_dev->regs)) { dev_err(&pdev->dev, "Failed to map resources\n"); - return -ENOMEM; + return PTR_ERR(hace_dev->regs); } /* Get irq number and register it */ -- cgit v1.2.3 From 24ddd4e1dbfcf530b89c3d994789cc2e57cbe713 Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Thu, 1 Sep 2022 07:43:48 +0000 Subject: crypto: octeontx - Remove the unneeded result variable Return the value of cptvf_send_msg_to_pf_timeout() directly instead of storing it in another redundant variable. Reported-by: Zeal Robot Signed-off-by: ye xingchen Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c index 5663787c7a62..90fdafb7c468 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c @@ -159,12 +159,10 @@ static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf, int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf) { struct otx_cpt_mbox mbx = {}; - int ret; mbx.msg = OTX_CPT_MSG_READY; - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); - return ret; + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); } /* @@ -174,13 +172,11 @@ int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf) int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf) { struct otx_cpt_mbox mbx = {}; - int ret; mbx.msg = OTX_CPT_MSG_QLEN; mbx.data = cptvf->qsize; - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); - return ret; + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); } /* @@ -208,14 +204,12 @@ int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group) int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf) { struct otx_cpt_mbox mbx = {}; - int ret; mbx.msg = OTX_CPT_MSG_VQ_PRIORITY; /* Convey group of the VF */ mbx.data = cptvf->priority; - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); - return ret; + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); } /* @@ -224,12 +218,10 @@ int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf) int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf) { struct otx_cpt_mbox mbx = {}; - int ret; mbx.msg = OTX_CPT_MSG_VF_UP; - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); - return ret; + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); } /* @@ -238,10 +230,8 @@ int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf) int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf) { struct otx_cpt_mbox mbx = {}; - int ret; mbx.msg = OTX_CPT_MSG_VF_DOWN; - ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx); - return ret; + return cptvf_send_msg_to_pf_timeout(cptvf, &mbx); } -- cgit v1.2.3 From 68dbe80f5b510c66c800b9e8055235c5b07e37d1 Mon Sep 17 00:00:00 2001 From: Koba Ko Date: Thu, 1 Sep 2022 22:47:12 +0800 Subject: crypto: ccp - 
Release dma channels before dmaengine unregister A warning is shown during shutdown: __dma_async_device_channel_unregister called while 2 clients hold a reference WARNING: CPU: 15 PID: 1 at drivers/dma/dmaengine.c:1110 __dma_async_device_channel_unregister+0xb7/0xc0 Call dma_release_channel for occupied channels before dma_async_device_unregister. Fixes: 54cce8ecb925 ("crypto: ccp - ccp_dmaengine_unregister release dma channels") Reported-by: kernel test robot Signed-off-by: Koba Ko Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-dmaengine.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 7d4b4ad1db1f..9f753cb4f5f1 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -641,6 +641,10 @@ static void ccp_dma_release(struct ccp_device *ccp) for (i = 0; i < ccp->cmd_q_count; i++) { chan = ccp->ccp_dma_chan + i; dma_chan = &chan->dma_chan; + + if (dma_chan->client_count) + dma_release_channel(dma_chan); + tasklet_kill(&chan->cleanup_tasklet); list_del_rcu(&dma_chan->device_node); } @@ -766,8 +770,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) if (!dmaengine) return; - dma_async_device_unregister(dma_dev); ccp_dma_release(ccp); + dma_async_device_unregister(dma_dev); kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); -- cgit v1.2.3 From 96d3e6f05f5b7ec1cadb6a26c05093efcf062432 Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Fri, 2 Sep 2022 07:30:55 +0000 Subject: crypto: nx - Remove the unneeded result variable Return the value of set_msg_len() directly instead of storing it in another redundant variable. Reported-by: Zeal Robot Signed-off-by: ye xingchen Reviewed-by: Breno Leitao Signed-off-by: Herbert Xu --- drivers/crypto/nx/nx-aes-ccm.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 3793885f928d..c843f4c6f684 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -134,7 +134,6 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize, unsigned int cryptlen, u8 *b0) { unsigned int l, lp, m = authsize; - int rc; memcpy(b0, iv, 16); @@ -148,9 +147,7 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize, if (assoclen) *b0 |= 64; - rc = set_msg_len(b0 + 16 - l, cryptlen, l); - - return rc; + return set_msg_len(b0 + 16 - l, cryptlen, l); } static int generate_pat(u8 *iv, -- cgit v1.2.3 From 664593407e936b6438fbfaaf98876910fd31cf9a Mon Sep 17 00:00:00 2001 From: Peter Harliman Liem Date: Tue, 6 Sep 2022 10:51:28 +0800 Subject: crypto: inside-secure - Change swab to swab32 The use of swab() is causing failures on 64-bit arches, as it translates to __swab64() instead of the intended __swab32(). It eventually causes wrong results in the xcbcmac & cmac algorithms.
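The difference in a standalone sketch (illustrative only): swab32() always swaps exactly four bytes, while the width-generic swab() picks its variant from the operand and, per the message above, can resolve to __swab64() on 64-bit builds.

#include <linux/swab.h>
#include <linux/types.h>

static u32 example_swap_word(u32 w)
{
	/* Width-exact: 0x11223344 becomes 0x44332211 on any architecture */
	return swab32(w);
}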
Fixes: 78cf1c8bfcb8 ("crypto: inside-secure - Move ipad/opad into safexcel_context") Signed-off-by: Peter Harliman Liem Acked-by: Antoine Tenart Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_hash.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index bc60b5802256..2124416742f8 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -383,7 +383,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, u32 x; x = ipad[i] ^ ipad[i + 4]; - cache[i] ^= swab(x); + cache[i] ^= swab32(x); } } cache_len = AES_BLOCK_SIZE; @@ -821,7 +821,7 @@ static int safexcel_ahash_final(struct ahash_request *areq) u32 *result = (void *)areq->result; /* K3 */ - result[i] = swab(ctx->base.ipad.word[i + 4]); + result[i] = swab32(ctx->base.ipad.word[i + 4]); } areq->result[0] ^= 0x80; // 10- padding crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result); @@ -2106,7 +2106,7 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE, "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3"); for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++) - ctx->base.ipad.word[i] = swab(key_tmp[i]); + ctx->base.ipad.word[i] = swab32(key_tmp[i]); crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & @@ -2189,7 +2189,7 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, return ret; for (i = 0; i < len / sizeof(u32); i++) - ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]); + ctx->base.ipad.word[i + 8] = swab32(aes.key_enc[i]); /* precompute the CMAC key material */ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); -- cgit v1.2.3 From 0413623c27a380d0da7240717f9435d24776b985 Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Fri, 9 Sep 2022 06:28:11 +0000 Subject: crypto: hisilicon/sec - delete redundant blank lines Some coding style fixes in sec crypto file. Signed-off-by: Kai Ye Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 77c9f13cf69a..ead061089e0c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -1679,7 +1679,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) aead_req->out_mac, authsize, a_req->cryptlen + a_req->assoclen); - if (unlikely(sz != authsize)) { dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; @@ -1966,7 +1965,6 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); } - static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx, struct sec_req *sreq) { -- cgit v1.2.3 From 82f00b24f532557fb0e15a6a2747859e4b70c4bd Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 9 Sep 2022 17:46:55 +0800 Subject: crypto: hisilicon/qm - get hardware features from hardware registers Before hardware V3, the hardware does not provide feature registers, so the driver resolves hardware differences based on the hardware version. As a result, the driver does not support new hardware.
Hardware V3 and later versions support obtaining hardware features, such as power-gating management and doorbell isolation, through the hardware registers. To be compatible with later hardware versions, the features of the current device are obtained by reading the hardware registers rather than from the hardware version. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 4 +- drivers/crypto/hisilicon/qm.c | 196 ++++++++++++++++++++---------- drivers/crypto/hisilicon/sec2/sec_main.c | 4 +- drivers/crypto/hisilicon/zip/zip_main.c | 4 +- 4 files changed, 141 insertions(+), 67 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 8e0e87cede6b..d484105b2716 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -457,7 +457,7 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm) u32 val; int ret; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; /* Enable prefetch */ @@ -478,7 +478,7 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm) u32 val; int ret; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 54bbd7fa57cc..fa77b469761b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -87,9 +87,7 @@ #define QM_DB_CMD_SHIFT_V1 16 #define QM_DB_INDEX_SHIFT_V1 32 #define QM_DB_PRIORITY_SHIFT_V1 48 -#define QM_QUE_ISO_CFG_V 0x0030 #define QM_PAGE_SIZE 0x0034 -#define QM_QUE_ISO_EN 0x100154 #define QM_CAPBILITY 0x100158 #define QM_QP_NUN_MASK GENMASK(10, 0) #define QM_QP_DB_INTERVAL 0x10000 @@ -206,6 +204,8 @@ #define MAX_WAIT_COUNTS 1000 #define QM_CACHE_WB_START 0x204 #define QM_CACHE_WB_DONE 0x208 +#define QM_FUNC_CAPS_REG 0x3100 +#define QM_CAPBILITY_VERSION GENMASK(7, 0) #define PCI_BAR_2 2 #define PCI_BAR_4 4 @@ -330,6 +330,22 @@ enum qm_mb_cmd { QM_VF_GET_QOS, }; +static const struct hisi_qm_cap_info qm_cap_info_comm[] = { + {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0}, + {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1}, + {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1}, + {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1}, + {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1}, +}; + +static const struct hisi_qm_cap_info qm_cap_info_pf[] = { + {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1}, +}; + +static const struct hisi_qm_cap_info qm_cap_info_vf[] = { + {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0}, +}; + struct qm_cqe { __le32 rsvd0; __le16 cmd_id; @@ -427,10 +443,7 @@ struct hisi_qm_hw_ops { void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); void (*hw_error_uninit)(struct hisi_qm *qm); enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); - int (*stop_qp)(struct hisi_qp *qp); int (*set_msi)(struct hisi_qm *qm, bool set); - int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd); - int (*ping_pf)(struct hisi_qm *qm, u64 cmd); }; struct qm_dfx_item { @@ -841,6 +854,36 @@ static int qm_dev_mem_reset(struct hisi_qm *qm) POLL_TIMEOUT); } +/** + * hisi_qm_get_hw_info() - Get device information. + * @qm: The qm which want to get information. + * @info_table: Array for storing device information. + * @index: Index in info_table. + * @is_read: Whether read from reg, 0: not support read from reg.
+ * + * This function returns device information the caller needs. + */ +u32 hisi_qm_get_hw_info(struct hisi_qm *qm, + const struct hisi_qm_cap_info *info_table, + u32 index, bool is_read) +{ + u32 val; + + switch (qm->ver) { + case QM_HW_V1: + return info_table[index].v1_val; + case QM_HW_V2: + return info_table[index].v2_val; + default: + if (!is_read) + return info_table[index].v3_val; + + val = readl(qm->io_base + info_table[index].offset); + return (val >> info_table[index].shift) & info_table[index].mask; + } +} +EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info); + static u32 qm_get_irq_num_v1(struct hisi_qm *qm) { return QM_IRQ_NUM_V1; @@ -867,7 +910,7 @@ static int qm_pm_get_sync(struct hisi_qm *qm) struct device *dev = &qm->pdev->dev; int ret; - if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) return 0; ret = pm_runtime_resume_and_get(dev); @@ -883,7 +926,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; - if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) return; pm_runtime_mark_last_busy(dev); @@ -1164,7 +1207,7 @@ static void qm_init_prefetch(struct hisi_qm *qm) struct device *dev = &qm->pdev->dev; u32 page_type = 0x0; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; switch (PAGE_SIZE) { @@ -1283,7 +1326,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, } break; case SHAPER_VFT: - if (qm->ver >= QM_HW_V3) { + if (factor) { tmp = factor->cir_b | (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | @@ -1301,10 +1344,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, u32 fun_num, u32 base, u32 number) { - struct qm_shaper_factor *factor = &qm->factor[fun_num]; + struct qm_shaper_factor *factor = NULL; unsigned int val; int ret; + if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) + factor = &qm->factor[fun_num]; + ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, val & BIT(0), POLL_PERIOD, POLL_TIMEOUT); @@ -1362,7 +1408,7 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, } /* init default shaper qos val */ - if (qm->ver >= QM_HW_V3) { + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { ret = qm_shaper_init_vft(qm, fun_num); if (ret) goto back_sqc_cqc; @@ -2466,7 +2512,7 @@ static int qm_wait_vf_prepare_finish(struct hisi_qm *qm) u64 val; u32 i; - if (!qm->vfs_num || qm->ver < QM_HW_V3) + if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) return 0; while (true) { @@ -2751,10 +2797,7 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { .hw_error_init = qm_hw_error_init_v3, .hw_error_uninit = qm_hw_error_uninit_v3, .hw_error_handle = qm_hw_error_handle_v2, - .stop_qp = qm_stop_qp, .set_msi = qm_set_msi_v3, - .ping_all_vfs = qm_ping_all_vfs, - .ping_pf = qm_ping_pf, }; static void *qm_get_avail_sqe(struct hisi_qp *qp) @@ -3051,8 +3094,8 @@ static int qm_drain_qp(struct hisi_qp *qp) return 0; /* Kunpeng930 supports drain qp by device */ - if (qm->ops->stop_qp) { - ret = qm->ops->stop_qp(qp); + if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { + ret = qm_stop_qp(qp); if (ret) dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); return ret; @@ -3282,7 +3325,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q, if (qm->ver == QM_HW_V1) { if (sz > PAGE_SIZE * 
QM_DOORBELL_PAGE_NR) return -EINVAL; - } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) { + } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) return -EINVAL; @@ -3436,7 +3479,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm) if (qm->ver == QM_HW_V1) mmio_page_nr = QM_DOORBELL_PAGE_NR; - else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) + else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) mmio_page_nr = QM_DOORBELL_PAGE_NR + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; else @@ -3598,7 +3641,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) init_rwsem(&qm->qps_lock); qm->qp_in_used = 0; qm->misc_ctl = false; - if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) { + if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); } @@ -3608,7 +3651,7 @@ static void qm_cmd_uninit(struct hisi_qm *qm) { u32 val; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) return; val = readl(qm->io_base + QM_IFC_INT_MASK); @@ -3620,7 +3663,7 @@ static void qm_cmd_init(struct hisi_qm *qm) { u32 val; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) return; /* Clear communication interrupt source */ @@ -3636,7 +3679,7 @@ static void qm_put_pci_res(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; - if (qm->use_db_isolation) + if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) iounmap(qm->db_io_base); iounmap(qm->io_base); @@ -3686,7 +3729,9 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm) } idr_destroy(&qm->qp_idr); - kfree(qm->factor); + + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) + kfree(qm->factor); } /** @@ -4469,12 +4514,10 @@ static int qm_vf_read_qos(struct hisi_qm *qm) qm->mb_qos = 0; /* vf ping pf to get function qos */ - if (qm->ops->ping_pf) { - ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS); - if (ret) { - pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); - return ret; - } + ret = qm_ping_pf(qm, QM_VF_GET_QOS); + if (ret) { + pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); + return ret; } while (true) { @@ -4646,14 +4689,14 @@ static const struct file_operations qm_algqos_fops = { * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. * @qm: The qm for which we want to add debugfs files. * - * Create function qos debugfs files. + * Create function qos debugfs files, VF ping PF to get function qos. 
*/ static void hisi_qm_set_algqos_init(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_PF) debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, qm, &qm_algqos_fops); - else + else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, qm, &qm_algqos_fops); } @@ -4701,7 +4744,7 @@ void hisi_qm_debug_init(struct hisi_qm *qm) &qm_atomic64_ops); } - if (qm->ver >= QM_HW_V3) + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) hisi_qm_set_algqos_init(qm); } EXPORT_SYMBOL_GPL(hisi_qm_debug_init); @@ -4824,7 +4867,9 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) pci_disable_sriov(pdev); /* clear vf function shaper configure array */ - memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs); + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) + memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs); + ret = qm_clear_vft_config(qm); if (ret) return ret; @@ -5048,8 +5093,8 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, return 0; /* Kunpeng930 supports to notify VFs to stop before PF reset */ - if (qm->ops->ping_all_vfs) { - ret = qm->ops->ping_all_vfs(qm, cmd); + if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { + ret = qm_ping_all_vfs(qm, cmd); if (ret) pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); } else { @@ -5240,8 +5285,8 @@ static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) } /* Kunpeng930 supports to notify VFs to start after PF reset. */ - if (qm->ops->ping_all_vfs) { - ret = qm->ops->ping_all_vfs(qm, cmd); + if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { + ret = qm_ping_all_vfs(qm, cmd); if (ret) pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); } else { @@ -5687,7 +5732,7 @@ err_prepare: hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); out: pci_save_state(pdev); - ret = qm->ops->ping_pf(qm, cmd); + ret = qm_ping_pf(qm, cmd); if (ret) dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); } @@ -5705,7 +5750,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm) cmd = QM_VF_START_FAIL; } - ret = qm->ops->ping_pf(qm, cmd); + ret = qm_ping_pf(qm, cmd); if (ret) dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); @@ -5910,7 +5955,7 @@ static int qm_get_qp_num(struct hisi_qm *qm) qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) & QM_QP_NUN_MASK; - if (qm->use_db_isolation) + if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >> QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK; else @@ -5926,6 +5971,39 @@ static int qm_get_qp_num(struct hisi_qm *qm) return 0; } +static void qm_get_hw_caps(struct hisi_qm *qm) +{ + const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? + qm_cap_info_pf : qm_cap_info_vf; + u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : + ARRAY_SIZE(qm_cap_info_vf); + u32 val, i; + + /* Doorbell isolate register is a independent register. 
*/ + val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); + if (val) + set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); + + if (qm->ver >= QM_HW_V3) { + val = readl(qm->io_base + QM_FUNC_CAPS_REG); + qm->cap_ver = val & QM_CAPBILITY_VERSION; + } + + /* Get PF/VF common capbility */ + for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) { + val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); + if (val) + set_bit(qm_cap_info_comm[i].type, &qm->caps); + } + + /* Get PF/VF different capbility */ + for (i = 0; i < size; i++) { + val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); + if (val) + set_bit(cap_info[i].type, &qm->caps); + } +} + static int qm_get_pci_res(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5945,16 +6023,8 @@ static int qm_get_pci_res(struct hisi_qm *qm) goto err_request_mem_regions; } - if (qm->ver > QM_HW_V2) { - if (qm->fun_type == QM_HW_PF) - qm->use_db_isolation = readl(qm->io_base + - QM_QUE_ISO_EN) & BIT(0); - else - qm->use_db_isolation = readl(qm->io_base + - QM_QUE_ISO_CFG_V) & BIT(0); - } - - if (qm->use_db_isolation) { + qm_get_hw_caps(qm); + if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { qm->db_interval = QM_QP_DB_INTERVAL; qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); qm->db_io_base = ioremap(qm->db_phys_base, @@ -5978,7 +6048,7 @@ static int qm_get_pci_res(struct hisi_qm *qm) return 0; err_db_ioremap: - if (qm->use_db_isolation) + if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) iounmap(qm->db_io_base); err_ioremap: iounmap(qm->io_base); @@ -6095,12 +6165,15 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) int ret, total_func, i; size_t off = 0; - total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; - qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); - if (!qm->factor) - return -ENOMEM; - for (i = 0; i < total_func; i++) - qm->factor[i].func_qos = QM_QOS_MAX_VAL; + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { + total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; + qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); + if (!qm->factor) + return -ENOMEM; + + for (i = 0; i < total_func; i++) + qm->factor[i].func_qos = QM_QOS_MAX_VAL; + } #define QM_INIT_BUF(qm, type, num) do { \ (qm)->type = ((qm)->qdma.va + (off)); \ @@ -6136,7 +6209,8 @@ err_alloc_qp_array: dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); err_destroy_idr: idr_destroy(&qm->qp_idr); - kfree(qm->factor); + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) + kfree(qm->factor); return ret; } @@ -6279,7 +6353,7 @@ void hisi_qm_pm_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; - if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) return; pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY); @@ -6298,7 +6372,7 @@ void hisi_qm_pm_uninit(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; - if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) return; pm_runtime_get_noresume(dev); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 2c0be91c0b09..1ec3b06345fd 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -415,7 +415,7 @@ static void sec_open_sva_prefetch(struct hisi_qm *qm) u32 val; int ret; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; /* Enable prefetch */ @@ -435,7 +435,7 @@ static void 
sec_close_sva_prefetch(struct hisi_qm *qm) u32 val; int ret; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 04c8a4c65d77..36809bae6334 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -348,7 +348,7 @@ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) u32 val; int ret; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; /* Enable prefetch */ @@ -368,7 +368,7 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm) u32 val; int ret; - if (qm->ver < QM_HW_V3) + if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG); -- cgit v1.2.3 From 129a9f340172b4f3857260a7a7bb9d7b3496ba50 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 9 Sep 2022 17:46:56 +0800 Subject: crypto: hisilicon/qm - get qp num and depth from hardware registers Hardware V3 and later versions can obtain qp num and depth supported by the hardware from registers. To be compatible with later hardware versions, get qp num and depth from registers instead of fixed macros. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 4 +- drivers/crypto/hisilicon/qm.c | 166 ++++++++++++++++------------ drivers/crypto/hisilicon/sec2/sec.h | 5 +- drivers/crypto/hisilicon/sec2/sec_crypto.c | 148 +++++++++++++++---------- drivers/crypto/hisilicon/sec2/sec_main.c | 1 - drivers/crypto/hisilicon/zip/zip_crypto.c | 6 +- drivers/crypto/hisilicon/zip/zip_main.c | 1 - 7 files changed, 198 insertions(+), 133 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 3ba6f15deafc..2aa91a4f77da 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -147,7 +147,7 @@ static int hpre_alloc_req_id(struct hpre_ctx *ctx) int id; spin_lock_irqsave(&ctx->req_lock, flags); - id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); + id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC); spin_unlock_irqrestore(&ctx->req_lock, flags); return id; @@ -488,7 +488,7 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; - ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH); + ret = hpre_ctx_set(ctx, qp, qp->sq_depth); if (ret) hpre_stop_qp_and_put(qp); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index fa77b469761b..b3216ee627e5 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -78,6 +78,9 @@ #define QM_EQ_OVERFLOW 1 #define QM_CQE_ERROR 2 +#define QM_XQ_DEPTH_SHIFT 16 +#define QM_XQ_DEPTH_MASK GENMASK(15, 0) + #define QM_DOORBELL_CMD_SQ 0 #define QM_DOORBELL_CMD_CQ 1 #define QM_DOORBELL_CMD_EQ 2 @@ -88,8 +91,6 @@ #define QM_DB_INDEX_SHIFT_V1 32 #define QM_DB_PRIORITY_SHIFT_V1 48 #define QM_PAGE_SIZE 0x0034 -#define QM_CAPBILITY 0x100158 -#define QM_QP_NUN_MASK GENMASK(10, 0) #define QM_QP_DB_INTERVAL 0x10000 #define QM_MEM_START_INIT 0x100040 @@ -222,7 +223,6 @@ #define WAIT_PERIOD 20 #define REMOVE_WAIT_DELAY 10 #define QM_SQE_ADDR_MASK GENMASK(7, 0) -#define QM_EQ_DEPTH (1024 * 2) #define QM_DRIVER_REMOVING 0 #define QM_RST_SCHED 1 @@ -271,8 +271,8 @@ ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ ((cqe_sz) << 
QM_CQ_CQE_SIZE_SHIFT)) -#define QM_MK_CQC_DW3_V2(cqe_sz) \ - ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) +#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \ + ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) #define QM_MK_SQC_W13(priority, orders, alg_type) \ (((priority) << QM_SQ_PRIORITY_SHIFT) | \ @@ -285,8 +285,8 @@ ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) -#define QM_MK_SQC_DW3_V2(sqe_sz) \ - ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) +#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \ + ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) #define INIT_QC_COMMON(qc, base, pasid) do { \ (qc)->head = 0; \ @@ -330,6 +330,13 @@ enum qm_mb_cmd { QM_VF_GET_QOS, }; +enum qm_basic_type { + QM_TOTAL_QP_NUM_CAP = 0x0, + QM_FUNC_MAX_QP_CAP, + QM_XEQ_DEPTH_CAP, + QM_QP_DEPTH_CAP, +}; + static const struct hisi_qm_cap_info qm_cap_info_comm[] = { {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0}, {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1}, @@ -346,6 +353,13 @@ static const struct hisi_qm_cap_info qm_cap_info_vf[] = { {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0}, }; +static const struct hisi_qm_cap_info qm_basic_info[] = { + {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400}, + {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400}, + {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800}, + {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400}, +}; + struct qm_cqe { __le32 rsvd0; __le16 cmd_id; @@ -884,6 +898,16 @@ u32 hisi_qm_get_hw_info(struct hisi_qm *qm, } EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info); +static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, + u16 *high_bits, enum qm_basic_type type) +{ + u32 depth; + + depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); + *high_bits = depth & QM_XQ_DEPTH_MASK; + *low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; +} + static u32 qm_get_irq_num_v1(struct hisi_qm *qm) { return QM_IRQ_NUM_V1; @@ -935,7 +959,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm) static void qm_cq_head_update(struct hisi_qp *qp) { - if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { + if (qp->qp_status.cq_head == qp->cq_depth - 1) { qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; qp->qp_status.cq_head = 0; } else { @@ -967,6 +991,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data) { struct hisi_qm *qm = poll_data->qm; struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; + u16 eq_depth = qm->eq_depth; int eqe_num = 0; u16 cqn; @@ -975,7 +1000,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data) poll_data->qp_finish_id[eqe_num] = cqn; eqe_num++; - if (qm->status.eq_head == QM_EQ_DEPTH - 1) { + if (qm->status.eq_head == eq_depth - 1) { qm->status.eqc_phase = !qm->status.eqc_phase; eqe = qm->eqe; qm->status.eq_head = 0; @@ -984,7 +1009,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data) qm->status.eq_head++; } - if (eqe_num == (QM_EQ_DEPTH >> 1) - 1) + if (eqe_num == (eq_depth >> 1) - 1) break; } @@ -1124,6 +1149,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) { struct hisi_qm *qm = data; struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; + u16 aeq_depth = qm->aeq_depth; u32 type, qp_id; while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { @@ -1148,7 +1174,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) break; } - if (qm->status.aeq_head == 
QM_Q_DEPTH - 1) { + if (qm->status.aeq_head == aeq_depth - 1) { qm->status.aeqc_phase = !qm->status.aeqc_phase; aeqe = qm->aeqe; qm->status.aeq_head = 0; @@ -2050,7 +2076,7 @@ err_free_ctx: } static int q_dump_param_parse(struct hisi_qm *qm, char *s, - u32 *e_id, u32 *q_id) + u32 *e_id, u32 *q_id, u16 q_depth) { struct device *dev = &qm->pdev->dev; unsigned int qp_num = qm->qp_num; @@ -2076,8 +2102,8 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s, } ret = kstrtou32(presult, 0, e_id); - if (ret || *e_id >= QM_Q_DEPTH) { - dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1); + if (ret || *e_id >= q_depth) { + dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1); return -EINVAL; } @@ -2091,21 +2117,22 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s, static int qm_sq_dump(struct hisi_qm *qm, char *s) { + u16 sq_depth = qm->qp_array->cq_depth; void *sqe, *sqe_curr; struct hisi_qp *qp; u32 qp_id, sqe_id; int ret; - ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id); + ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth); if (ret) return ret; - sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL); + sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL); if (!sqe) return -ENOMEM; qp = &qm->qp_array[qp_id]; - memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH); + memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth); sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, qm->debug.sqe_mask_len); @@ -2124,7 +2151,7 @@ static int qm_cq_dump(struct hisi_qm *qm, char *s) u32 qp_id, cqe_id; int ret; - ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id); + ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth); if (ret) return ret; @@ -2150,11 +2177,11 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, if (ret) return -EINVAL; - if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) { - dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1); + if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) { + dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1); return -EINVAL; - } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) { - dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1); + } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) { + dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1); return -EINVAL; } @@ -2805,7 +2832,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp) struct hisi_qp_status *qp_status = &qp->qp_status; u16 sq_tail = qp_status->sq_tail; - if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1)) + if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) return NULL; return qp->sqe + sq_tail * qp->qm->sqe_size; @@ -2846,7 +2873,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) qp = &qm->qp_array[qp_id]; hisi_qm_unset_hw_reset(qp); - memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); + memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); qp->event_cb = NULL; qp->req_cb = NULL; @@ -2927,9 +2954,9 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); if (ver == QM_HW_V1) { sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); - sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); + sqc->w8 = cpu_to_le16(qp->sq_depth - 1); } else { - sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); + sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); sqc->w8 = 0; /* rand_qc */ } sqc->cq_num = 
cpu_to_le16(qp_id); @@ -2970,9 +2997,9 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) if (ver == QM_HW_V1) { cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE)); - cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); + cqc->w8 = cpu_to_le16(qp->cq_depth - 1); } else { - cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE)); + cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); cqc->w8 = 0; /* rand_qc */ } cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); @@ -3059,13 +3086,14 @@ static void qp_stop_fail_cb(struct hisi_qp *qp) { int qp_used = atomic_read(&qp->qp_status.used); u16 cur_tail = qp->qp_status.sq_tail; - u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH; + u16 sq_depth = qp->sq_depth; + u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; struct hisi_qm *qm = qp->qm; u16 pos; int i; for (i = 0; i < qp_used; i++) { - pos = (i + cur_head) % QM_Q_DEPTH; + pos = (i + cur_head) % sq_depth; qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); atomic_dec(&qp->qp_status.used); } @@ -3213,7 +3241,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg) { struct hisi_qp_status *qp_status = &qp->qp_status; u16 sq_tail = qp_status->sq_tail; - u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH; + u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; void *sqe = qm_get_avail_sqe(qp); if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || @@ -3442,6 +3470,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm) struct uacce_device *uacce; unsigned long mmio_page_nr; unsigned long dus_page_nr; + u16 sq_depth, cq_depth; struct uacce_interface interface = { .flags = UACCE_DEV_SVA, .ops = &uacce_qm_ops, @@ -3485,9 +3514,11 @@ static int qm_alloc_uacce(struct hisi_qm *qm) else mmio_page_nr = qm->db_interval / PAGE_SIZE; + qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); + /* Add one more page for device or qp status */ - dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + - sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >> + dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + + sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >> PAGE_SHIFT; uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; @@ -3592,10 +3623,11 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) kfree(qm->qp_array); } -static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) +static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, + u16 sq_depth, u16 cq_depth) { struct device *dev = &qm->pdev->dev; - size_t off = qm->sqe_size * QM_Q_DEPTH; + size_t off = qm->sqe_size * sq_depth; struct hisi_qp *qp; int ret = -ENOMEM; @@ -3615,6 +3647,8 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) qp->cqe = qp->qdma.va + off; qp->cqe_dma = qp->qdma.dma + off; qp->qdma.size = dma_size; + qp->sq_depth = sq_depth; + qp->cq_depth = cq_depth; qp->qm = qm; qp->qp_id = id; @@ -3858,7 +3892,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm) eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); if (qm->ver == QM_HW_V1) eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); - eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); + eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), DMA_TO_DEVICE); @@ -3887,7 +3921,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm) aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); - aeqc->dw6 = 
cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); + aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), DMA_TO_DEVICE); @@ -5947,19 +5981,21 @@ EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); static int qm_get_qp_num(struct hisi_qm *qm) { - if (qm->ver == QM_HW_V1) - qm->ctrl_qp_num = QM_QNUM_V1; - else if (qm->ver == QM_HW_V2) - qm->ctrl_qp_num = QM_QNUM_V2; - else - qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) & - QM_QP_NUN_MASK; + bool is_db_isolation; - if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) - qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >> - QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK; - else - qm->max_qp_num = qm->ctrl_qp_num; + /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */ + if (qm->fun_type == QM_HW_VF) { + if (qm->ver != QM_HW_V1) + /* v2 starts to support get vft by mailbox */ + return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + + return 0; + } + + is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); + qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); + qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, + QM_FUNC_MAX_QP_CAP, is_db_isolation); /* check if qp number is valid */ if (qm->qp_num > qm->max_qp_num) { @@ -6039,11 +6075,9 @@ static int qm_get_pci_res(struct hisi_qm *qm) qm->db_interval = 0; } - if (qm->fun_type == QM_HW_PF) { - ret = qm_get_qp_num(qm); - if (ret) - goto err_db_ioremap; - } + ret = qm_get_qp_num(qm); + if (ret) + goto err_db_ioremap; return 0; @@ -6126,6 +6160,7 @@ static int hisi_qm_init_work(struct hisi_qm *qm) static int hisi_qp_alloc_memory(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; + u16 sq_depth, cq_depth; size_t qp_dma_size; int i, ret; @@ -6139,13 +6174,14 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm) return -ENOMEM; } + qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); + /* one more page for device or qp statuses */ - qp_dma_size = qm->sqe_size * QM_Q_DEPTH + - sizeof(struct qm_cqe) * QM_Q_DEPTH; + qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; for (i = 0; i < qm->qp_num; i++) { qm->poll_data[i].qm = qm; - ret = hisi_qp_memory_init(qm, qp_dma_size, i); + ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); if (ret) goto err_init_qp_mem; @@ -6182,8 +6218,9 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) } while (0) idr_init(&qm->qp_idr); - qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) + - QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + + qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); + qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + + QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, @@ -6194,8 +6231,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) goto err_destroy_idr; } - QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH); - QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); + QM_INIT_BUF(qm, eqe, qm->eq_depth); + QM_INIT_BUF(qm, aeqe, qm->aeq_depth); QM_INIT_BUF(qm, sqc, qm->qp_num); QM_INIT_BUF(qm, cqc, qm->qp_num); @@ -6257,13 +6294,6 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_pci_init; - if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) { - /* v2 starts to support get vft by mailbox */ - ret = 
hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - if (ret) - goto err_irq_register; - } - if (qm->fun_type == QM_HW_PF) { qm_disable_clock_gate(qm); ret = qm_dev_mem_reset(qm); diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index d2a0bc93e752..04e034abc5e8 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -17,6 +17,7 @@ struct sec_alg_res { dma_addr_t a_ivin_dma; u8 *out_mac; dma_addr_t out_mac_dma; + u16 depth; }; /* Cipher request of SEC private */ @@ -115,9 +116,9 @@ struct sec_cipher_ctx { /* SEC queue context which defines queue's relatives */ struct sec_qp_ctx { struct hisi_qp *qp; - struct sec_req *req_list[QM_Q_DEPTH]; + struct sec_req **req_list; struct idr req_idr; - struct sec_alg_res res[QM_Q_DEPTH]; + struct sec_alg_res *res; struct sec_ctx *ctx; spinlock_t req_lock; struct list_head backlog; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index ead061089e0c..eb23bffdcac8 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -59,14 +59,14 @@ #define SEC_ICV_MASK 0x000E #define SEC_SQE_LEN_RATE_MASK 0x3 -#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) +#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 #define SEC_CIPHER_AUTH 0xfe #define SEC_AUTH_CIPHER 0x1 #define SEC_MAX_MAC_LEN 64 #define SEC_MAX_AAD_LEN 65535 #define SEC_MAX_CCM_AAD_LEN 65279 -#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH) +#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth)) #define SEC_PBUF_SZ 512 #define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ @@ -74,11 +74,11 @@ #define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \ SEC_MAX_MAC_LEN * 2) #define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG) -#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM) -#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \ - SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM)) -#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \ - SEC_PBUF_LEFT_SZ) +#define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM) +#define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \ + SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM)) +#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ + SEC_PBUF_LEFT_SZ(depth)) #define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 @@ -128,9 +128,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) int req_id; spin_lock_bh(&qp_ctx->req_lock); - - req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, - 0, QM_Q_DEPTH, GFP_ATOMIC); + req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC); spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { dev_err(req->ctx->dev, "alloc req id fail!\n"); @@ -148,7 +146,7 @@ static void sec_free_req_id(struct sec_req *req) struct sec_qp_ctx *qp_ctx = req->qp_ctx; int req_id = req->req_id; - if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { + if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) { dev_err(req->ctx->dev, "free request id invalid!\n"); return; } @@ -300,14 +298,15 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) /* Get DMA memory resources */ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) { + u16 q_depth = res->depth; int i; - res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, + res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), &res->c_ivin_dma, GFP_KERNEL); if (!res->c_ivin) return -ENOMEM; - for (i = 
1; i < QM_Q_DEPTH; i++) { + for (i = 1; i < q_depth; i++) { res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; } @@ -318,20 +317,21 @@ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res) { if (res->c_ivin) - dma_free_coherent(dev, SEC_TOTAL_IV_SZ, + dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), res->c_ivin, res->c_ivin_dma); } static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res) { + u16 q_depth = res->depth; int i; - res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, + res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), &res->a_ivin_dma, GFP_KERNEL); if (!res->a_ivin) return -ENOMEM; - for (i = 1; i < QM_Q_DEPTH; i++) { + for (i = 1; i < q_depth; i++) { res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE; res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE; } @@ -342,20 +342,21 @@ static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res) static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res) { if (res->a_ivin) - dma_free_coherent(dev, SEC_TOTAL_IV_SZ, + dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), res->a_ivin, res->a_ivin_dma); } static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) { + u16 q_depth = res->depth; int i; - res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1, + res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1, &res->out_mac_dma, GFP_KERNEL); if (!res->out_mac) return -ENOMEM; - for (i = 1; i < QM_Q_DEPTH; i++) { + for (i = 1; i < q_depth; i++) { res[i].out_mac_dma = res->out_mac_dma + i * (SEC_MAX_MAC_LEN << 1); res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1); @@ -367,14 +368,14 @@ static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res) { if (res->out_mac) - dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1, + dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1, res->out_mac, res->out_mac_dma); } static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res) { if (res->pbuf) - dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ, + dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth), res->pbuf, res->pbuf_dma); } @@ -384,10 +385,12 @@ static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res) */ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) { + u16 q_depth = res->depth; + int size = SEC_PBUF_PAGE_NUM(q_depth); int pbuf_page_offset; int i, j, k; - res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, + res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth), &res->pbuf_dma, GFP_KERNEL); if (!res->pbuf) return -ENOMEM; @@ -400,11 +403,11 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) * So we need SEC_PBUF_PAGE_NUM numbers of PAGE * for the SEC_TOTAL_PBUF_SZ */ - for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) { + for (i = 0; i <= size; i++) { pbuf_page_offset = PAGE_SIZE * i; for (j = 0; j < SEC_PBUF_NUM; j++) { k = i * SEC_PBUF_NUM + j; - if (k == QM_Q_DEPTH) + if (k == q_depth) break; res[k].pbuf = res->pbuf + j * SEC_PBUF_PKG + pbuf_page_offset; @@ -470,36 +473,29 @@ static void sec_alg_resource_free(struct sec_ctx *ctx, sec_free_mac_resource(dev, qp_ctx->res); } -static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, - int qp_ctx_id, int alg_type) 
+static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) { + u16 q_depth = qp_ctx->qp->sq_depth; struct device *dev = ctx->dev; - struct sec_qp_ctx *qp_ctx; - struct hisi_qp *qp; int ret = -ENOMEM; - qp_ctx = &ctx->qp_ctx[qp_ctx_id]; - qp = ctx->qps[qp_ctx_id]; - qp->req_type = 0; - qp->qp_ctx = qp_ctx; - qp_ctx->qp = qp; - qp_ctx->ctx = ctx; - - qp->req_cb = sec_req_cb; + qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL); + if (!qp_ctx->req_list) + return ret; - spin_lock_init(&qp_ctx->req_lock); - idr_init(&qp_ctx->req_idr); - INIT_LIST_HEAD(&qp_ctx->backlog); + qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL); + if (!qp_ctx->res) + goto err_free_req_list; + qp_ctx->res->depth = q_depth; - qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, - SEC_SGL_SGE_NR); + qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); if (IS_ERR(qp_ctx->c_in_pool)) { dev_err(dev, "fail to create sgl pool for input!\n"); - goto err_destroy_idr; + goto err_free_res; } - qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, - SEC_SGL_SGE_NR); + qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); if (IS_ERR(qp_ctx->c_out_pool)) { dev_err(dev, "fail to create sgl pool for output!\n"); goto err_free_c_in_pool; @@ -509,34 +505,72 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, if (ret) goto err_free_c_out_pool; - ret = hisi_qm_start_qp(qp, 0); - if (ret < 0) - goto err_queue_free; - return 0; -err_queue_free: - sec_alg_resource_free(ctx, qp_ctx); err_free_c_out_pool: hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); err_free_c_in_pool: hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); -err_destroy_idr: - idr_destroy(&qp_ctx->req_idr); +err_free_res: + kfree(qp_ctx->res); +err_free_req_list: + kfree(qp_ctx->req_list); return ret; } -static void sec_release_qp_ctx(struct sec_ctx *ctx, - struct sec_qp_ctx *qp_ctx) +static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { struct device *dev = ctx->dev; - hisi_qm_stop_qp(qp_ctx->qp); sec_alg_resource_free(ctx, qp_ctx); - hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); + kfree(qp_ctx->res); + kfree(qp_ctx->req_list); +} + +static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, + int qp_ctx_id, int alg_type) +{ + struct sec_qp_ctx *qp_ctx; + struct hisi_qp *qp; + int ret; + qp_ctx = &ctx->qp_ctx[qp_ctx_id]; + qp = ctx->qps[qp_ctx_id]; + qp->req_type = 0; + qp->qp_ctx = qp_ctx; + qp_ctx->qp = qp; + qp_ctx->ctx = ctx; + + qp->req_cb = sec_req_cb; + + spin_lock_init(&qp_ctx->req_lock); + idr_init(&qp_ctx->req_idr); + INIT_LIST_HEAD(&qp_ctx->backlog); + + ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx); + if (ret) + goto err_destroy_idr; + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) + goto err_resource_free; + + return 0; + +err_resource_free: + sec_free_qp_ctx_resource(ctx, qp_ctx); +err_destroy_idr: + idr_destroy(&qp_ctx->req_idr); + return ret; +} + +static void sec_release_qp_ctx(struct sec_ctx *ctx, + struct sec_qp_ctx *qp_ctx) +{ + hisi_qm_stop_qp(qp_ctx->qp); + sec_free_qp_ctx_resource(ctx, qp_ctx); idr_destroy(&qp_ctx->req_idr); } @@ -559,7 +593,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) ctx->pbuf_supported = ctx->sec->iommu_used; /* Half of queue depth is taken as fake requests limit in the queue. 
*/ - ctx->fake_req_limit = QM_Q_DEPTH >> 1; + ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1; ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), GFP_KERNEL); if (!ctx->qp_ctx) { diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 1ec3b06345fd..ea7f85f72b10 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -27,7 +27,6 @@ #define SEC_BD_ERR_CHK_EN3 0xffffbfff #define SEC_SQE_SIZE 128 -#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) #define SEC_PF_DEF_Q_NUM 256 #define SEC_PF_DEF_Q_BASE 0 #define SEC_CTX_Q_NUM_DEF 2 diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index a6c914d527eb..a7f6884c3ab3 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -599,12 +599,13 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) { + u16 q_depth = ctx->qp_ctx[0].qp->sq_depth; struct hisi_zip_req_q *req_q; int i, ret; for (i = 0; i < HZIP_CTX_Q_NUM; i++) { req_q = &ctx->qp_ctx[i].req_q; - req_q->size = QM_Q_DEPTH; + req_q->size = q_depth; req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL); if (!req_q->req_bitmap) { @@ -650,6 +651,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx) static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) { + u16 q_depth = ctx->qp_ctx[0].qp->sq_depth; struct hisi_zip_qp_ctx *tmp; struct device *dev; int i; @@ -657,7 +659,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) for (i = 0; i < HZIP_CTX_Q_NUM; i++) { tmp = &ctx->qp_ctx[i]; dev = &tmp->qp->qm->pdev->dev; - tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1, + tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1, sgl_sge_nr); if (IS_ERR(tmp->sgl_pool)) { if (i == 1) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 36809bae6334..24d5226e0a0a 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -82,7 +82,6 @@ #define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \ HZIP_DECOMP_CORE_NUM) #define HZIP_SQE_SIZE 128 -#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH) #define HZIP_PF_DEF_Q_NUM 64 #define HZIP_PF_DEF_Q_BASE 0 -- cgit v1.2.3 From c832da79cbf9448e7ece097c3a93996b4c74a83e Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 9 Sep 2022 17:46:57 +0800 Subject: crypto: hisilicon/qm - add UACCE_CMD_QM_SET_QP_INFO support To be compatible with accelerator devices of different versions, 'UACCE_CMD_QM_SET_QP_INFO' ioctl is added to obtain queue information in userspace, including queue depth and buffer description size. 
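For illustration, a minimal userspace sketch of the new query path. The struct layout and the ioctl number mirrored below are assumptions made to keep the example self-contained (the authoritative definitions live in the uacce uapi header, include/uapi/misc/uacce/hisi_qm.h); verify against the tree before relying on them:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>

    /* Mirrored here only for the sketch; normally taken from the uapi header. */
    struct hisi_qp_info {
            __u32 sqe_size;
            __u16 sq_depth;
            __u16 cq_depth;
            __u64 reserved;
    };

    /* Assumed ioctl number; the uapi header added by this patch is authoritative. */
    #ifndef UACCE_CMD_QM_SET_QP_INFO
    #define UACCE_CMD_QM_SET_QP_INFO _IOWR('H', 11, struct hisi_qp_info)
    #endif

    /* queue_fd is an open uacce queue file descriptor. */
    static int query_qp_info(int queue_fd)
    {
            struct hisi_qp_info info = { 0 };

            /* The driver fills in the sqe size and both queue depths. */
            if (ioctl(queue_fd, UACCE_CMD_QM_SET_QP_INFO, &info) < 0)
                    return -1;

            printf("sqe_size %u, sq_depth %u, cq_depth %u\n",
                   info.sqe_size, (unsigned int)info.sq_depth,
                   (unsigned int)info.cq_depth);
            return 0;
    }

Note that the handler below copies the struct in before filling it, so callers should pass a zeroed struct rather than uninitialized stack memory.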
Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index b3216ee627e5..e227a3e50600 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3430,6 +3430,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, unsigned long arg) { struct hisi_qp *qp = q->priv; + struct hisi_qp_info qp_info; struct hisi_qp_ctx qp_ctx; if (cmd == UACCE_CMD_QM_SET_QP_CTX) { @@ -3446,11 +3447,25 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, if (copy_to_user((void __user *)arg, &qp_ctx, sizeof(struct hisi_qp_ctx))) return -EFAULT; - } else { - return -EINVAL; + + return 0; + } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { + if (copy_from_user(&qp_info, (void __user *)arg, + sizeof(struct hisi_qp_info))) + return -EFAULT; + + qp_info.sqe_size = qp->qm->sqe_size; + qp_info.sq_depth = qp->sq_depth; + qp_info.cq_depth = qp->cq_depth; + + if (copy_to_user((void __user *)arg, &qp_info, + sizeof(struct hisi_qp_info))) + return -EFAULT; + + return 0; } - return 0; + return -EINVAL; } static const struct uacce_ops uacce_qm_ops = { -- cgit v1.2.3 From d90fab0deb8e580aa001f6876e4436c21e944f27 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 9 Sep 2022 17:46:58 +0800 Subject: crypto: hisilicon/qm - get error type from hardware registers Hardware V3 and later versions support getting the error type from hardware registers. To be compatible with later hardware versions, get the error type from registers instead of fixed macros. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 68 +++++++++++++----- drivers/crypto/hisilicon/qm.c | 101 +++++++++++++----------- drivers/crypto/hisilicon/sec2/sec.h | 11 ++++ drivers/crypto/hisilicon/sec2/sec_main.c | 51 ++++++++++----- drivers/crypto/hisilicon/zip/zip_main.c | 74 ++++++++++++++++------ 5 files changed, 202 insertions(+), 103 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index d484105b2716..36177c437c56 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -53,9 +53,7 @@ #define HPRE_CORE_IS_SCHD_OFFSET 0x90 #define HPRE_RAS_CE_ENB 0x301410 -#define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23)) #define HPRE_RAS_NFE_ENB 0x301414 -#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe #define HPRE_RAS_FE_ENB 0x301418 #define HPRE_OOO_SHUTDOWN_SEL 0x301a3c #define HPRE_HAC_RAS_FE_ENABLE 0 @@ -147,6 +145,28 @@ static const char * const hpre_debug_file_name[] = { [HPRE_CLUSTER_CTRL] = "cluster_ctrl", }; +enum hpre_cap_type { + HPRE_QM_NFE_MASK_CAP, + HPRE_QM_RESET_MASK_CAP, + HPRE_QM_OOO_SHUTDOWN_MASK_CAP, + HPRE_QM_CE_MASK_CAP, + HPRE_NFE_MASK_CAP, + HPRE_RESET_MASK_CAP, + HPRE_OOO_SHUTDOWN_MASK_CAP, + HPRE_CE_MASK_CAP, +}; + +static const struct hisi_qm_cap_info hpre_basic_info[] = { + {HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37}, + {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, + {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, + {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, + {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE}, + {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE,
0xBFFFFE}, + {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, + {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, +}; + static const struct hpre_hw_error hpre_hw_errors[] = { { .int_msk = BIT(0), @@ -630,7 +650,8 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable) val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); if (enable) { val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE; - val2 = HPRE_HAC_RAS_NFE_ENABLE; + val2 = hisi_qm_get_hw_info(qm, hpre_basic_info, + HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); } else { val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE; val2 = 0x0; @@ -644,21 +665,30 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable) static void hpre_hw_error_disable(struct hisi_qm *qm) { - /* disable hpre hw error interrupts */ - writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK); + u32 ce, nfe; + ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); + nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + + /* disable hpre hw error interrupts */ + writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK); /* disable HPRE block master OOO when nfe occurs on Kunpeng930 */ hpre_master_ooo_ctrl(qm, false); } static void hpre_hw_error_enable(struct hisi_qm *qm) { + u32 ce, nfe; + + ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); + nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + /* clear HPRE hw error source if having */ - writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); + writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT); /* configure error type */ - writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); - writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB); + writel(ce, qm->io_base + HPRE_RAS_CE_ENB); + writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); /* enable HPRE block master OOO when nfe occurs on Kunpeng930 */ @@ -1125,7 +1155,11 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm) static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { + u32 nfe; + writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); + nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); } static void hpre_open_axi_master_ooo(struct hisi_qm *qm) @@ -1143,14 +1177,20 @@ static void hpre_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; - err_info->ce = QM_BASE_CE; - err_info->fe = 0; - err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | - HPRE_OOO_ECC_2BIT_ERR; - err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE; + err_info->fe = HPRE_HAC_RAS_FE_ENABLE; + err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver); + err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver); + err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR; + err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, + HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, + HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, + HPRE_QM_RESET_MASK_CAP, qm->cap_ver); + err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, + HPRE_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = 
HPRE_WR_MSI_PORT; err_info->acpi_rst = "HRST"; - err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT; } static const struct hisi_qm_err_ini hpre_err_ini = { diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e227a3e50600..f62e48ccef2b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -126,7 +126,6 @@ #define QM_DFX_CNT_CLR_CE 0x100118 #define QM_ABNORMAL_INT_SOURCE 0x100000 -#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0) #define QM_ABNORMAL_INT_MASK 0x100004 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff #define QM_ABNORMAL_INT_STATUS 0x100008 @@ -144,8 +143,10 @@ #define QM_RAS_NFE_ENABLE 0x1000f4 #define QM_RAS_CE_THRESHOLD 0x1000f8 #define QM_RAS_CE_TIMES_PER_IRQ 1 -#define QM_RAS_MSI_INT_SEL 0x1040f4 #define QM_OOO_SHUTDOWN_SEL 0x1040f8 +#define QM_ECC_MBIT BIT(2) +#define QM_DB_TIMEOUT BIT(10) +#define QM_OF_FIFO_OF BIT(11) #define QM_RESET_WAIT_TIMEOUT 400 #define QM_PEH_VENDOR_ID 0x1000d8 @@ -454,7 +455,7 @@ struct hisi_qm_hw_ops { u8 cmd, u16 index, u8 priority); u32 (*get_irq_num)(struct hisi_qm *qm); int (*debug_init)(struct hisi_qm *qm); - void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); + void (*hw_error_init)(struct hisi_qm *qm); void (*hw_error_uninit)(struct hisi_qm *qm); enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); int (*set_msi)(struct hisi_qm *qm, bool set); @@ -651,22 +652,17 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm) } /* Check if the error causes the master ooo block */ -static int qm_check_dev_error(struct hisi_qm *qm) +static bool qm_check_dev_error(struct hisi_qm *qm) { u32 val, dev_val; if (qm->fun_type == QM_HW_VF) - return 0; - - val = qm_get_hw_error_status(qm); - dev_val = qm_get_dev_err_status(qm); + return false; - if (qm->ver < QM_HW_V3) - return (val & QM_ECC_MBIT) || - (dev_val & qm->err_info.ecc_2bits_mask); + val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask; + dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; - return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) || - (dev_val & (~qm->err_info.dev_ce_mask)); + return val || dev_val; } static int qm_wait_reset_finish(struct hisi_qm *qm) @@ -2346,58 +2342,65 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, file->debug = &qm->debug; } -static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) +static void qm_hw_error_init_v1(struct hisi_qm *qm) { writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); } -static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) +static void qm_hw_error_cfg(struct hisi_qm *qm) { - qm->error_mask = ce | nfe | fe; + struct hisi_qm_err_info *err_info = &qm->err_info; + + qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; /* clear QM hw residual error source */ - writel(QM_ABNORMAL_INT_SOURCE_CLR, - qm->io_base + QM_ABNORMAL_INT_SOURCE); + writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); /* configure error type */ - writel(ce, qm->io_base + QM_RAS_CE_ENABLE); + writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); - writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); - writel(fe, qm->io_base + QM_RAS_FE_ENABLE); + writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); + writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); } -static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) +static void qm_hw_error_init_v2(struct hisi_qm *qm) { - u32 irq_enable 
= ce | nfe | fe; - u32 irq_unmask = ~irq_enable; + u32 irq_unmask; - qm_hw_error_cfg(qm, ce, nfe, fe); + qm_hw_error_cfg(qm); + irq_unmask = ~qm->error_mask; irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); } static void qm_hw_error_uninit_v2(struct hisi_qm *qm) { - writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); + u32 irq_mask = qm->error_mask; + + irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); + writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); } -static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) +static void qm_hw_error_init_v3(struct hisi_qm *qm) { - u32 irq_enable = ce | nfe | fe; - u32 irq_unmask = ~irq_enable; + u32 irq_unmask; - qm_hw_error_cfg(qm, ce, nfe, fe); + qm_hw_error_cfg(qm); /* enable close master ooo when hardware error happened */ - writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL); + writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); + irq_unmask = ~qm->error_mask; irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); } static void qm_hw_error_uninit_v3(struct hisi_qm *qm) { - writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); + u32 irq_mask = qm->error_mask; + + irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); + writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); /* disable close master ooo when hardware error happened */ writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); @@ -2442,7 +2445,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) { - u32 error_status, tmp, val; + u32 error_status, tmp; /* read err sts */ tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); @@ -2453,17 +2456,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) qm->err_status.is_qm_ecc_mbit = true; qm_log_hw_error(qm, error_status); - val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE; - /* ce error does not need to be reset */ - if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) { - writel(error_status, qm->io_base + - QM_ABNORMAL_INT_SOURCE); - writel(qm->err_info.nfe, - qm->io_base + QM_RAS_NFE_ENABLE); - return ACC_ERR_RECOVERED; - } + if (error_status & qm->err_info.qm_reset_mask) + return ACC_ERR_NEED_RESET; - return ACC_ERR_NEED_RESET; + writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); } return ACC_ERR_RECOVERED; @@ -4202,14 +4199,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get, static void qm_hw_error_init(struct hisi_qm *qm) { - struct hisi_qm_err_info *err_info = &qm->err_info; - if (!qm->ops->hw_error_init) { dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); return; } - qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe); + qm->ops->hw_error_init(qm); } static void qm_hw_error_uninit(struct hisi_qm *qm) @@ -4963,17 +4958,11 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) if (qm->err_ini->log_dev_hw_err) qm->err_ini->log_dev_hw_err(qm, err_sts); - /* ce error does not need to be reset */ - if ((err_sts | qm->err_info.dev_ce_mask) == - qm->err_info.dev_ce_mask) { - if (qm->err_ini->clear_dev_hw_err_status) - qm->err_ini->clear_dev_hw_err_status(qm, - err_sts); - - return ACC_ERR_RECOVERED; - } + if (err_sts & qm->err_info.dev_reset_mask) + return ACC_ERR_NEED_RESET; - return 
ACC_ERR_NEED_RESET; + if (qm->err_ini->clear_dev_hw_err_status) + qm->err_ini->clear_dev_hw_err_status(qm, err_sts); } return ACC_ERR_RECOVERED; diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 04e034abc5e8..895ba9b47554 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -192,6 +192,17 @@ struct sec_dev { bool iommu_used; }; +enum sec_cap_type { + SEC_QM_NFE_MASK_CAP = 0x0, + SEC_QM_RESET_MASK_CAP, + SEC_QM_OOO_SHUTDOWN_MASK_CAP, + SEC_QM_CE_MASK_CAP, + SEC_NFE_MASK_CAP, + SEC_RESET_MASK_CAP, + SEC_OOO_SHUTDOWN_MASK_CAP, + SEC_CE_MASK_CAP, +}; + void sec_destroy_qps(struct hisi_qp **qps, int qp_num); struct hisi_qp **sec_create_qps(void); int sec_register_to_crypto(struct hisi_qm *qm); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index ea7f85f72b10..e814f94dd0d4 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -41,16 +41,12 @@ #define SEC_ECC_NUM 16 #define SEC_ECC_MASH 0xFF #define SEC_CORE_INT_DISABLE 0x0 -#define SEC_CORE_INT_ENABLE 0x7c1ff -#define SEC_CORE_INT_CLEAR 0x7c1ff #define SEC_SAA_ENABLE 0x17f #define SEC_RAS_CE_REG 0x301050 #define SEC_RAS_FE_REG 0x301054 #define SEC_RAS_NFE_REG 0x301058 -#define SEC_RAS_CE_ENB_MSK 0x88 #define SEC_RAS_FE_ENB_MSK 0x0 -#define SEC_RAS_NFE_ENB_MSK 0x7c177 #define SEC_OOO_SHUTDOWN_SEL 0x301014 #define SEC_RAS_DISABLE 0x0 #define SEC_MEM_START_INIT_REG 0x301100 @@ -136,6 +132,17 @@ static struct hisi_qm_list sec_devices = { .unregister_from_crypto = sec_unregister_from_crypto, }; +static const struct hisi_qm_cap_info sec_basic_info[] = { + {SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77}, + {SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77}, + {SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77}, + {SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, + {SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177}, + {SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177}, + {SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177}, + {SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088}, +}; + static const struct sec_hw_error sec_hw_errors[] = { { .int_msk = BIT(0), @@ -575,7 +582,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable) val1 = readl(qm->io_base + SEC_CONTROL_REG); if (enable) { val1 |= SEC_AXI_SHUTDOWN_ENABLE; - val2 = SEC_RAS_NFE_ENB_MSK; + val2 = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); } else { val1 &= SEC_AXI_SHUTDOWN_DISABLE; val2 = 0x0; @@ -589,25 +597,30 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable) static void sec_hw_error_enable(struct hisi_qm *qm) { + u32 ce, nfe; + if (qm->ver == QM_HW_V1) { writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); pci_info(qm->pdev, "V1 not support hw error handle\n"); return; } + ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver); + nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); + /* clear SEC hw error source if having */ - writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); + writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE); /* enable RAS int */ - writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG); + writel(ce, qm->io_base + SEC_RAS_CE_REG); writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); - 
writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG); + writel(nfe, qm->io_base + SEC_RAS_NFE_REG); /* enable SEC block master OOO when nfe occurs on Kunpeng930 */ sec_master_ooo_ctrl(qm, true); /* enable SEC hw error interrupts */ - writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); + writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK); } static void sec_hw_error_disable(struct hisi_qm *qm) @@ -938,7 +951,11 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm) static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { + u32 nfe; + writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); + nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); + writel(nfe, qm->io_base + SEC_RAS_NFE_REG); } static void sec_open_axi_master_ooo(struct hisi_qm *qm) @@ -954,14 +971,20 @@ static void sec_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; - err_info->ce = QM_BASE_CE; - err_info->fe = 0; + err_info->fe = SEC_RAS_FE_ENB_MSK; + err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver); + err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver); err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; - err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK; + err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_QM_RESET_MASK_CAP, qm->cap_ver); + err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, + SEC_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = BIT(0); err_info->acpi_rst = "SRST"; - err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | - QM_ACC_WB_NOT_READY_TIMEOUT; } static const struct hisi_qm_err_ini sec_err_ini = { diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 24d5226e0a0a..6ed10ec05a73 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -69,11 +69,10 @@ #define HZIP_CORE_INT_STATUS_M_ECC BIT(1) #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 #define HZIP_CORE_INT_RAS_CE_ENB 0x301160 -#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1 #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164 #define HZIP_CORE_INT_RAS_FE_ENB 0x301168 +#define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0 #define HZIP_OOO_SHUTDOWN_SEL 0x30120C -#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24 #define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0) @@ -186,6 +185,28 @@ struct hisi_zip_ctrl { struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM]; }; +enum zip_cap_type { + ZIP_QM_NFE_MASK_CAP = 0x0, + ZIP_QM_RESET_MASK_CAP, + ZIP_QM_OOO_SHUTDOWN_MASK_CAP, + ZIP_QM_CE_MASK_CAP, + ZIP_NFE_MASK_CAP, + ZIP_RESET_MASK_CAP, + ZIP_OOO_SHUTDOWN_MASK_CAP, + ZIP_CE_MASK_CAP, +}; + +static struct hisi_qm_cap_info zip_basic_cap_info[] = { + {ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77}, + {ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77}, + {ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77}, + {ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, + {ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE}, + {ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE}, + 
{ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE}, + {ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, +}; + enum { HZIP_COMP_CORE0, HZIP_COMP_CORE1, @@ -457,7 +478,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable) val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); if (enable) { val1 |= HZIP_AXI_SHUTDOWN_ENABLE; - val2 = HZIP_CORE_INT_RAS_NFE_ENABLE; + val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); } else { val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE; val2 = 0x0; @@ -471,6 +493,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable) static void hisi_zip_hw_error_enable(struct hisi_qm *qm) { + u32 nfe, ce; + if (qm->ver == QM_HW_V1) { writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG); @@ -478,17 +502,17 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm) return; } + nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); + ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); + /* clear ZIP hw error source if having */ - writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE); + writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE); /* configure error type */ - writel(HZIP_CORE_INT_RAS_CE_ENABLE, - qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); - writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB); - writel(HZIP_CORE_INT_RAS_NFE_ENABLE, - qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); + writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB); + writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); - /* enable ZIP block master OOO when nfe occurs on Kunpeng930 */ hisi_zip_master_ooo_ctrl(qm, true); /* enable ZIP hw error interrupts */ @@ -497,10 +521,13 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm) static void hisi_zip_hw_error_disable(struct hisi_qm *qm) { + u32 nfe, ce; + /* disable ZIP hw error interrupts */ - writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG); + nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); + ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); + writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG); - /* disable ZIP block master OOO when nfe occurs on Kunpeng930 */ hisi_zip_master_ooo_ctrl(qm, false); } @@ -900,7 +927,11 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { + u32 nfe; + writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); + nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); + writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); } static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) @@ -934,16 +965,21 @@ static void hisi_zip_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; - err_info->ce = QM_BASE_CE; - err_info->fe = 0; + err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK; + err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver); + err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_QM_NFE_MASK_CAP, qm->cap_ver); err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; - err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE; + err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 
qm->cap_ver); + err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); + err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_QM_RESET_MASK_CAP, qm->cap_ver); + err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = HZIP_WR_PORT; err_info->acpi_rst = "ZRST"; - err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT; - - if (qm->ver >= QM_HW_V3) - err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT; } static const struct hisi_qm_err_ini hisi_zip_err_ini = { -- cgit v1.2.3 From 3536cc55cadaf2a03241915f9cfdaf6cd073e4fe Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 9 Sep 2022 17:46:59 +0800 Subject: crypto: hisilicon/qm - support get device irq information from hardware registers Support get device irq information from hardware registers instead of fixed macros. Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 294 ++++++++++++++++++++++++++++-------------- 1 file changed, 195 insertions(+), 99 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f62e48ccef2b..e4a506d02d23 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -22,15 +22,11 @@ #define QM_VF_AEQ_INT_MASK 0x4 #define QM_VF_EQ_INT_SOURCE 0x8 #define QM_VF_EQ_INT_MASK 0xc -#define QM_IRQ_NUM_V1 1 -#define QM_IRQ_NUM_PF_V2 4 -#define QM_IRQ_NUM_VF_V2 2 -#define QM_IRQ_NUM_VF_V3 3 -#define QM_EQ_EVENT_IRQ_VECTOR 0 -#define QM_AEQ_EVENT_IRQ_VECTOR 1 -#define QM_CMD_EVENT_IRQ_VECTOR 2 -#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3 +#define QM_IRQ_VECTOR_MASK GENMASK(15, 0) +#define QM_IRQ_TYPE_MASK GENMASK(15, 0) +#define QM_IRQ_TYPE_SHIFT 16 +#define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0) /* mailbox */ #define QM_MB_PING_ALL_VFS 0xffff @@ -336,6 +332,12 @@ enum qm_basic_type { QM_FUNC_MAX_QP_CAP, QM_XEQ_DEPTH_CAP, QM_QP_DEPTH_CAP, + QM_EQ_IRQ_TYPE_CAP, + QM_AEQ_IRQ_TYPE_CAP, + QM_ABN_IRQ_TYPE_CAP, + QM_PF2VF_IRQ_TYPE_CAP, + QM_PF_IRQ_NUM_CAP, + QM_VF_IRQ_NUM_CAP, }; static const struct hisi_qm_cap_info qm_cap_info_comm[] = { @@ -359,6 +361,12 @@ static const struct hisi_qm_cap_info qm_basic_info[] = { {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400}, {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800}, {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400}, + {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000}, + {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001}, + {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003}, + {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002}, + {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4}, + {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3}, }; struct qm_cqe { @@ -453,7 +461,6 @@ struct hisi_qm_hw_ops { int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); void (*qm_db)(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority); - u32 (*get_irq_num)(struct hisi_qm *qm); int (*debug_init)(struct hisi_qm *qm); void (*hw_error_init)(struct hisi_qm *qm); void (*hw_error_uninit)(struct hisi_qm *qm); @@ -562,6 +569,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = { {50100, 100000, 19} }; +static void qm_irqs_unregister(struct hisi_qm *qm); + static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) { enum qm_state curr = 
atomic_read(&qm->status.flags); @@ -904,25 +913,12 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, *low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; } -static u32 qm_get_irq_num_v1(struct hisi_qm *qm) -{ - return QM_IRQ_NUM_V1; -} - -static u32 qm_get_irq_num_v2(struct hisi_qm *qm) -{ - if (qm->fun_type == QM_HW_PF) - return QM_IRQ_NUM_PF_V2; - else - return QM_IRQ_NUM_VF_V2; -} - -static u32 qm_get_irq_num_v3(struct hisi_qm *qm) +static u32 qm_get_irq_num(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_PF) - return QM_IRQ_NUM_PF_V2; + return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); - return QM_IRQ_NUM_VF_V3; + return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); } static int qm_pm_get_sync(struct hisi_qm *qm) @@ -1196,24 +1192,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) return IRQ_WAKE_THREAD; } -static void qm_irq_unregister(struct hisi_qm *qm) -{ - struct pci_dev *pdev = qm->pdev; - - free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); - - if (qm->ver > QM_HW_V1) { - free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); - - if (qm->fun_type == QM_HW_PF) - free_irq(pci_irq_vector(pdev, - QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); - } - - if (qm->ver > QM_HW_V2) - free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm); -} - static void qm_init_qp_status(struct hisi_qp *qp) { struct hisi_qp_status *qp_status = &qp->qp_status; @@ -2799,7 +2777,6 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set) static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { .qm_db = qm_db_v1, - .get_irq_num = qm_get_irq_num_v1, .hw_error_init = qm_hw_error_init_v1, .set_msi = qm_set_msi, }; @@ -2807,7 +2784,6 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { .get_vft = qm_get_vft_v2, .qm_db = qm_db_v2, - .get_irq_num = qm_get_irq_num_v2, .hw_error_init = qm_hw_error_init_v2, .hw_error_uninit = qm_hw_error_uninit_v2, .hw_error_handle = qm_hw_error_handle_v2, @@ -2817,7 +2793,6 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { .get_vft = qm_get_vft_v2, .qm_db = qm_db_v2, - .get_irq_num = qm_get_irq_num_v3, .hw_error_init = qm_hw_error_init_v3, .hw_error_uninit = qm_hw_error_uninit_v3, .hw_error_handle = qm_hw_error_handle_v2, @@ -3803,7 +3778,7 @@ void hisi_qm_uninit(struct hisi_qm *qm) hisi_qm_set_state(qm, QM_NOT_READY); up_write(&qm->qps_lock); - qm_irq_unregister(qm); + qm_irqs_unregister(qm); hisi_qm_pci_uninit(qm); if (qm->use_sva) { uacce_remove(qm->uacce); @@ -5658,51 +5633,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data) return IRQ_HANDLED; } -static int qm_irq_register(struct hisi_qm *qm) -{ - struct pci_dev *pdev = qm->pdev; - int ret; - - ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), - qm_irq, 0, qm->dev_name, qm); - if (ret) - return ret; - - if (qm->ver > QM_HW_V1) { - ret = request_threaded_irq(pci_irq_vector(pdev, - QM_AEQ_EVENT_IRQ_VECTOR), - qm_aeq_irq, qm_aeq_thread, - 0, qm->dev_name, qm); - if (ret) - goto err_aeq_irq; - - if (qm->fun_type == QM_HW_PF) { - ret = request_irq(pci_irq_vector(pdev, - QM_ABNORMAL_EVENT_IRQ_VECTOR), - qm_abnormal_irq, 0, qm->dev_name, qm); - if (ret) - goto err_abonormal_irq; - } - } - - if (qm->ver > QM_HW_V2) { - ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), - qm_mb_cmd_irq, 0, qm->dev_name, qm); - if (ret) - goto err_mb_cmd_irq; - } - - return 0; - -err_mb_cmd_irq: - if (qm->fun_type == 
QM_HW_PF) - free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); -err_abonormal_irq: - free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); -err_aeq_irq: - free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); - return ret; -} /** * hisi_qm_dev_shutdown() - Shutdown device. @@ -5983,6 +5913,176 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) } EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); +static void qm_unregister_abnormal_irq(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 irq_vector, val; + + if (qm->fun_type == QM_HW_VF) + return; + + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) + return; + + irq_vector = val & QM_IRQ_VECTOR_MASK; + free_irq(pci_irq_vector(pdev, irq_vector), qm); +} + +static int qm_register_abnormal_irq(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 irq_vector, val; + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) + return 0; + + irq_vector = val & QM_IRQ_VECTOR_MASK; + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); + if (ret) + dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); + + return ret; +} + +static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 irq_vector, val; + + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) + return; + + irq_vector = val & QM_IRQ_VECTOR_MASK; + free_irq(pci_irq_vector(pdev, irq_vector), qm); +} + +static int qm_register_mb_cmd_irq(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 irq_vector, val; + int ret; + + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) + return 0; + + irq_vector = val & QM_IRQ_VECTOR_MASK; + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); + if (ret) + dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); + + return ret; +} + +static void qm_unregister_aeq_irq(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 irq_vector, val; + + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) + return; + + irq_vector = val & QM_IRQ_VECTOR_MASK; + free_irq(pci_irq_vector(pdev, irq_vector), qm); +} + +static int qm_register_aeq_irq(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 irq_vector, val; + int ret; + + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) + return 0; + + irq_vector = val & QM_IRQ_VECTOR_MASK; + ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq, + qm_aeq_thread, 0, qm->dev_name, qm); + if (ret) + dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); + + return ret; +} + +static void qm_unregister_eq_irq(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 irq_vector, val; + + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) + return; + + irq_vector = val & QM_IRQ_VECTOR_MASK; + 
free_irq(pci_irq_vector(pdev, irq_vector), qm); +} + +static int qm_register_eq_irq(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + u32 irq_vector, val; + int ret; + + val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); + if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) + return 0; + + irq_vector = val & QM_IRQ_VECTOR_MASK; + ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm); + if (ret) + dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); + + return ret; +} + +static void qm_irqs_unregister(struct hisi_qm *qm) +{ + qm_unregister_mb_cmd_irq(qm); + qm_unregister_abnormal_irq(qm); + qm_unregister_aeq_irq(qm); + qm_unregister_eq_irq(qm); +} + +static int qm_irqs_register(struct hisi_qm *qm) +{ + int ret; + + ret = qm_register_eq_irq(qm); + if (ret) + return ret; + + ret = qm_register_aeq_irq(qm); + if (ret) + goto free_eq_irq; + + ret = qm_register_abnormal_irq(qm); + if (ret) + goto free_aeq_irq; + + ret = qm_register_mb_cmd_irq(qm); + if (ret) + goto free_abnormal_irq; + + return 0; + +free_abnormal_irq: + qm_unregister_abnormal_irq(qm); +free_aeq_irq: + qm_unregister_aeq_irq(qm); +free_eq_irq: + qm_unregister_eq_irq(qm); + return ret; +} + static int qm_get_qp_num(struct hisi_qm *qm) { bool is_db_isolation; @@ -6117,11 +6217,7 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) goto err_get_pci_res; pci_set_master(pdev); - if (!qm->ops->get_irq_num) { - ret = -EOPNOTSUPP; - goto err_get_pci_res; - } - num_vec = qm->ops->get_irq_num(qm); + num_vec = qm_get_irq_num(qm); ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); if (ret < 0) { dev_err(dev, "Failed to enable MSI vectors!\n"); @@ -6294,7 +6390,7 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) return ret; - ret = qm_irq_register(qm); + ret = qm_irqs_register(qm); if (ret) goto err_pci_init; @@ -6336,7 +6432,7 @@ err_alloc_uacce: qm->uacce = NULL; } err_irq_register: - qm_irq_unregister(qm); + qm_irqs_unregister(qm); err_pci_init: hisi_qm_pci_uninit(qm); return ret; -- cgit v1.2.3 From f214d59a0603543cfa7c6c1cf2eb130ac77480c3 Mon Sep 17 00:00:00 2001 From: Zhiqi Song Date: Fri, 9 Sep 2022 17:47:00 +0800 Subject: crypto: hisilicon/hpre - support hpre capability Read some hpre device configuration info from capability register, instead of fixed macros. 
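As a reading aid for the diffs in this series: each row of a capability table such as hpre_basic_info is a {type, offset, shift, mask, v1_val, v2_val, v3_val} tuple, and hisi_qm_get_hw_info() returns either a fixed per-version fallback or a masked field read from the device. A simplified sketch of that lookup, assuming the row layout implied by the initializers above (the in-tree helper in qm.c is authoritative):

    /* Sketch only: assumed shape of the lookup behind hisi_qm_get_hw_info();
     * kernel context, so it relies on struct hisi_qm and readl(). */
    static u32 cap_lookup_sketch(struct hisi_qm *qm,
                                 const struct hisi_qm_cap_info *table,
                                 u32 index, bool from_hw)
    {
            u32 val;

            /* V1/V2 silicon has no capability registers: fixed values only. */
            if (qm->ver == QM_HW_V1)
                    return table[index].v1_val;
            if (qm->ver == QM_HW_V2)
                    return table[index].v2_val;

            /* Callers can still ask for the fixed V3 default. */
            if (!from_hw)
                    return table[index].v3_val;

            /* Otherwise read the capability register and extract the field. */
            val = readl(qm->io_base + table[index].offset);
            return (val >> table[index].shift) & table[index].mask;
    }

This is why one table serves every hardware generation: older parts keep their hard-coded masks, while V3 and later report theirs through the 0x31xx register window used in the rows above.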
Signed-off-by: Zhiqi Song Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre.h | 8 +- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 134 ++++++++++++++++++++++------ drivers/crypto/hisilicon/hpre/hpre_main.c | 53 +++++++++-- 3 files changed, 157 insertions(+), 38 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 9a0558ed82f9..9f0b94c8e03d 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -22,7 +22,8 @@ enum { HPRE_CLUSTER0, HPRE_CLUSTER1, HPRE_CLUSTER2, - HPRE_CLUSTER3 + HPRE_CLUSTER3, + HPRE_CLUSTERS_NUM_MAX }; enum hpre_ctrl_dbgfs_file { @@ -42,9 +43,6 @@ enum hpre_dfx_dbgfs_file { HPRE_DFX_FILE_NUM }; -#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1) -#define HPRE_CLUSTERS_NUM_V3 1 -#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2 #define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1) struct hpre_debugfs_file { @@ -105,5 +103,5 @@ struct hpre_sqe { struct hisi_qp *hpre_create_qp(u8 type); int hpre_algs_register(struct hisi_qm *qm); void hpre_algs_unregister(struct hisi_qm *qm); - +bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg); #endif diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 2aa91a4f77da..ad1475feb313 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -51,6 +51,12 @@ struct hpre_ctx; #define HPRE_ECC_HW256_KSZ_B 32 #define HPRE_ECC_HW384_KSZ_B 48 +/* capability register mask */ +#define HPRE_DRV_RSA_MASK_CAP BIT(0) +#define HPRE_DRV_DH_MASK_CAP BIT(1) +#define HPRE_DRV_ECDH_MASK_CAP BIT(2) +#define HPRE_DRV_X25519_MASK_CAP BIT(5) + typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { @@ -2070,22 +2076,75 @@ static struct kpp_alg curve25519_alg = { }, }; +static int hpre_register_rsa(struct hisi_qm *qm) +{ + int ret; + + if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP)) + return 0; -static int hpre_register_ecdh(void) + rsa.base.cra_flags = 0; + ret = crypto_register_akcipher(&rsa); + if (ret) + dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret); + + return ret; +} + +static void hpre_unregister_rsa(struct hisi_qm *qm) +{ + if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP)) + return; + + crypto_unregister_akcipher(&rsa); +} + +static int hpre_register_dh(struct hisi_qm *qm) { int ret; - ret = crypto_register_kpp(&ecdh_nist_p192); + if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP)) + return 0; + + ret = crypto_register_kpp(&dh); if (ret) + dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret); + + return ret; +} + +static void hpre_unregister_dh(struct hisi_qm *qm) +{ + if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP)) + return; + + crypto_unregister_kpp(&dh); +} + +static int hpre_register_ecdh(struct hisi_qm *qm) +{ + int ret; + + if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP)) + return 0; + + ret = crypto_register_kpp(&ecdh_nist_p192); + if (ret) { + dev_err(&qm->pdev->dev, "failed to register ecdh_nist_p192 (%d)!\n", ret); return ret; + } ret = crypto_register_kpp(&ecdh_nist_p256); - if (ret) + if (ret) { + dev_err(&qm->pdev->dev, "failed to register ecdh_nist_p256 (%d)!\n", ret); goto unregister_ecdh_p192; + } ret = crypto_register_kpp(&ecdh_nist_p384); - if (ret) + if (ret) { + dev_err(&qm->pdev->dev, "failed to register ecdh_nist_p384 (%d)!\n", ret); goto 
unregister_ecdh_p256; + } return 0; @@ -2096,52 +2155,73 @@ unregister_ecdh_p192: return ret; } -static void hpre_unregister_ecdh(void) +static void hpre_unregister_ecdh(struct hisi_qm *qm) { + if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP)) + return; + crypto_unregister_kpp(&ecdh_nist_p384); crypto_unregister_kpp(&ecdh_nist_p256); crypto_unregister_kpp(&ecdh_nist_p192); } +static int hpre_register_x25519(struct hisi_qm *qm) +{ + int ret; + + if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP)) + return 0; + + ret = crypto_register_kpp(&curve25519_alg); + if (ret) + dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret); + + return ret; +} + +static void hpre_unregister_x25519(struct hisi_qm *qm) +{ + if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP)) + return; + + crypto_unregister_kpp(&curve25519_alg); +} + int hpre_algs_register(struct hisi_qm *qm) { int ret; - rsa.base.cra_flags = 0; - ret = crypto_register_akcipher(&rsa); + ret = hpre_register_rsa(qm); if (ret) return ret; - ret = crypto_register_kpp(&dh); + ret = hpre_register_dh(qm); if (ret) goto unreg_rsa; - if (qm->ver >= QM_HW_V3) { - ret = hpre_register_ecdh(); - if (ret) - goto unreg_dh; - ret = crypto_register_kpp(&curve25519_alg); - if (ret) - goto unreg_ecdh; - } - return 0; + ret = hpre_register_ecdh(qm); + if (ret) + goto unreg_dh; + + ret = hpre_register_x25519(qm); + if (ret) + goto unreg_ecdh; + + return ret; unreg_ecdh: - hpre_unregister_ecdh(); + hpre_unregister_ecdh(qm); unreg_dh: - crypto_unregister_kpp(&dh); + hpre_unregister_dh(qm); unreg_rsa: - crypto_unregister_akcipher(&rsa); + hpre_unregister_rsa(qm); return ret; } void hpre_algs_unregister(struct hisi_qm *qm) { - if (qm->ver >= QM_HW_V3) { - crypto_unregister_kpp(&curve25519_alg); - hpre_unregister_ecdh(); - } - - crypto_unregister_kpp(&dh); - crypto_unregister_akcipher(&rsa); + hpre_unregister_x25519(qm); + hpre_unregister_ecdh(qm); + hpre_unregister_dh(qm); + hpre_unregister_rsa(qm); } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 36177c437c56..407cdd9d8413 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -77,8 +77,6 @@ #define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0) #define HPRE_QM_VFG_AX_MASK GENMASK(7, 0) #define HPRE_BD_USR_MASK GENMASK(1, 0) -#define HPRE_CLUSTER_CORE_MASK_V2 GENMASK(3, 0) -#define HPRE_CLUSTER_CORE_MASK_V3 GENMASK(7, 0) #define HPRE_PREFETCH_CFG 0x301130 #define HPRE_SVA_PREFTCH_DFX 0x30115C #define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30))) @@ -154,6 +152,23 @@ enum hpre_cap_type { HPRE_RESET_MASK_CAP, HPRE_OOO_SHUTDOWN_MASK_CAP, HPRE_CE_MASK_CAP, + HPRE_CLUSTER_NUM_CAP, + HPRE_CORE_TYPE_NUM_CAP, + HPRE_CORE_NUM_CAP, + HPRE_CLUSTER_CORE_NUM_CAP, + HPRE_CORE_ENABLE_BITMAP_CAP, + HPRE_DRV_ALG_BITMAP_CAP, + HPRE_DEV_ALG_BITMAP_CAP, + HPRE_CORE1_ALG_BITMAP_CAP, + HPRE_CORE2_ALG_BITMAP_CAP, + HPRE_CORE3_ALG_BITMAP_CAP, + HPRE_CORE4_ALG_BITMAP_CAP, + HPRE_CORE5_ALG_BITMAP_CAP, + HPRE_CORE6_ALG_BITMAP_CAP, + HPRE_CORE7_ALG_BITMAP_CAP, + HPRE_CORE8_ALG_BITMAP_CAP, + HPRE_CORE9_ALG_BITMAP_CAP, + HPRE_CORE10_ALG_BITMAP_CAP }; static const struct hisi_qm_cap_info hpre_basic_info[] = { @@ -165,6 +180,23 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, + {HPRE_CLUSTER_NUM_CAP, 
0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, + {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, + {HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA}, + {HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA}, + {HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF}, + {HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27}, + {HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F}, + {HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, + {HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, + {HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, + {HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, + {HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, + {HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, + {HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, + {HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, + {HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}, + {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10} }; static const struct hpre_hw_error hpre_hw_errors[] = { @@ -282,6 +314,17 @@ static struct dfx_diff_registers hpre_diff_regs[] = { }, }; +bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) +{ + u32 cap_val; + + cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver); + if (alg & cap_val) + return true; + + return false; +} + static int hpre_diff_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; @@ -350,14 +393,12 @@ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); static inline int hpre_cluster_num(struct hisi_qm *qm) { - return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 : - HPRE_CLUSTERS_NUM_V2; + return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver); } static inline int hpre_cluster_core_mask(struct hisi_qm *qm) { - return (qm->ver >= QM_HW_V3) ? - HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2; + return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver); } struct hisi_qp *hpre_create_qp(u8 type) -- cgit v1.2.3 From b1be70a8c983a5bd12d88181b75d0f550086cb44 Mon Sep 17 00:00:00 2001 From: Zhiqi Song Date: Fri, 9 Sep 2022 17:47:01 +0800 Subject: crypto: hisilicon/hpre - optimize registration of ecdh Use table to store the different ecdh curve configuration, making the registration of ecdh clearer and expansion more convenient. 
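The heart of this change is the register-then-unwind idiom: walk a static table, register each entry in order, and on failure unregister, in reverse order, only the entries that were already registered. A minimal sketch of that idiom follows (the table name is hypothetical; the real code below applies it to ecdh_curves[] via crypto_register_kpp()/crypto_unregister_kpp()):

static struct kpp_alg my_curves[3];	/* filled in elsewhere */

static int register_my_curves(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(my_curves); i++) {
		ret = crypto_register_kpp(&my_curves[i]);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* i points at the entry that failed; undo entries 0..i-1 */
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&my_curves[i]);
	return ret;
}

Unregistration simply walks the same table backwards, so supporting a new curve only means adding one more table entry.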
Signed-off-by: Zhiqi Song Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 136 +++++++++++++--------------- 1 file changed, 63 insertions(+), 73 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index ad1475feb313..ac7fabf65865 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -2008,55 +2008,53 @@ static struct kpp_alg dh = { }, }; -static struct kpp_alg ecdh_nist_p192 = { - .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, - .max_size = hpre_ecdh_max_size, - .init = hpre_ecdh_nist_p192_init_tfm, - .exit = hpre_ecdh_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, - .base = { - .cra_ctxsize = sizeof(struct hpre_ctx), - .cra_priority = HPRE_CRYPTO_ALG_PRI, - .cra_name = "ecdh-nist-p192", - .cra_driver_name = "hpre-ecdh-nist-p192", - .cra_module = THIS_MODULE, - }, -}; - -static struct kpp_alg ecdh_nist_p256 = { - .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, - .max_size = hpre_ecdh_max_size, - .init = hpre_ecdh_nist_p256_init_tfm, - .exit = hpre_ecdh_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, - .base = { - .cra_ctxsize = sizeof(struct hpre_ctx), - .cra_priority = HPRE_CRYPTO_ALG_PRI, - .cra_name = "ecdh-nist-p256", - .cra_driver_name = "hpre-ecdh-nist-p256", - .cra_module = THIS_MODULE, - }, -}; - -static struct kpp_alg ecdh_nist_p384 = { - .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, - .max_size = hpre_ecdh_max_size, - .init = hpre_ecdh_nist_p384_init_tfm, - .exit = hpre_ecdh_exit_tfm, - .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, - .base = { - .cra_ctxsize = sizeof(struct hpre_ctx), - .cra_priority = HPRE_CRYPTO_ALG_PRI, - .cra_name = "ecdh-nist-p384", - .cra_driver_name = "hpre-ecdh-nist-p384", - .cra_module = THIS_MODULE, - }, +static struct kpp_alg ecdh_curves[] = { + { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p192_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p192", + .cra_driver_name = "hpre-ecdh-nist-p192", + .cra_module = THIS_MODULE, + }, + }, { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p256_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p256", + .cra_driver_name = "hpre-ecdh-nist-p256", + .cra_module = THIS_MODULE, + }, + }, { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p384_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = 
sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p384", + .cra_driver_name = "hpre-ecdh-nist-p384", + .cra_module = THIS_MODULE, + }, + } }; static struct kpp_alg curve25519_alg = { @@ -2123,46 +2121,38 @@ static void hpre_unregister_dh(struct hisi_qm *qm) static int hpre_register_ecdh(struct hisi_qm *qm) { - int ret; + int ret, i; if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP)) return 0; - ret = crypto_register_kpp(&ecdh_nist_p192); - if (ret) { - dev_err(&qm->pdev->dev, "failed to register ecdh_nist_p192 (%d)!\n", ret); - return ret; - } - - ret = crypto_register_kpp(&ecdh_nist_p256); - if (ret) { - dev_err(&qm->pdev->dev, "failed to register ecdh_nist_p256 (%d)!\n", ret); - goto unregister_ecdh_p192; - } - - ret = crypto_register_kpp(&ecdh_nist_p384); - if (ret) { - dev_err(&qm->pdev->dev, "failed to register ecdh_nist_p384 (%d)!\n", ret); - goto unregister_ecdh_p256; + for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) { + ret = crypto_register_kpp(&ecdh_curves[i]); + if (ret) { + dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n", + ecdh_curves[i].base.cra_name, ret); + goto unreg_kpp; + } } return 0; -unregister_ecdh_p256: - crypto_unregister_kpp(&ecdh_nist_p256); -unregister_ecdh_p192: - crypto_unregister_kpp(&ecdh_nist_p192); +unreg_kpp: + for (--i; i >= 0; --i) + crypto_unregister_kpp(&ecdh_curves[i]); + return ret; } static void hpre_unregister_ecdh(struct hisi_qm *qm) { + int i; + if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP)) return; - crypto_unregister_kpp(&ecdh_nist_p384); - crypto_unregister_kpp(&ecdh_nist_p256); - crypto_unregister_kpp(&ecdh_nist_p192); + for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i) + crypto_unregister_kpp(&ecdh_curves[i]); } static int hpre_register_x25519(struct hisi_qm *qm) -- cgit v1.2.3 From db700974b69d2c12a8fe84c45820892416a1e265 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Fri, 9 Sep 2022 17:47:02 +0800 Subject: crypto: hisilicon/zip - support zip capability Add function 'hisi_zip_alg_support' to get device configuration information from capability registers, instead of determining whether to register an algorithm based on hardware platform's version. 
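Note the shape of the support check that follows: unlike HPRE, where each driver algorithm occupies a single capability bit, the ZIP masks added below span two bits per algorithm (HZIP_ALG_ZLIB is GENMASK(1, 0), HZIP_ALG_GZIP is GENMASK(3, 2)), so hisi_zip_alg_support() requires the full mask to be present rather than any one bit. A reduced sketch of that test (standalone; values illustrative):

static bool alg_fully_supported(u32 cap_val, u32 alg_msk)
{
	/* every bit of the algorithm's mask must be set in the
	 * capability value; a partially enabled pair does not count
	 */
	return (alg_msk & cap_val) == alg_msk;
}

/* e.g. with cap_val = 0xD: zlib (mask 0x3) is not supported because
 * only bit 0 is set, while gzip (mask 0xC) is fully supported.
 */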
Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip.h | 1 + drivers/crypto/hisilicon/zip/zip_crypto.c | 67 ++++++++++++++++---- drivers/crypto/hisilicon/zip/zip_main.c | 102 +++++++++++++++++++++--------- 3 files changed, 128 insertions(+), 42 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index f289656e9ac0..f2e6da3240ae 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -84,4 +84,5 @@ struct hisi_zip_sqe { int zip_create_qps(struct hisi_qp **qps, int qp_num, int node); int hisi_zip_register_to_crypto(struct hisi_qm *qm); void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); +bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index a7f6884c3ab3..6608971d10cd 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -39,6 +39,9 @@ #define HZIP_ALG_PRIORITY 300 #define HZIP_SGL_SGE_NR 10 +#define HZIP_ALG_ZLIB GENMASK(1, 0) +#define HZIP_ALG_GZIP GENMASK(3, 2) + static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c}; static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = { 0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03 @@ -756,6 +759,28 @@ static struct acomp_alg hisi_zip_acomp_zlib = { } }; +static int hisi_zip_register_zlib(struct hisi_qm *qm) +{ + int ret; + + if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB)) + return 0; + + ret = crypto_register_acomp(&hisi_zip_acomp_zlib); + if (ret) + dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret); + + return ret; +} + +static void hisi_zip_unregister_zlib(struct hisi_qm *qm) +{ + if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB)) + return; + + crypto_unregister_acomp(&hisi_zip_acomp_zlib); +} + static struct acomp_alg hisi_zip_acomp_gzip = { .init = hisi_zip_acomp_init, .exit = hisi_zip_acomp_exit, @@ -770,27 +795,45 @@ static struct acomp_alg hisi_zip_acomp_gzip = { } }; -int hisi_zip_register_to_crypto(struct hisi_qm *qm) +static int hisi_zip_register_gzip(struct hisi_qm *qm) { int ret; - ret = crypto_register_acomp(&hisi_zip_acomp_zlib); - if (ret) { - pr_err("failed to register to zlib (%d)!\n", ret); - return ret; - } + if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP)) + return 0; ret = crypto_register_acomp(&hisi_zip_acomp_gzip); - if (ret) { - pr_err("failed to register to gzip (%d)!\n", ret); - crypto_unregister_acomp(&hisi_zip_acomp_zlib); - } + if (ret) + dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret); return ret; } -void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) +static void hisi_zip_unregister_gzip(struct hisi_qm *qm) { + if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP)) + return; + crypto_unregister_acomp(&hisi_zip_acomp_gzip); - crypto_unregister_acomp(&hisi_zip_acomp_zlib); +} + +int hisi_zip_register_to_crypto(struct hisi_qm *qm) +{ + int ret = 0; + + ret = hisi_zip_register_zlib(qm); + if (ret) + return ret; + + ret = hisi_zip_register_gzip(qm); + if (ret) + hisi_zip_unregister_zlib(qm); + + return ret; +} + +void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) +{ + hisi_zip_unregister_zlib(qm); + hisi_zip_unregister_gzip(qm); } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 6ed10ec05a73..a8aedddac67a 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -20,18 +20,6 @@ 
#define HZIP_QUEUE_NUM_V1 4096 #define HZIP_CLOCK_GATE_CTRL 0x301004 -#define COMP0_ENABLE BIT(0) -#define COMP1_ENABLE BIT(1) -#define DECOMP0_ENABLE BIT(2) -#define DECOMP1_ENABLE BIT(3) -#define DECOMP2_ENABLE BIT(4) -#define DECOMP3_ENABLE BIT(5) -#define DECOMP4_ENABLE BIT(6) -#define DECOMP5_ENABLE BIT(7) -#define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \ - DECOMP0_ENABLE | DECOMP1_ENABLE | \ - DECOMP2_ENABLE | DECOMP3_ENABLE | \ - DECOMP4_ENABLE | DECOMP5_ENABLE) #define HZIP_DECOMP_CHECK_ENABLE BIT(16) #define HZIP_FSM_MAX_CNT 0x301008 @@ -76,10 +64,6 @@ #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24 #define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0) -#define HZIP_COMP_CORE_NUM 2 -#define HZIP_DECOMP_CORE_NUM 6 -#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \ - HZIP_DECOMP_CORE_NUM) #define HZIP_SQE_SIZE 128 #define HZIP_PF_DEF_Q_NUM 64 #define HZIP_PF_DEF_Q_BASE 0 @@ -194,6 +178,21 @@ enum zip_cap_type { ZIP_RESET_MASK_CAP, ZIP_OOO_SHUTDOWN_MASK_CAP, ZIP_CE_MASK_CAP, + ZIP_CLUSTER_NUM_CAP, + ZIP_CORE_TYPE_NUM_CAP, + ZIP_CORE_NUM_CAP, + ZIP_CLUSTER_COMP_NUM_CAP, + ZIP_CLUSTER_DECOMP_NUM_CAP, + ZIP_DECOMP_ENABLE_BITMAP, + ZIP_COMP_ENABLE_BITMAP, + ZIP_DRV_ALG_BITMAP, + ZIP_DEV_ALG_BITMAP, + ZIP_CORE1_ALG_BITMAP, + ZIP_CORE2_ALG_BITMAP, + ZIP_CORE3_ALG_BITMAP, + ZIP_CORE4_ALG_BITMAP, + ZIP_CORE5_ALG_BITMAP, + ZIP_CAP_MAX }; static struct hisi_qm_cap_info zip_basic_cap_info[] = { @@ -205,6 +204,21 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = { {ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE}, {ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE}, {ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, + {ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1}, + {ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2}, + {ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5}, + {ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2}, + {ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3}, + {ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C}, + {ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3}, + {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF}, + {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF}, + {ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5}, + {ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5}, + {ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, + {ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, + {ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, + {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0} }; enum { @@ -363,6 +377,17 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node) return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); } +bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) +{ + u32 cap_val; + + cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver); + if ((alg & cap_val) == alg) + return true; + + return false; +} + static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) { u32 val; @@ -421,6 +446,7 @@ static void hisi_zip_enable_clock_gate(struct hisi_qm *qm) static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) { void __iomem *base = qm->io_base; + u32 dcomp_bm, comp_bm; /* qm user domain */ writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); @@ -458,8 +484,11 @@ static int 
hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) } /* let's open all compression/decompression cores */ - writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN, - base + HZIP_CLOCK_GATE_CTRL); + dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver); + comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + ZIP_COMP_ENABLE_BITMAP, qm->cap_ver); + writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL); /* enable sqc,cqc writeback */ writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | @@ -678,18 +707,23 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs); static int hisi_zip_core_debug_init(struct hisi_qm *qm) { + u32 zip_core_num, zip_comp_core_num; struct device *dev = &qm->pdev->dev; struct debugfs_regset32 *regset; struct dentry *tmp_d; char buf[HZIP_BUF_SIZE]; int i; - for (i = 0; i < HZIP_CORE_NUM; i++) { - if (i < HZIP_COMP_CORE_NUM) + zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); + zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP, + qm->cap_ver); + + for (i = 0; i < zip_core_num; i++) { + if (i < zip_comp_core_num) scnprintf(buf, sizeof(buf), "comp_core%d", i); else scnprintf(buf, sizeof(buf), "decomp_core%d", - i - HZIP_COMP_CORE_NUM); + i - zip_comp_core_num); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) @@ -702,7 +736,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm) tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); debugfs_create_file("regs", 0444, tmp_d, regset, - &hisi_zip_regs_fops); + &hisi_zip_regs_fops); } return 0; @@ -822,10 +856,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm) int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); struct qm_debug *debug = &qm->debug; void __iomem *io_base; + u32 zip_core_num; int i, j, idx; - debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM + - com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); + zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); + + debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num, + sizeof(unsigned int), GFP_KERNEL); if (!debug->last_words) return -ENOMEM; @@ -834,7 +871,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm) debug->last_words[i] = readl_relaxed(io_base); } - for (i = 0; i < HZIP_CORE_NUM; i++) { + for (i = 0; i < zip_core_num; i++) { io_base = qm->io_base + core_offsets[i]; for (j = 0; j < core_dfx_regs_num; j++) { idx = com_dfx_regs_num + i * core_dfx_regs_num + j; @@ -861,6 +898,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm) { int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs); int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); + u32 zip_core_num, zip_comp_core_num; struct qm_debug *debug = &qm->debug; char buf[HZIP_BUF_SIZE]; void __iomem *base; @@ -874,15 +912,18 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm) val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset); if (debug->last_words[i] != val) pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n", - hzip_com_dfx_regs[i].name, debug->last_words[i], val); + hzip_com_dfx_regs[i].name, debug->last_words[i], val); } - for (i = 0; i < HZIP_CORE_NUM; i++) { - if (i < HZIP_COMP_CORE_NUM) + zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); + zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP, + qm->cap_ver); + 
for (i = 0; i < zip_core_num; i++) { + if (i < zip_comp_core_num) scnprintf(buf, sizeof(buf), "Comp_core-%d", i); else scnprintf(buf, sizeof(buf), "Decomp_core-%d", - i - HZIP_COMP_CORE_NUM); + i - zip_comp_core_num); base = qm->io_base + core_offsets[i]; pci_info(qm->pdev, "==>%s:\n", buf); @@ -892,7 +933,8 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm) val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset); if (debug->last_words[idx] != val) pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n", - hzip_dump_dfx_regs[j].name, debug->last_words[idx], val); + hzip_dump_dfx_regs[j].name, + debug->last_words[idx], val); } } } -- cgit v1.2.3 From 921715b6b7827157bba6e8153d7a09774b0d034f Mon Sep 17 00:00:00 2001 From: Wenkai Lin Date: Fri, 9 Sep 2022 17:47:03 +0800 Subject: crypto: hisilicon/sec - get algorithm bitmap from registers Add function 'sec_get_alg_bitmap' to get hardware algorithm bitmap before register algorithm to crypto, instead of determining whether to register an algorithm based on hardware platform's version. Signed-off-by: Wenkai Lin Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec.h | 18 ++ drivers/crypto/hisilicon/sec2/sec_crypto.c | 306 ++++++++++++++++++----------- drivers/crypto/hisilicon/sec2/sec_main.c | 33 +++- 3 files changed, 236 insertions(+), 121 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 895ba9b47554..3e57fc04b377 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -201,10 +201,28 @@ enum sec_cap_type { SEC_RESET_MASK_CAP, SEC_OOO_SHUTDOWN_MASK_CAP, SEC_CE_MASK_CAP, + SEC_CLUSTER_NUM_CAP, + SEC_CORE_TYPE_NUM_CAP, + SEC_CORE_NUM_CAP, + SEC_CORES_PER_CLUSTER_NUM_CAP, + SEC_CORE_ENABLE_BITMAP, + SEC_DRV_ALG_BITMAP_LOW, + SEC_DRV_ALG_BITMAP_HIGH, + SEC_DEV_ALG_BITMAP_LOW, + SEC_DEV_ALG_BITMAP_HIGH, + SEC_CORE1_ALG_BITMAP_LOW, + SEC_CORE1_ALG_BITMAP_HIGH, + SEC_CORE2_ALG_BITMAP_LOW, + SEC_CORE2_ALG_BITMAP_HIGH, + SEC_CORE3_ALG_BITMAP_LOW, + SEC_CORE3_ALG_BITMAP_HIGH, + SEC_CORE4_ALG_BITMAP_LOW, + SEC_CORE4_ALG_BITMAP_HIGH, }; void sec_destroy_qps(struct hisi_qp **qps, int qp_num); struct hisi_qp **sec_create_qps(void); int sec_register_to_crypto(struct hisi_qm *qm); void sec_unregister_from_crypto(struct hisi_qm *qm); +u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low); #endif diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index eb23bffdcac8..84ae8ddd1a13 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -104,6 +104,16 @@ #define IV_CTR_INIT 0x1 #define IV_BYTE_OFFSET 0x8 +struct sec_skcipher { + u64 alg_msk; + struct skcipher_alg alg; +}; + +struct sec_aead { + u64 alg_msk; + struct aead_alg alg; +}; + /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) { @@ -2158,67 +2168,80 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .min_keysize = sec_min_key_size,\ .max_keysize = sec_max_key_size,\ .ivsize = iv_size,\ -}, +} #define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \ max_key_size, blk_size, iv_size) \ SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) -static struct skcipher_alg sec_skciphers[] = { - SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, - 
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, - AES_BLOCK_SIZE, 0) - - SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, - AES_BLOCK_SIZE, AES_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, - SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, - AES_BLOCK_SIZE, AES_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, - SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE, - DES3_EDE_BLOCK_SIZE, 0) - - SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, - SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE, - DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, - SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, - AES_BLOCK_SIZE, AES_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, - AES_BLOCK_SIZE, AES_BLOCK_SIZE) -}; - -static struct skcipher_alg sec_skciphers_v3[] = { - SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, - AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) - - SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, - AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, - SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) +static struct sec_skcipher sec_skciphers[] = { + { + .alg_msk = BIT(0), + .alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE, + AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0), + }, + { + .alg_msk = BIT(1), + .alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE, + AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(2), + .alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE, + AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(3), + .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE, + SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(4), + .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE, + AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(5), + .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE, + AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(12), + .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE, + AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(13), + .alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE, + AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(14), + .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE, + SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(15), + .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE, + AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(16), + .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE, + AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(23), + .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", 
sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE, + SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0), + }, + { + .alg_msk = BIT(24), + .alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE, + SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, + DES3_EDE_BLOCK_SIZE), + }, }; static int aead_iv_demension_check(struct aead_request *aead_req) @@ -2412,90 +2435,135 @@ static int sec_aead_decrypt(struct aead_request *a_req) .maxauthsize = max_authsize,\ } -static struct aead_alg sec_aeads[] = { - SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", - sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init, - sec_aead_ctx_exit, AES_BLOCK_SIZE, - AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), +static struct sec_aead sec_aeads[] = { + { + .alg_msk = BIT(6), + .alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, + AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(7), + .alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE, + AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(17), + .alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, + AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(18), + .alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE, + AES_BLOCK_SIZE), + }, + { + .alg_msk = BIT(43), + .alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1, + sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), + }, + { + .alg_msk = BIT(44), + .alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256, + sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), + }, + { + .alg_msk = BIT(45), + .alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512, + sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), + }, +}; + +static void sec_unregister_skcipher(u64 alg_mask, int end) +{ + int i; - SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", - sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init, - sec_aead_ctx_exit, AES_BLOCK_SIZE, - AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), + for (i = 0; i < end; i++) + if (sec_skciphers[i].alg_msk & alg_mask) + crypto_unregister_skcipher(&sec_skciphers[i].alg); +} - SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", - sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init, - sec_aead_ctx_exit, AES_BLOCK_SIZE, - AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), +static int sec_register_skcipher(u64 alg_mask) +{ + int i, ret, count; - SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init, - sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, - AES_BLOCK_SIZE, AES_BLOCK_SIZE), + count = ARRAY_SIZE(sec_skciphers); - SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init, - sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, - SEC_AIV_SIZE, AES_BLOCK_SIZE) -}; + for (i = 0; i < count; i++) { + if (!(sec_skciphers[i].alg_msk & alg_mask)) + continue; -static struct aead_alg sec_aeads_v3[] = { - SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init, - sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, - AES_BLOCK_SIZE, AES_BLOCK_SIZE), + ret = crypto_register_skcipher(&sec_skciphers[i].alg); + if (ret) + goto err; + } - SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init, - sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, - SEC_AIV_SIZE, 
AES_BLOCK_SIZE) -}; + return 0; + +err: + sec_unregister_skcipher(alg_mask, i); + + return ret; +} + +static void sec_unregister_aead(u64 alg_mask, int end) +{ + int i; + + for (i = 0; i < end; i++) + if (sec_aeads[i].alg_msk & alg_mask) + crypto_unregister_aead(&sec_aeads[i].alg); +} + +static int sec_register_aead(u64 alg_mask) +{ + int i, ret, count; + + count = ARRAY_SIZE(sec_aeads); + + for (i = 0; i < count; i++) { + if (!(sec_aeads[i].alg_msk & alg_mask)) + continue; + + ret = crypto_register_aead(&sec_aeads[i].alg); + if (ret) + goto err; + } + + return 0; + +err: + sec_unregister_aead(alg_mask, i); + + return ret; +} int sec_register_to_crypto(struct hisi_qm *qm) { + u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); int ret; - /* To avoid repeat register */ - ret = crypto_register_skciphers(sec_skciphers, - ARRAY_SIZE(sec_skciphers)); + ret = sec_register_skcipher(alg_mask); if (ret) return ret; - if (qm->ver > QM_HW_V2) { - ret = crypto_register_skciphers(sec_skciphers_v3, - ARRAY_SIZE(sec_skciphers_v3)); - if (ret) - goto reg_skcipher_fail; - } - - ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); + ret = sec_register_aead(alg_mask); if (ret) - goto reg_aead_fail; - if (qm->ver > QM_HW_V2) { - ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3)); - if (ret) - goto reg_aead_v3_fail; - } - return ret; + sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); -reg_aead_v3_fail: - crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); -reg_aead_fail: - if (qm->ver > QM_HW_V2) - crypto_unregister_skciphers(sec_skciphers_v3, - ARRAY_SIZE(sec_skciphers_v3)); -reg_skcipher_fail: - crypto_unregister_skciphers(sec_skciphers, - ARRAY_SIZE(sec_skciphers)); return ret; } void sec_unregister_from_crypto(struct hisi_qm *qm) { - if (qm->ver > QM_HW_V2) - crypto_unregister_aeads(sec_aeads_v3, - ARRAY_SIZE(sec_aeads_v3)); - crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); + u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); - if (qm->ver > QM_HW_V2) - crypto_unregister_skciphers(sec_skciphers_v3, - ARRAY_SIZE(sec_skciphers_v3)); - crypto_unregister_skciphers(sec_skciphers, - ARRAY_SIZE(sec_skciphers)); + sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads)); + sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); } diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index e814f94dd0d4..84233a489c74 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -41,7 +41,6 @@ #define SEC_ECC_NUM 16 #define SEC_ECC_MASH 0xFF #define SEC_CORE_INT_DISABLE 0x0 -#define SEC_SAA_ENABLE 0x17f #define SEC_RAS_CE_REG 0x301050 #define SEC_RAS_FE_REG 0x301054 @@ -114,6 +113,8 @@ #define SEC_DFX_COMMON1_LEN 0x45 #define SEC_DFX_COMMON2_LEN 0xBA +#define SEC_ALG_BITMAP_SHIFT 32 + struct sec_hw_error { u32 int_msk; const char *msg; @@ -141,6 +142,23 @@ static const struct hisi_qm_cap_info sec_basic_info[] = { {SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177}, {SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177}, {SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088}, + {SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1}, + {SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1}, + {SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4}, + {SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4}, + 
{SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF}, + {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF}, + {SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C}, + {SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + {SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, + {SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + {SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, + {SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + {SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, + {SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + {SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, + {SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, }; static const struct sec_hw_error sec_hw_errors[] = { @@ -345,6 +363,16 @@ struct hisi_qp **sec_create_qps(void) return NULL; } +u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low) +{ + u32 cap_val_h, cap_val_l; + + cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver); + cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver); + + return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l; +} + static const struct kernel_param_ops sec_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, @@ -512,7 +540,8 @@ static int sec_engine_init(struct hisi_qm *qm) writel(SEC_SINGLE_PORT_MAX_TRANS, qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); - writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); + reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver); + writel(reg, qm->io_base + SEC_SAA_EN_REG); if (qm->ver < QM_HW_V3) { /* HW V2 enable sm4 extra mode, as ctr/ecb */ -- cgit v1.2.3 From d310dc2554a5296a338f974d2b4e4f9af2687558 Mon Sep 17 00:00:00 2001 From: Zhiqi Song Date: Fri, 9 Sep 2022 17:47:04 +0800 Subject: crypto: hisilicon - support get algs by the capability register The value of qm algorithm can change dynamically according to the value of the capability register. Add xxx_set_qm_algs() function to obtain the algs that the hardware device supported from the capability register and set them into usr mode attribute files. 
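Each of the *_set_qm_algs() helpers introduced below builds the uacce "algs" attribute string the same way: walk a { mask, name } table, concatenate the names of the supported algorithms separated by '\n', then strip the trailing separator with strrchr(). A self-contained userspace sketch of that logic (table and capability value are illustrative, not taken from the hardware):

#include <stdio.h>
#include <string.h>

struct dev_alg {
	unsigned long long msk;
	const char *name;
};

static const struct dev_alg algs[] = {
	{ 1ULL << 0, "rsa\n" },
	{ 1ULL << 1, "dh\n" },
	{ 1ULL << 2, "ecdh\n" },
};

int main(void)
{
	unsigned long long cap = 0x5;	/* pretend capability register: bits 0 and 2 set */
	char buf[256] = "";
	char *p;
	size_t i;

	for (i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
		if (cap & algs[i].msk)
			strcat(buf, algs[i].name);

	p = strrchr(buf, '\n');		/* drop the trailing newline */
	if (p)
		*p = '\0';

	puts(buf);			/* prints "rsa" then "ecdh" */
	return 0;
}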
Signed-off-by: Zhiqi Song Signed-off-by: Wenkai Lin Signed-off-by: Weili Qian Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 10 ++-- drivers/crypto/hisilicon/hpre/hpre_main.c | 83 +++++++++++++++++++++++++++-- drivers/crypto/hisilicon/qm.c | 1 - drivers/crypto/hisilicon/sec2/sec_main.c | 71 +++++++++++++++++++++++- drivers/crypto/hisilicon/zip/zip_main.c | 75 ++++++++++++++++++++++++-- 5 files changed, 222 insertions(+), 18 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index ac7fabf65865..ef02dadd6217 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -51,11 +51,11 @@ struct hpre_ctx; #define HPRE_ECC_HW256_KSZ_B 32 #define HPRE_ECC_HW384_KSZ_B 48 -/* capability register mask */ -#define HPRE_DRV_RSA_MASK_CAP BIT(0) -#define HPRE_DRV_DH_MASK_CAP BIT(1) -#define HPRE_DRV_ECDH_MASK_CAP BIT(2) -#define HPRE_DRV_X25519_MASK_CAP BIT(5) +/* capability register mask of driver */ +#define HPRE_DRV_RSA_MASK_CAP BIT(0) +#define HPRE_DRV_DH_MASK_CAP BIT(1) +#define HPRE_DRV_ECDH_MASK_CAP BIT(2) +#define HPRE_DRV_X25519_MASK_CAP BIT(5) typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 407cdd9d8413..471e5ca720f5 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -118,6 +118,8 @@ #define HPRE_DFX_COMMON2_LEN 0xE #define HPRE_DFX_CORE_LEN 0x43 +#define HPRE_DEV_ALG_MAX_LEN 256 + static const char hpre_name[] = "hisi_hpre"; static struct dentry *hpre_debugfs_root; static const struct pci_device_id hpre_dev_ids[] = { @@ -133,6 +135,38 @@ struct hpre_hw_error { const char *msg; }; +struct hpre_dev_alg { + u32 alg_msk; + const char *alg; +}; + +static const struct hpre_dev_alg hpre_dev_algs[] = { + { + .alg_msk = BIT(0), + .alg = "rsa\n" + }, { + .alg_msk = BIT(1), + .alg = "dh\n" + }, { + .alg_msk = BIT(2), + .alg = "ecdh\n" + }, { + .alg_msk = BIT(3), + .alg = "ecdsa\n" + }, { + .alg_msk = BIT(4), + .alg = "sm2\n" + }, { + .alg_msk = BIT(5), + .alg = "x25519\n" + }, { + .alg_msk = BIT(6), + .alg = "x448\n" + }, { + /* sentinel */ + } +}; + static struct hisi_qm_list hpre_devices = { .register_to_crypto = hpre_algs_register, .unregister_from_crypto = hpre_algs_unregister, @@ -325,6 +359,35 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) return false; } +static int hpre_set_qm_algs(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + char *algs, *ptr; + u32 alg_msk; + int i; + + if (!qm->use_sva) + return 0; + + algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); + if (!algs) + return -ENOMEM; + + alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver); + + for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++) + if (alg_msk & hpre_dev_algs[i].alg_msk) + strcat(algs, hpre_dev_algs[i].alg); + + ptr = strrchr(algs, '\n'); + if (ptr) + *ptr = '\0'; + + qm->uacce->algs = algs; + + return 0; +} + static int hpre_diff_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; @@ -1073,15 +1136,13 @@ static void hpre_debugfs_exit(struct hisi_qm *qm) static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { + int ret; + if (pdev->revision == QM_HW_V1) { pci_warn(pdev, "HPRE version 1 is not supported!\n"); return -EINVAL; } - if (pdev->revision >= QM_HW_V3) - 
qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2"; - else - qm->algs = "rsa\ndh"; qm->mode = uacce_mode; qm->pdev = pdev; qm->ver = pdev->revision; @@ -1097,7 +1158,19 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qm_list = &hpre_devices; } - return hisi_qm_init(qm); + ret = hisi_qm_init(qm); + if (ret) { + pci_err(pdev, "Failed to init hpre qm configures!\n"); + return ret; + } + + ret = hpre_set_qm_algs(qm); + if (ret) { + pci_err(pdev, "Failed to set hpre algs!\n"); + hisi_qm_uninit(qm); + } + + return ret; } static int hpre_show_last_regs_init(struct hisi_qm *qm) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e4a506d02d23..30fdf0673f00 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3484,7 +3484,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm) uacce->is_vf = pdev->is_virtfn; uacce->priv = qm; - uacce->algs = qm->algs; if (qm->ver == QM_HW_V1) uacce->api_ver = HISI_QM_API_VER_BASE; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 84233a489c74..3705412bac5f 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -115,6 +115,14 @@ #define SEC_ALG_BITMAP_SHIFT 32 +#define SEC_CIPHER_BITMAP (GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \ + GENMASK(24, 21)) +#define SEC_DIGEST_BITMAP (GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \ + GENMASK_ULL(42, 25)) +#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \ + GENMASK_ULL(45, 43)) +#define SEC_DEV_ALG_MAX_LEN 256 + struct sec_hw_error { u32 int_msk; const char *msg; @@ -125,6 +133,11 @@ struct sec_dfx_item { u32 offset; }; +struct sec_dev_alg { + u64 alg_msk; + const char *algs; +}; + static const char sec_name[] = "hisi_sec2"; static struct dentry *sec_debugfs_root; @@ -161,6 +174,18 @@ static const struct hisi_qm_cap_info sec_basic_info[] = { {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, }; +static const struct sec_dev_alg sec_dev_algs[] = { { + .alg_msk = SEC_CIPHER_BITMAP, + .algs = "cipher\n", + }, { + .alg_msk = SEC_DIGEST_BITMAP, + .algs = "digest\n", + }, { + .alg_msk = SEC_AEAD_BITMAP, + .algs = "aead\n", + }, +}; + static const struct sec_hw_error sec_hw_errors[] = { { .int_msk = BIT(0), @@ -1052,11 +1077,41 @@ static int sec_pf_probe_init(struct sec_dev *sec) return ret; } +static int sec_set_qm_algs(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + char *algs, *ptr; + u64 alg_mask; + int i; + + if (!qm->use_sva) + return 0; + + algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); + if (!algs) + return -ENOMEM; + + alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW); + + for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++) + if (alg_mask & sec_dev_algs[i].alg_msk) + strcat(algs, sec_dev_algs[i].algs); + + ptr = strrchr(algs, '\n'); + if (ptr) + *ptr = '\0'; + + qm->uacce->algs = algs; + + return 0; +} + static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { + int ret; + qm->pdev = pdev; qm->ver = pdev->revision; - qm->algs = "cipher\ndigest\naead"; qm->mode = uacce_mode; qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; @@ -1079,7 +1134,19 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; } - return hisi_qm_init(qm); + ret = hisi_qm_init(qm); + if (ret) { + pci_err(qm->pdev, "Failed to init sec qm configures!\n"); + return ret; + } + + ret = 
sec_set_qm_algs(qm); + if (ret) { + pci_err(qm->pdev, "Failed to set sec algs!\n"); + hisi_qm_uninit(qm); + } + + return ret; } static void sec_qm_uninit(struct hisi_qm *qm) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index a8aedddac67a..c863435e8c75 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -74,6 +74,12 @@ #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) #define HZIP_WR_PORT BIT(11) +#define HZIP_DEV_ALG_MAX_LEN 256 +#define HZIP_ALG_ZLIB_BIT GENMASK(1, 0) +#define HZIP_ALG_GZIP_BIT GENMASK(3, 2) +#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4) +#define HZIP_ALG_LZ77_BIT GENMASK(7, 6) + #define HZIP_BUF_SIZE 22 #define HZIP_SQE_MASK_OFFSET 64 #define HZIP_SQE_MASK_LEN 48 @@ -114,6 +120,26 @@ struct zip_dfx_item { u32 offset; }; +struct zip_dev_alg { + u32 alg_msk; + const char *algs; +}; + +static const struct zip_dev_alg zip_dev_algs[] = { { + .alg_msk = HZIP_ALG_ZLIB_BIT, + .algs = "zlib\n", + }, { + .alg_msk = HZIP_ALG_GZIP_BIT, + .algs = "gzip\n", + }, { + .alg_msk = HZIP_ALG_DEFLATE_BIT, + .algs = "deflate\n", + }, { + .alg_msk = HZIP_ALG_LZ77_BIT, + .algs = "lz77_zstd\n", + }, +}; + static struct hisi_qm_list zip_devices = { .register_to_crypto = hisi_zip_register_to_crypto, .unregister_from_crypto = hisi_zip_unregister_from_crypto, @@ -388,6 +414,35 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) return false; } +static int hisi_zip_set_qm_algs(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + char *algs, *ptr; + u32 alg_mask; + int i; + + if (!qm->use_sva) + return 0; + + algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); + if (!algs) + return -ENOMEM; + + alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver); + + for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++) + if (alg_mask & zip_dev_algs[i].alg_msk) + strcat(algs, zip_dev_algs[i].algs); + + ptr = strrchr(algs, '\n'); + if (ptr) + *ptr = '\0'; + + qm->uacce->algs = algs; + + return 0; +} + static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) { u32 val; @@ -1071,12 +1126,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { + int ret; + qm->pdev = pdev; qm->ver = pdev->revision; - if (pdev->revision >= QM_HW_V3) - qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd"; - else - qm->algs = "zlib\ngzip"; qm->mode = uacce_mode; qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; @@ -1100,7 +1153,19 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; } - return hisi_qm_init(qm); + ret = hisi_qm_init(qm); + if (ret) { + pci_err(qm->pdev, "Failed to init zip qm configures!\n"); + return ret; + } + + ret = hisi_zip_set_qm_algs(qm); + if (ret) { + pci_err(qm->pdev, "Failed to set zip algs!\n"); + hisi_qm_uninit(qm); + } + + return ret; } static void hisi_zip_qm_uninit(struct hisi_qm *qm) -- cgit v1.2.3 From cf5bb835b7c8a5fee7f26455099cca7feb57f5e9 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 9 Sep 2022 11:49:12 +0100 Subject: crypto: qat - fix DMA transfer direction When CONFIG_DMA_API_DEBUG is selected, while running the crypto self test on the QAT crypto algorithms, the function add_dma_entry() reports a warning similar to the one below, saying that overlapping mappings are not supported. This occurs in tests where the input and the output scatter list point to the same buffers (i.e. 
two different scatter lists which point to the same chunks of memory). The logic that implements the mapping uses the flag DMA_BIDIRECTIONAL for both the input and the output scatter lists which leads to overlapped write mappings. These are not supported by the DMA layer. Fix by specifying the correct DMA transfer directions when mapping buffers. For in-place operations where the input scatter list matches the output scatter list, buffers are mapped once with DMA_BIDIRECTIONAL, otherwise input buffers are mapped using the flag DMA_TO_DEVICE and output buffers are mapped with DMA_FROM_DEVICE. Overlapping a read mapping with a write mapping is a valid case in dma-coherent devices like QAT. The function that frees and unmaps the buffers, qat_alg_free_bufl() has been changed accordingly to the changes to the mapping function. DMA-API: 4xxx 0000:06:00.0: cacheline tracking EEXIST, overlapping mappings aren't supported WARNING: CPU: 53 PID: 4362 at kernel/dma/debug.c:570 add_dma_entry+0x1e9/0x270 ... Call Trace: dma_map_page_attrs+0x82/0x2d0 ? preempt_count_add+0x6a/0xa0 qat_alg_sgl_to_bufl+0x45b/0x990 [intel_qat] qat_alg_aead_dec+0x71/0x250 [intel_qat] crypto_aead_decrypt+0x3d/0x70 test_aead_vec_cfg+0x649/0x810 ? number+0x310/0x3a0 ? vsnprintf+0x2a3/0x550 ? scnprintf+0x42/0x70 ? valid_sg_divisions.constprop.0+0x86/0xa0 ? test_aead_vec+0xdf/0x120 test_aead_vec+0xdf/0x120 alg_test_aead+0x185/0x400 alg_test+0x3d8/0x500 ? crypto_acomp_scomp_free_ctx+0x30/0x30 ? __schedule+0x32a/0x12a0 ? ttwu_queue_wakelist+0xbf/0x110 ? _raw_spin_unlock_irqrestore+0x23/0x40 ? try_to_wake_up+0x83/0x570 ? _raw_spin_unlock_irqrestore+0x23/0x40 ? __set_cpus_allowed_ptr_locked+0xea/0x1b0 ? crypto_acomp_scomp_free_ctx+0x30/0x30 cryptomgr_test+0x27/0x50 kthread+0xe6/0x110 ? kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x1f/0x30 Fixes: d370cec ("crypto: qat - Intel(R) QAT crypto interface") Link: https://lore.kernel.org/linux-crypto/20220223080400.139367-1-gilad@benyossef.com/ Signed-off-by: Damian Muszynski Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_algs.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index fb45fa83841c..cad9c58caab1 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -673,11 +673,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, dma_addr_t blpout = qat_req->buf.bloutp; size_t sz = qat_req->buf.sz; size_t sz_out = qat_req->buf.sz_out; + int bl_dma_dir; int i; + bl_dma_dir = blp != blpout ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + for (i = 0; i < bl->num_bufs; i++) dma_unmap_single(dev, bl->bufers[i].addr, - bl->bufers[i].len, DMA_BIDIRECTIONAL); + bl->bufers[i].len, bl_dma_dir); dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); @@ -691,7 +694,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, for (i = bufless; i < blout->num_bufs; i++) { dma_unmap_single(dev, blout->bufers[i].addr, blout->bufers[i].len, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); } dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); @@ -716,6 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, struct scatterlist *sg; size_t sz_out, sz = struct_size(bufl, bufers, n); int node = dev_to_node(&GET_DEV(inst->accel_dev)); + int bufl_dma_dir; if (unlikely(!n)) return -EINVAL; @@ -733,6 +737,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, qat_req->buf.sgl_src_valid = true; } + bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; + for_each_sg(sgl, sg, n, i) bufl->bufers[i].addr = DMA_MAPPING_ERROR; @@ -744,7 +750,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg), sg->length, - DMA_BIDIRECTIONAL); + bufl_dma_dir); bufl->bufers[y].len = sg->length; if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr))) goto err_in; @@ -787,7 +793,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, bufers[y].addr = dma_map_single(dev, sg_virt(sg), sg->length, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, bufers[y].addr))) goto err_out; bufers[y].len = sg->length; @@ -817,7 +823,7 @@ err_out: if (!dma_mapping_error(dev, buflout->bufers[i].addr)) dma_unmap_single(dev, buflout->bufers[i].addr, buflout->bufers[i].len, - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); if (!qat_req->buf.sgl_dst_valid) kfree(buflout); @@ -831,7 +837,7 @@ err_in: if (!dma_mapping_error(dev, bufl->bufers[i].addr)) dma_unmap_single(dev, bufl->bufers[i].addr, bufl->bufers[i].len, - DMA_BIDIRECTIONAL); + bufl_dma_dir); if (!qat_req->buf.sgl_src_valid) kfree(bufl); -- cgit v1.2.3 From 9c5f21b198d259bfe1191b1fedf08e2eab15b33b Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Fri, 9 Sep 2022 11:49:13 +0100 Subject: Revert "crypto: qat - reduce size of mapped region" This reverts commit e48767c17718067ba21fb2ef461779ec2506f845. In an attempt to resolve a set of warnings reported by the static analyzer Smatch, the reverted commit improperly reduced the sizes of the DMA mappings used for the input and output parameters for both RSA and DH creating a mismatch (map size=8 bytes, unmap size=64 bytes). This issue is reported when CONFIG_DMA_API_DEBUG is selected, when the crypto self test is run. The function dma_unmap_single() reports a warning similar to the one below, saying that the `device driver frees DMA memory with different size`. DMA-API: 4xxx 0000:06:00.0: device driver frees DMA memory with different size [device address=0x0000000123206c80] [map size=8 bytes] [unmap size=64 bytes] WARNING: CPU: 0 PID: 0 at kernel/dma/debug.c:973 check_unmap+0x3d0/0x8c0\ ... Call Trace: debug_dma_unmap_page+0x5c/0x60 qat_dh_cb+0xd7/0x110 [intel_qat] qat_alg_asym_callback+0x1a/0x30 [intel_qat] adf_response_handler+0xbd/0x1a0 [intel_qat] tasklet_action_common.constprop.0+0xcd/0xe0 __do_softirq+0xf8/0x30c __irq_exit_rcu+0xbf/0x140 common_interrupt+0xb9/0xd0 The original commit was correct. 
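The bug class being reverted is a map/unmap size mismatch: the mapping side passed the sizeof() of the first structure member (8 bytes) while the completion path unmapped the sizeof() of the whole parameter structure (64 bytes). A reduced illustration of the invariant (structure layout hypothetical; the real types are qat_rsa_input_params and friends):

struct in_params {
	u64 m;			/* first member: sizeof(in->m) == 8 */
	u64 tab[7];		/* whole struct: sizeof(*in) == 64 */
};

/* Broken pairing, as introduced by the reverted commit:
 *
 *	phy = dma_map_single(dev, &in->m, sizeof(in->m), DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, phy, sizeof(*in), DMA_TO_DEVICE);
 *
 * CONFIG_DMA_API_DEBUG flags this as "device driver frees DMA memory
 * with different size". Map and unmap must cover the same region:
 *
 *	phy = dma_map_single(dev, in, sizeof(*in), DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dev, phy, sizeof(*in), DMA_TO_DEVICE);
 */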
Cc: Reported-by: Herbert Xu Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_asym_algs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 095ed2a404d2..85b0f30712e1 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -333,13 +333,13 @@ static int qat_dh_compute_value(struct kpp_request *req) qat_req->out.dh.out_tab[1] = 0; /* Mapping in.in.b or in.in_g2.xa is the same */ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b, - sizeof(qat_req->in.dh.in.b), + sizeof(struct qat_dh_input_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) goto unmap_dst; qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r, - sizeof(qat_req->out.dh.r), + sizeof(struct qat_dh_output_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) goto unmap_in_params; @@ -730,13 +730,13 @@ static int qat_rsa_enc(struct akcipher_request *req) qat_req->in.rsa.in_tab[3] = 0; qat_req->out.rsa.out_tab[1] = 0; qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m, - sizeof(qat_req->in.rsa.enc.m), + sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) goto unmap_dst; qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c, - sizeof(qat_req->out.rsa.enc.c), + sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) goto unmap_in_params; @@ -876,13 +876,13 @@ static int qat_rsa_dec(struct akcipher_request *req) qat_req->in.rsa.in_tab[3] = 0; qat_req->out.rsa.out_tab[1] = 0; qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c, - sizeof(qat_req->in.rsa.dec.c), + sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) goto unmap_dst; qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m, - sizeof(qat_req->out.rsa.dec.m), + sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) goto unmap_in_params; -- cgit v1.2.3 From 072d36eefd6fde17928d214df64fdac32f60b4f4 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 9 Sep 2022 11:49:14 +0100 Subject: crypto: qat - use reference to structure in dma_map_single() When mapping the input and output parameters, the implementations of RSA and DH pass to the function dma_map_single() a pointer to the first member of the structure they want to map instead of a pointer to the actual structure. 
This results in set of warnings reported by the static analyser Smatch: drivers/crypto/qat/qat_common/qat_asym_algs.c:335 qat_dh_compute_value() error: dma_map_single_attrs() '&qat_req->in.dh.in.b' too small (8 vs 64) drivers/crypto/qat/qat_common/qat_asym_algs.c:341 qat_dh_compute_value() error: dma_map_single_attrs() '&qat_req->out.dh.r' too small (8 vs 64) drivers/crypto/qat/qat_common/qat_asym_algs.c:732 qat_rsa_enc() error: dma_map_single_attrs() '&qat_req->in.rsa.enc.m' too small (8 vs 64) drivers/crypto/qat/qat_common/qat_asym_algs.c:738 qat_rsa_enc() error: dma_map_single_attrs() '&qat_req->out.rsa.enc.c' too small (8 vs 64) drivers/crypto/qat/qat_common/qat_asym_algs.c:878 qat_rsa_dec() error: dma_map_single_attrs() '&qat_req->in.rsa.dec.c' too small (8 vs 64) drivers/crypto/qat/qat_common/qat_asym_algs.c:884 qat_rsa_dec() error: dma_map_single_attrs() '&qat_req->out.rsa.dec.m' too small (8 vs 64) Where the address of the first element of a structure is used as an input for the function dma_map_single(), replace it with the address of the structure. This fix does not introduce any functional change as the addresses are the same. Signed-off-by: Damian Muszynski Signed-off-by: Giovanni Cabiddu Reviewed-by: Adam Guerin Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/qat_asym_algs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 85b0f30712e1..94a26702aeae 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -332,13 +332,13 @@ static int qat_dh_compute_value(struct kpp_request *req) qat_req->in.dh.in_tab[n_input_params] = 0; qat_req->out.dh.out_tab[1] = 0; /* Mapping in.in.b or in.in_g2.xa is the same */ - qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b, + qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh, sizeof(struct qat_dh_input_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) goto unmap_dst; - qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r, + qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh, sizeof(struct qat_dh_output_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) @@ -729,13 +729,13 @@ static int qat_rsa_enc(struct akcipher_request *req) qat_req->in.rsa.in_tab[3] = 0; qat_req->out.rsa.out_tab[1] = 0; - qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m, + qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) goto unmap_dst; - qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c, + qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) @@ -875,13 +875,13 @@ static int qat_rsa_dec(struct akcipher_request *req) else qat_req->in.rsa.in_tab[3] = 0; qat_req->out.rsa.out_tab[1] = 0; - qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c, + qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) goto unmap_dst; - qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m, + qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa, sizeof(struct qat_rsa_output_params), DMA_TO_DEVICE); if 
(unlikely(dma_mapping_error(dev, qat_req->phy_out))) -- cgit v1.2.3 From 49186a7d9e46ff132a0ed9b721ad6b6a58dba6c1 Mon Sep 17 00:00:00 2001 From: Peter Harliman Liem Date: Tue, 13 Sep 2022 16:03:47 +0800 Subject: crypto: inside-secure - Avoid dma map if size is zero Since commit d03c54419274 ("dma-mapping: disallow .map_sg operations from returning zero on error"), dma_map_sg() produces a warning if the size is 0. This results in visible warnings if the crypto length is zero. To avoid that, skip calling dma_map_sg() when the size is zero. Signed-off-by: Peter Harliman Liem Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 44 ++++++++++++++++++-------- 1 file changed, 31 insertions(+), 13 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index d68ef16650d4..5a222c228c3b 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -642,10 +642,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin safexcel_complete(priv, ring); if (src == dst) { - dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); + if (sreq->nr_src > 0) + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); - dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); + if (sreq->nr_src > 0) + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_TO_DEVICE); + if (sreq->nr_dst > 0) + dma_unmap_sg(priv->dev, dst, sreq->nr_dst, + DMA_FROM_DEVICE); } /* @@ -737,23 +743,29 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, max(totlen_src, totlen_dst)); return -EINVAL; } - dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); + if (sreq->nr_src > 0) + dma_map_sg(priv->dev, src, sreq->nr_src, + DMA_BIDIRECTIONAL); } else { if (unlikely(totlen_src && (sreq->nr_src <= 0))) { dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!", totlen_src); return -EINVAL; } - dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); + + if (sreq->nr_src > 0) + dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) { dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!", totlen_dst); - dma_unmap_sg(priv->dev, src, sreq->nr_src, - DMA_TO_DEVICE); - return -EINVAL; + ret = -EINVAL; + goto unmap; } - dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); + + if (sreq->nr_dst > 0) + dma_map_sg(priv->dev, dst, sreq->nr_dst, + DMA_FROM_DEVICE); } memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); @@ -883,12 +895,18 @@ rdesc_rollback: cdesc_rollback: for (i = 0; i < n_cdesc; i++) safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); - +unmap: if (src == dst) { - dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL); + if (sreq->nr_src > 0) + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); - dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE); + if (sreq->nr_src > 0) + dma_unmap_sg(priv->dev, src, sreq->nr_src, + DMA_TO_DEVICE); + if (sreq->nr_dst > 0) + dma_unmap_sg(priv->dev, dst, sreq->nr_dst, + DMA_FROM_DEVICE); } return ret; -- cgit v1.2.3 From 320406cb60b6408d87f6a5bc729285fc44107352 Mon Sep 17 00:00:00 2001 From: Peter Harliman Liem Date: Tue, 13 Sep 2022 16:03:48 +0800 Subject: crypto: inside-secure - Replace generic aes with libaes Commit
363a90c2d517 ("crypto: safexcel/aes - switch to library version of key expansion routine") removed CRYPTO_AES from the config. However, some portions of the code still rely on the generic AES cipher (e.g. see safexcel_aead_gcm_cra_init() and safexcel_xcbcmac_cra_init()). This causes transform allocation failures for those algos if CRYPTO_AES is not manually enabled. To resolve that, replace all existing AES-cipher-dependent code with its AES library counterpart. Fixes: 363a90c2d517 ("crypto: safexcel/aes - switch to library version of key expansion routine") Signed-off-by: Peter Harliman Liem Signed-off-by: Herbert Xu --- drivers/crypto/inside-secure/safexcel_cipher.c | 16 +------ drivers/crypto/inside-secure/safexcel_hash.c | 59 +++++++++----------------- 2 files changed, 21 insertions(+), 54 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 5a222c228c3b..32a37e3850c5 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -63,7 +63,6 @@ struct safexcel_cipher_ctx { u32 hash_alg; u32 state_sz; - struct crypto_cipher *hkaes; struct crypto_aead *fback; }; @@ -2607,15 +2606,8 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key, ctx->key_len = len; /* Compute hash key by encrypting zeroes with cipher key */ - crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) & - CRYPTO_TFM_REQ_MASK); - ret = crypto_cipher_setkey(ctx->hkaes, key, len); - if (ret) - return ret; - memset(hashkey, 0, AES_BLOCK_SIZE); - crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey); + aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey); if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) { for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) { @@ -2644,15 +2636,11 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm) ctx->xcm = EIP197_XCM_MODE_GCM; ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */ - ctx->hkaes = crypto_alloc_cipher("aes", 0, 0); - return PTR_ERR_OR_ZERO(ctx->hkaes); + return 0; } static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm) { - struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - - crypto_free_cipher(ctx->hkaes); safexcel_aead_cra_exit(tfm); } diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 2124416742f8..103fc551d2af 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -30,7 +30,7 @@ struct safexcel_ahash_ctx { bool fb_init_done; bool fb_do_setkey; - struct crypto_cipher *kaes; + struct crypto_aes_ctx *aes; struct crypto_ahash *fback; struct crypto_shash *shpre; struct shash_desc *shdesc; @@ -824,7 +824,7 @@ static int safexcel_ahash_final(struct ahash_request *areq) result[i] = swab32(ctx->base.ipad.word[i + 4]); } areq->result[0] ^= 0x80; // 10- padding - crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result); + aes_encrypt(ctx->aes, areq->result, areq->result); return 0; } else if (unlikely(req->hmac && (req->len == req->block_sz) && @@ -2083,37 +2083,26 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int len) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); - struct crypto_aes_ctx aes; u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)]; int ret, i; - ret = aes_expandkey(&aes, key,
len); + ret = aes_expandkey(ctx->aes, key, len); if (ret) return ret; /* precompute the XCBC key material */ - crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & - CRYPTO_TFM_REQ_MASK); - ret = crypto_cipher_setkey(ctx->kaes, key, len); - if (ret) - return ret; - - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, - "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1"); - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp, - "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2"); - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE, - "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3"); + aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, + "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1"); + aes_encrypt(ctx->aes, (u8 *)key_tmp, + "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2"); + aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE, + "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3"); for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++) ctx->base.ipad.word[i] = swab32(key_tmp[i]); - crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & - CRYPTO_TFM_REQ_MASK); - ret = crypto_cipher_setkey(ctx->kaes, - (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, - AES_MIN_KEY_SIZE); + ret = aes_expandkey(ctx->aes, + (u8 *)key_tmp + 2 * AES_BLOCK_SIZE, + AES_MIN_KEY_SIZE); if (ret) return ret; @@ -2121,7 +2110,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key, ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE; ctx->cbcmac = false; - memzero_explicit(&aes, sizeof(aes)); return 0; } @@ -2130,15 +2118,15 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm) struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); safexcel_ahash_cra_init(tfm); - ctx->kaes = crypto_alloc_cipher("aes", 0, 0); - return PTR_ERR_OR_ZERO(ctx->kaes); + ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL); + return PTR_ERR_OR_ZERO(ctx->aes); } static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_free_cipher(ctx->kaes); + kfree(ctx->aes); safexcel_ahash_cra_exit(tfm); } @@ -2178,31 +2166,23 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int len) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); - struct crypto_aes_ctx aes; __be64 consts[4]; u64 _const[2]; u8 msb_mask, gfmask; int ret, i; - ret = aes_expandkey(&aes, key, len); + /* precompute the CMAC key material */ + ret = aes_expandkey(ctx->aes, key, len); if (ret) return ret; for (i = 0; i < len / sizeof(u32); i++) - ctx->base.ipad.word[i + 8] = swab32(aes.key_enc[i]); - - /* precompute the CMAC key material */ - crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) & - CRYPTO_TFM_REQ_MASK); - ret = crypto_cipher_setkey(ctx->kaes, key, len); - if (ret) - return ret; + ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]); /* code below borrowed from crypto/cmac.c */ /* encrypt the zero block */ memset(consts, 0, AES_BLOCK_SIZE); - crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts); + aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts); gfmask = 0x87; _const[0] = be64_to_cpu(consts[1]); @@ -2234,7 +2214,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, } ctx->cbcmac = false; - memzero_explicit(&aes, sizeof(aes)); 
return 0; } -- cgit v1.2.3 From 4532f1cf9caaf2588d48d1de7770a6d5d1f95e89 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 16 Sep 2022 18:35:50 +0800 Subject: crypto: artpec6 - Fix printk warning on size_t/%d Switch to %zu instead of %d for printing size_t. Signed-off-by: Herbert Xu Acked-by: Jesper Nilsson Signed-off-by: Herbert Xu --- drivers/crypto/axis/artpec6_crypto.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index b4820594ab80..51c66afbe677 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -1712,7 +1712,7 @@ static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq) cipher_len = regk_crypto_key_256; break; default: - pr_err("%s: Invalid key length %d!\n", + pr_err("%s: Invalid key length %zu!\n", MODULE_NAME, ctx->key_length); return -EINVAL; } -- cgit v1.2.3 From b21dc631222bbe61c372dfb19373fb9d83718314 Mon Sep 17 00:00:00 2001 From: Liu Shixin Date: Fri, 16 Sep 2022 22:13:40 +0800 Subject: crypto: sun4i-ss - use DEFINE_SHOW_ATTRIBUTE to simplify sun4i_ss_debugfs Use the DEFINE_SHOW_ATTRIBUTE helper macro to simplify the code. Signed-off-by: Liu Shixin Acked-by: Corentin Labbe Tested-by: Corentin Labbe Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c index 44b8fc4b786d..006e40133c28 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c @@ -235,7 +235,7 @@ static struct sun4i_ss_alg_template ss_algs[] = { #endif }; -static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v) +static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v) { unsigned int i; @@ -266,19 +266,7 @@ static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v) } return 0; } - -static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file) -{ - return single_open(file, sun4i_ss_dbgfs_read, inode->i_private); -} - -static const struct file_operations sun4i_ss_debugfs_fops = { - .owner = THIS_MODULE, - .open = sun4i_ss_dbgfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs); /* * Power management strategy: The device is suspended unless a TFM exists for -- cgit v1.2.3 From f5b657e5dbf830cfcb19b588b784b8190a5164a0 Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Sat, 17 Sep 2022 10:03:45 +0000 Subject: crypto: hisilicon/qm - fix the qos value initialization The default qos value is not initialized when SR-IOV is repeatedly enabled and disabled. So initialize the VF qos value in the SR-IOV enable process.
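Condensed for reference, the resulting flow is as follows (a sketch lifted from the diff below; only the PF entry, index 0, is still initialized at memory-init time):

/* Re-initialize every VF's qos to the default each time SR-IOV is
 * enabled, instead of clearing the array on disable. */
static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
{
	int i;

	for (i = 1; i <= total_func; i++)	/* index 0 is the PF */
		qm->factor[i].func_qos = QM_QOS_MAX_VAL;
}

/* called from hisi_qm_sriov_enable() before queues are assigned: */
if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
	hisi_qm_init_vf_qos(qm, num_vfs);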
Signed-off-by: Kai Ye Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 30fdf0673f00..8b387de69d22 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4801,6 +4801,14 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); +static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) +{ + int i; + + for (i = 1; i <= total_func; i++) + qm->factor[i].func_qos = QM_QOS_MAX_VAL; +} + /** * hisi_qm_sriov_enable() - enable virtual functions * @pdev: the PCIe device @@ -4834,6 +4842,10 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) } num_vfs = max_vfs; + + if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) + hisi_qm_init_vf_qos(qm, num_vfs); + ret = qm_vf_q_assign(qm, num_vfs); if (ret) { pci_err(pdev, "Can't assign queues for VF!\n"); @@ -4869,7 +4881,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) { struct hisi_qm *qm = pci_get_drvdata(pdev); - int total_vfs = pci_sriov_get_totalvfs(qm->pdev); int ret; if (pci_vfs_assigned(pdev)) { @@ -4884,9 +4895,6 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) } pci_disable_sriov(pdev); - /* clear vf function shaper configure array */ - if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) - memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs); ret = qm_clear_vft_config(qm); if (ret) @@ -6297,7 +6305,7 @@ err_init_qp_mem: static int hisi_qm_memory_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; - int ret, total_func, i; + int ret, total_func; size_t off = 0; if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { @@ -6306,8 +6314,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) if (!qm->factor) return -ENOMEM; - for (i = 0; i < total_func; i++) - qm->factor[i].func_qos = QM_QOS_MAX_VAL; + /* Only the PF value needs to be initialized */ + qm->factor[0].func_qos = QM_QOS_MAX_VAL; } #define QM_INIT_BUF(qm, type, num) do { \ -- cgit v1.2.3 From f78f6f0bf34fd85c17ebcb31d645536112aa25d3 Mon Sep 17 00:00:00 2001 From: Neal Liu Date: Mon, 19 Sep 2022 14:37:05 +0800 Subject: crypto: aspeed - fix build error when only CRYPTO_DEV_ASPEED is enabled Fix the build error seen with the following config settings: - CONFIG_CRYPTO_DEV_ASPEED=y - CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH is not set - CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO is not set Error messages: make[4]: *** No rule to make target 'drivers/crypto/aspeed/aspeed_crypto.o', needed by 'drivers/crypto/aspeed/built-in.a'. make[4]: Target '__build' not remade because of errors. Reported-by: kernel test robot Signed-off-by: Neal Liu Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/Kconfig | 3 +-- drivers/crypto/aspeed/Makefile | 7 ++++--- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig index ae3eb0eb57f6..ae2710ae8d8f 100644 --- a/drivers/crypto/aspeed/Kconfig +++ b/drivers/crypto/aspeed/Kconfig @@ -1,6 +1,7 @@ config CRYPTO_DEV_ASPEED tristate "Support for Aspeed cryptographic engine driver" depends on ARCH_ASPEED || COMPILE_TEST + select CRYPTO_ENGINE help Hash and Crypto Engine (HACE) is designed to accelerate the throughput of hash data digest, encryption and decryption.
@@ -20,7 +21,6 @@ config CRYPTO_DEV_ASPEED_DEBUG config CRYPTO_DEV_ASPEED_HACE_HASH bool "Enable Aspeed Hash & Crypto Engine (HACE) hash" depends on CRYPTO_DEV_ASPEED - select CRYPTO_ENGINE select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 @@ -34,7 +34,6 @@ config CRYPTO_DEV_ASPEED_HACE_HASH config CRYPTO_DEV_ASPEED_HACE_CRYPTO bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto" depends on CRYPTO_DEV_ASPEED - select CRYPTO_ENGINE select CRYPTO_AES select CRYPTO_DES select CRYPTO_ECB diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile index 3be78cec0ecb..a0ed40ddaad1 100644 --- a/drivers/crypto/aspeed/Makefile +++ b/drivers/crypto/aspeed/Makefile @@ -1,6 +1,7 @@ -hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace.o aspeed-hace-hash.o -hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace.o aspeed-hace-crypto.o +hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o +hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o -aspeed_crypto-objs := $(hace-hash-y) \ +aspeed_crypto-objs := aspeed-hace.o \ + $(hace-hash-y) \ $(hace-crypto-y) -- cgit v1.2.3 From caca37cf6c749ff0303f68418cfe7b757a4e0697 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 19 Sep 2022 09:43:19 +0300 Subject: crypto: marvell/octeontx - prevent integer overflows The "code_length" value comes from the firmware file. If your firmware is untrusted, realistically there is probably very little you can do to protect yourself. Still we try to limit the damage as much as possible. Also Smatch marks any data read from the filesystem as untrusted and prints warnings if it is not capped correctly. The "code_length * 2" can overflow. The round_up(ucode_size, 16) + sizeof() expression can overflow too. Prevent these overflows.
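The guard pattern in isolation (a sketch distilled from the diff below; INT_MAX comes from the kernel's limits definitions, and ucode_hdr points at the untrusted header):

/* code_length is a 32-bit big-endian field read from the firmware file. */
unsigned int code_length = ntohl(ucode_hdr->code_length);

/* Cap it before any arithmetic: code_length * 2 must still fit in an
 * int, which also keeps the later round_up(ucode_size, 16) + header
 * size comparison from wrapping. */
if (code_length >= INT_MAX / 2)
	return -EINVAL;
ucode_size = code_length * 2;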
Fixes: d9110b0b01ff ("crypto: marvell - add support for OCTEON TX CPT engine") Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index 23c6edc70914..df9c2b8747e6 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev, struct tar_ucode_info_t *tar_info; struct otx_cpt_ucode_hdr *ucode_hdr; int ucode_type, ucode_size; + unsigned int code_length; /* * If size is less than microcode header size then don't report @@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev, if (get_ucode_type(ucode_hdr, &ucode_type)) return 0; - ucode_size = ntohl(ucode_hdr->code_length) * 2; + code_length = ntohl(ucode_hdr->code_length); + if (code_length >= INT_MAX / 2) { + dev_err(dev, "Invalid code_length %u\n", code_length); + return -EINVAL; + } + + ucode_size = code_length * 2; if (!ucode_size || (size < round_up(ucode_size, 16) + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { dev_err(dev, "Ucode %s invalid size\n", filename); @@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode, { struct otx_cpt_ucode_hdr *ucode_hdr; const struct firmware *fw; + unsigned int code_length; int ret; set_ucode_filename(ucode, ucode_filename); @@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode, ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data; memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ); ucode->ver_num = ucode_hdr->ver_num; - ucode->size = ntohl(ucode_hdr->code_length) * 2; + code_length = ntohl(ucode_hdr->code_length); + if (code_length >= INT_MAX / 2) { + dev_err(dev, "Ucode invalid code_length %u\n", code_length); + ret = -EINVAL; + goto release_fw; + } + ucode->size = code_length * 2; if (!ucode->size || (fw->size < round_up(ucode->size, 16) + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { dev_err(dev, "Ucode %s invalid size\n", ucode_filename); -- cgit v1.2.3 From 2526d6bf27d15054bb0778b2f7bc6625fd934905 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 19 Sep 2022 09:43:27 +0300 Subject: crypto: cavium - prevent integer overflow loading firmware The "code_length" value comes from the firmware file. If your firmware is untrusted, realistically there is probably very little you can do to protect yourself. Still we try to limit the damage as much as possible. Also Smatch marks any data read from the filesystem as untrusted and prints warnings if it is not capped correctly. The "ntohl(ucode->code_length) * 2" multiplication can have an integer overflow.
Fixes: 9e2c7d99941d ("crypto: cavium - Add Support for Octeon-tx CPT Engine") Signed-off-by: Dan Carpenter Signed-off-by: Herbert Xu --- drivers/crypto/cavium/cpt/cptpf_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 8c32d0eb8fcf..6872ac344001 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -253,6 +253,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) const struct firmware *fw_entry; struct device *dev = &cpt->pdev->dev; struct ucode_header *ucode; + unsigned int code_length; struct microcode *mcode; int j, ret = 0; @@ -263,11 +264,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) ucode = (struct ucode_header *)fw_entry->data; mcode = &cpt->mcode[cpt->next_mc_idx]; memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ); - mcode->code_size = ntohl(ucode->code_length) * 2; - if (!mcode->code_size) { + code_length = ntohl(ucode->code_length); + if (code_length == 0 || code_length >= INT_MAX / 2) { ret = -EINVAL; goto fw_release; } + mcode->code_size = code_length * 2; mcode->is_ae = is_ae; mcode->core_mask = 0ULL; -- cgit v1.2.3 From 4a209078656c3ada49c81d69c4b556be2dda1310 Mon Sep 17 00:00:00 2001 From: lei he Date: Mon, 19 Sep 2022 15:51:58 +0800 Subject: crypto: virtio - fix memory-leak Fix a memory leak for virtio-crypto akcipher requests; this problem was introduced by commit 59ca6c93387d3 ("virtio-crypto: implement RSA algorithm"). The leak can be reproduced and tested with the following script inside a virtual machine: #!/bin/bash LOOP_TIMES=10000 # required module: pkcs8_key_parser, virtio_crypto modprobe pkcs8_key_parser # if CONFIG_PKCS8_PRIVATE_KEY_PARSER=m modprobe virtio_crypto # if CONFIG_CRYPTO_DEV_VIRTIO=m rm -rf /tmp/data dd if=/dev/random of=/tmp/data count=1 bs=230 # generate private key and self-signed cert openssl req -nodes -x509 -newkey rsa:2048 -keyout key.pem \ -outform der -out cert.der \ -subj "/C=CN/ST=GD/L=SZ/O=vihoo/OU=dev/CN=always.com/emailAddress=yy@always.com" # convert private key from pem to der openssl pkcs8 -in key.pem -topk8 -nocrypt -outform DER -out key.der # add key PRIV_KEY_ID=`cat key.der | keyctl padd asymmetric test_priv_key @s` echo "priv key id = "$PRIV_KEY_ID PUB_KEY_ID=`cat cert.der | keyctl padd asymmetric test_pub_key @s` echo "pub key id = "$PUB_KEY_ID # query key keyctl pkey_query $PRIV_KEY_ID 0 keyctl pkey_query $PUB_KEY_ID 0 # here we only run pkey_encrypt because it is the fastest interface function bench_pub() { keyctl pkey_encrypt $PUB_KEY_ID 0 /tmp/data enc=pkcs1 >/tmp/enc.pub } # do bench_pub in a loop to reproduce the memory leak for (( i = 0; i < ${LOOP_TIMES}; ++i )); do bench_pub done Signed-off-by: lei he Acked-by: Michael S.
Tsirkin Reviewed-by: Gonglei Signed-off-by: Herbert Xu --- drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/crypto') diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 2a60d0525cde..168195672e2e 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -56,6 +56,10 @@ static void virtio_crypto_akcipher_finalize_req( struct virtio_crypto_akcipher_request *vc_akcipher_req, struct akcipher_request *req, int err) { + kfree(vc_akcipher_req->src_buf); + kfree(vc_akcipher_req->dst_buf); + vc_akcipher_req->src_buf = NULL; + vc_akcipher_req->dst_buf = NULL; virtcrypto_clear_request(&vc_akcipher_req->base); crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err); -- cgit v1.2.3 From 70513e1d65599f39aba4fa6594546f7c81fa59f4 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 20 Sep 2022 11:21:18 +0800 Subject: crypto: aspeed - Fix check for platform_get_irq() errors The platform_get_irq() function returns negative on error and positive non-zero values on success. It never returns zero, but if it did, then treat that as a success. Also remove the redundant dev_err() print, as platform_get_irq() already prints an error. Fixes: 108713a713c7 ("crypto: aspeed - Add HACE hash driver") Signed-off-by: YueHaibing Reviewed-by: Neal Liu Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-hace.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c index 3f880aafb6a2..f7f1d33defb1 100644 --- a/drivers/crypto/aspeed/aspeed-hace.c +++ b/drivers/crypto/aspeed/aspeed-hace.c @@ -130,10 +130,8 @@ static int aspeed_hace_probe(struct platform_device *pdev) /* Get irq number and register it */ hace_dev->irq = platform_get_irq(pdev, 0); - if (!hace_dev->irq) { - dev_err(&pdev->dev, "Failed to get interrupt\n"); + if (hace_dev->irq < 0) return -ENXIO; - } rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0, dev_name(&pdev->dev), hace_dev); -- cgit v1.2.3 From 6a40fb0d9db15f95b9ea884fbaedf8f82c51399f Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Tue, 20 Sep 2022 06:32:52 +0000 Subject: crypto: ccp - Remove the unneeded result variable Return the value of ccp_crypto_enqueue_request() directly instead of storing it in another redundant variable.
Reported-by: Zeal Robot Signed-off-by: ye xingchen Acked-by: John Allen Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-des3.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index ec97daf0fcb7..278636ed251a 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -64,7 +64,6 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt) struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; - int ret; if (!ctx->u.des3.key_len) return -EINVAL; @@ -100,9 +99,7 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt) rctx->cmd.u.des3.src_len = req->cryptlen; rctx->cmd.u.des3.dst = req->dst; - ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); - - return ret; + return ccp_crypto_enqueue_request(&req->base, &rctx->cmd); } static int ccp_des3_encrypt(struct skcipher_request *req) -- cgit v1.2.3 From 0cb3c9cdf7fcc2ef75a6008223d2e3ee58ea00e1 Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Tue, 20 Sep 2022 06:47:54 +0000 Subject: crypto: octeontx2 - Remove the unneeded result variable Return the value of otx2_cpt_send_mbox_msg() directly instead of storing it in another redundant variable. Reported-by: Zeal Robot Signed-off-by: ye xingchen Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c index 02cb9e44afd8..75c403f2b1d9 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c @@ -191,7 +191,6 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf) struct otx2_mbox *mbox = &cptvf->pfvf_mbox; struct pci_dev *pdev = cptvf->pdev; struct mbox_msghdr *req; - int ret; req = (struct mbox_msghdr *) otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), @@ -204,7 +203,5 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf) req->sig = OTX2_MBOX_REQ_SIG; req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0); - ret = otx2_cpt_send_mbox_msg(mbox, pdev); - - return ret; + return otx2_cpt_send_mbox_msg(mbox, pdev); } -- cgit v1.2.3 From 72f6e0ea2b0ecea8585f3cd4298286c85c5121e6 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Wed, 21 Sep 2022 10:38:30 +0100 Subject: crypto: qat - add limit to linked list parsing adf_copy_key_value_data() copies data from userland to kernel, based on a linked list provided by userland. If userland provides a circular list (or just a very long one) then it would drive a long loop where an allocation occurs on every iteration. This could lead to low memory conditions. Add a limit to stop the endless loop.
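The bounded-traversal pattern in isolation (a sketch distilled from the diff below; the node layout and user_head are hypothetical, and the 512 cap matches ADF_CFG_MAX_SECTION):

#define MAX_NODES 512

struct node { struct node *next; /* ...key/value payload... */ };
struct node tmp, *head = user_head;	/* head comes from userland */
int i;

/* The loop counter, not the user-controlled next pointer, terminates
 * the walk, so a circular or oversized list cannot spin forever. */
for (i = 0; head && i < MAX_NODES; i++) {
	if (copy_from_user(&tmp, (void __user *)head, sizeof(tmp)))
		return -EFAULT;
	/* ...process tmp, which may allocate memory... */
	head = tmp.next;
}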
Signed-off-by: Adam Guerin Co-developed-by: Ciunas Bennett Signed-off-by: Ciunas Bennett Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/qat/qat_common/adf_ctl_drv.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index 508c18edd692..82b69e1f725b 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -16,6 +16,9 @@ #include "adf_cfg_common.h" #include "adf_cfg_user.h" +#define ADF_CFG_MAX_SECTION 512 +#define ADF_CFG_MAX_KEY_VAL 256 + #define DEVICE_NAME "qat_adf_ctl" static DEFINE_MUTEX(adf_ctl_lock); @@ -137,10 +140,11 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, struct adf_user_cfg_key_val key_val; struct adf_user_cfg_key_val *params_head; struct adf_user_cfg_section section, *section_head; + int i, j; section_head = ctl_data->config_section; - while (section_head) { + for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) { if (copy_from_user(§ion, (void __user *)section_head, sizeof(*section_head))) { dev_err(&GET_DEV(accel_dev), @@ -156,7 +160,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, params_head = section.params; - while (params_head) { + for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) { if (copy_from_user(&key_val, (void __user *)params_head, sizeof(key_val))) { dev_err(&GET_DEV(accel_dev), -- cgit v1.2.3 From 4edff849f7a0abca962374512907b3e2151091f4 Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Thu, 22 Sep 2022 11:27:53 +0000 Subject: crypto: zip - remove the unneeded result variable Return the value directly instead of storing it in another redundant variable. 
Reported-by: Zeal Robot Signed-off-by: ye xingchen Signed-off-by: Herbert Xu --- drivers/crypto/cavium/zip/zip_crypto.c | 30 ++++++------------------------ 1 file changed, 6 insertions(+), 24 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c index 7df71fcebe8f..1046a746d36f 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ b/drivers/crypto/cavium/zip/zip_crypto.c @@ -198,22 +198,16 @@ static int zip_decompress(const u8 *src, unsigned int slen, /* Legacy Compress framework start */ int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm) { - int ret; struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - ret = zip_ctx_init(zip_ctx, 0); - - return ret; + return zip_ctx_init(zip_ctx, 0); } int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm) { - int ret; struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - ret = zip_ctx_init(zip_ctx, 1); - - return ret; + return zip_ctx_init(zip_ctx, 1); } void zip_free_comp_ctx(struct crypto_tfm *tfm) @@ -227,24 +221,18 @@ int zip_comp_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { - int ret; struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - ret = zip_compress(src, slen, dst, dlen, zip_ctx); - - return ret; + return zip_compress(src, slen, dst, dlen, zip_ctx); } int zip_comp_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { - int ret; struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - ret = zip_decompress(src, slen, dst, dlen, zip_ctx); - - return ret; + return zip_decompress(src, slen, dst, dlen, zip_ctx); } /* Legacy compress framework end */ /* SCOMP framework start */ @@ -298,22 +286,16 @@ int zip_scomp_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { - int ret; struct zip_kernel_ctx *zip_ctx = ctx; - ret = zip_compress(src, slen, dst, dlen, zip_ctx); - - return ret; + return zip_compress(src, slen, dst, dlen, zip_ctx); } int zip_scomp_decompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { - int ret; struct zip_kernel_ctx *zip_ctx = ctx; - ret = zip_decompress(src, slen, dst, dlen, zip_ctx); - - return ret; + return zip_decompress(src, slen, dst, dlen, zip_ctx); } /* SCOMP framework end */ -- cgit v1.2.3 From edfc7e76d2252eebb98328b23e09336d47810569 Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Fri, 23 Sep 2022 01:29:52 +0000 Subject: crypto: marvell/octeontx - use sysfs_emit() instead of scnprintf() Replace the open-coded scnprintf() with sysfs_emit() to simplify the code.
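In a sysfs show() callback the two forms compare as follows (a sketch; val stands in for the attribute value being printed):

/* open-coded: the caller must know show() buffers are PAGE_SIZE */
return scnprintf(buf, PAGE_SIZE, "%d\n", val);

/* sysfs_emit(): checks that buf is a page-aligned sysfs buffer and
 * drops the explicit length argument */
return sysfs_emit(buf, "%d\n", val);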
Signed-off-by: ye xingchen Reviewed-by: Kees Cook Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c index 36d72e35ebeb..88a41d1ca5f6 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c @@ -661,7 +661,7 @@ static ssize_t vf_type_show(struct device *dev, msg = "Invalid"; } - return scnprintf(buf, PAGE_SIZE, "%s\n", msg); + return sysfs_emit(buf, "%s\n", msg); } static ssize_t vf_engine_group_show(struct device *dev, @@ -670,7 +670,7 @@ static ssize_t vf_engine_group_show(struct device *dev, { struct otx_cptvf *cptvf = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp); + return sysfs_emit(buf, "%d\n", cptvf->vfgrp); } static ssize_t vf_engine_group_store(struct device *dev, @@ -706,7 +706,7 @@ static ssize_t vf_coalesc_time_wait_show(struct device *dev, { struct otx_cptvf *cptvf = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", cptvf_read_vq_done_timewait(cptvf)); } @@ -716,7 +716,7 @@ static ssize_t vf_coalesc_num_wait_show(struct device *dev, { struct otx_cptvf *cptvf = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", cptvf_read_vq_done_numwait(cptvf)); } -- cgit v1.2.3 From 5e9578b29aff681ec30a3866c049305e26629a41 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Fri, 23 Sep 2022 17:08:21 +0800 Subject: crypto: bcm - Simplify obtaining the cipher name crypto_ahash_alg_name(tfm), declared in include/crypto/hash.h, can obtain the name of the cipher, but the function is currently unused, so use it to simplify the code and improve the code structure. Signed-off-by: Gaosheng Cui Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 053315e260c2..c8c799428fe0 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -1928,7 +1928,7 @@ static int ahash_enqueue(struct ahash_request *req) /* SPU2 hardware does not compute hash of zero length data */ if ((rctx->is_final == 1) && (rctx->total_todo == 0) && (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) { - alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); + alg_name = crypto_ahash_alg_name(tfm); flow_log("Doing %sfinal %s zero-len hash request in software\n", rctx->is_final ? "" : "non-", alg_name); err = do_shash((unsigned char *)alg_name, req->result, @@ -2029,7 +2029,7 @@ static int ahash_init(struct ahash_request *req) * supported by the hardware, we need to handle it in software * by calling synchronous hash functions. */ - alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); + alg_name = crypto_ahash_alg_name(tfm); hash = crypto_alloc_shash(alg_name, 0, 0); if (IS_ERR(hash)) { ret = PTR_ERR(hash); -- cgit v1.2.3 From b411b1a0c8bddd470fc8c3457629ac25a168cba0 Mon Sep 17 00:00:00 2001 From: Shang XiaoJing Date: Fri, 23 Sep 2022 18:01:59 +0800 Subject: crypto: aspeed - Remove redundant dev_err call devm_ioremap_resource() prints an error message itself. Remove the dev_err() call to avoid a redundant error message.
Signed-off-by: Shang XiaoJing Signed-off-by: Herbert Xu --- drivers/crypto/aspeed/aspeed-hace.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c index f7f1d33defb1..656cb92c8bb6 100644 --- a/drivers/crypto/aspeed/aspeed-hace.c +++ b/drivers/crypto/aspeed/aspeed-hace.c @@ -123,10 +123,8 @@ static int aspeed_hace_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hace_dev); hace_dev->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(hace_dev->regs)) { - dev_err(&pdev->dev, "Failed to map resources\n"); + if (IS_ERR(hace_dev->regs)) return PTR_ERR(hace_dev->regs); - } /* Get irq number and register it */ hace_dev->irq = platform_get_irq(pdev, 0); -- cgit v1.2.3