Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/ba431-rng.c | 16
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 27
-rw-r--r--  drivers/char/hw_random/cctrng.c | 20
-rw-r--r--  drivers/char/hw_random/core.c | 2
-rw-r--r--  drivers/char/hw_random/intel-rng.c | 2
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 14
-rw-r--r--  drivers/char/hw_random/pic32-rng.c | 3
-rw-r--r--  drivers/char/hw_random/xiphera-trng.c | 4
-rw-r--r--  drivers/char/random.c | 21
-rw-r--r--  drivers/crypto/allwinner/Kconfig | 14
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 23
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 9
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 1
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 11
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 2
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 12
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 12
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 18
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 8
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.h | 18
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.h | 2
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c | 6
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 3
-rw-r--r--  drivers/crypto/atmel-ecc.c | 30
-rw-r--r--  drivers/crypto/atmel-i2c.c | 2
-rw-r--r--  drivers/crypto/atmel-sha.c | 4
-rw-r--r--  drivers/crypto/atmel-tdes.c | 1
-rw-r--r--  drivers/crypto/bcm/cipher.c | 7
-rw-r--r--  drivers/crypto/bcm/spu.c | 16
-rw-r--r--  drivers/crypto/bcm/spu2.c | 43
-rw-r--r--  drivers/crypto/bcm/util.c | 4
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 3
-rw-r--r--  drivers/crypto/caam/caampkc.c | 3
-rw-r--r--  drivers/crypto/cavium/cpt/cptpf_main.c | 1
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_isr.c | 4
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 9
-rw-r--r--  drivers/crypto/cavium/zip/common.h | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c | 3
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 12
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 1
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 6
-rw-r--r--  drivers/crypto/ccp/sp-dev.c | 12
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 15
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 1
-rw-r--r--  drivers/crypto/ccp/tee-dev.c | 57
-rw-r--r--  drivers/crypto/ccp/tee-dev.h | 20
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 4
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 32
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 5
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 1
-rw-r--r--  drivers/crypto/geode-aes.c | 4
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 2
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h | 18
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 921
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 158
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 396
-rw-r--r--  drivers/crypto/hisilicon/qm.h | 29
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_algs.c | 2
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.c | 13
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.h | 2
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 10
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 137
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h | 6
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 267
-rw-r--r--  drivers/crypto/hisilicon/sgl.c | 37
-rw-r--r--  drivers/crypto/hisilicon/trng/trng.c | 13
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 50
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 698
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 99
-rw-r--r--  drivers/crypto/img-hash.c | 3
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 2
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 7
-rw-r--r--  drivers/crypto/keembay/keembay-ocs-aes-core.c | 8
-rw-r--r--  drivers/crypto/keembay/keembay-ocs-hcu-core.c | 8
-rw-r--r--  drivers/crypto/keembay/ocs-hcu.c | 8
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 10
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 14
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 8
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 1
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 1
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 33
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 144
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c | 2
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c | 2
-rw-r--r--  drivers/crypto/nx/nx-common-powernv.c | 4
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 2
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 2
-rw-r--r--  drivers/crypto/nx/nx.c | 5
-rw-r--r--  drivers/crypto/nx/nx_debugfs.c | 2
-rw-r--r--  drivers/crypto/omap-aes.c | 7
-rw-r--r--  drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c | 1
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 4
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | 1
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen2_hw_data.c | 25
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen2_hw_data.h | 13
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_hw_data.c | 40
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_hw_data.h | 14
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_isr.c | 29
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf2pf_msg.c | 4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf_isr.c | 17
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 32
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 4
-rw-r--r--  drivers/crypto/qce/cipher.h | 1
-rw-r--r--  drivers/crypto/qce/common.c | 25
-rw-r--r--  drivers/crypto/qce/common.h | 3
-rw-r--r--  drivers/crypto/qce/sha.c | 143
-rw-r--r--  drivers/crypto/qce/skcipher.c | 69
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c | 2
-rw-r--r--  drivers/crypto/s5p-sss.c | 17
-rw-r--r--  drivers/crypto/sa2ul.c | 143
-rw-r--r--  drivers/crypto/sa2ul.h | 4
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 4
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 8
-rw-r--r--  drivers/crypto/ux500/cryp/cryp.c | 5
-rw-r--r--  drivers/crypto/ux500/cryp/cryp.h | 2
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 10
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_irq.c | 2
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_irq.h | 4
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_irqp.h | 4
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_p.h | 15
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 18
-rw-r--r--  drivers/crypto/vmx/aes.c | 2
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c | 2
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 2
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 2
-rw-r--r--  drivers/crypto/vmx/ghash.c | 2
-rw-r--r--  drivers/crypto/vmx/vmx.c | 2
144 files changed, 2913 insertions, 1482 deletions
diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c
index 410b50b05e21..5b7ca0416490 100644
--- a/drivers/char/hw_random/ba431-rng.c
+++ b/drivers/char/hw_random/ba431-rng.c
@@ -170,7 +170,6 @@ static int ba431_trng_init(struct hwrng *rng)
static int ba431_trng_probe(struct platform_device *pdev)
{
struct ba431_trng *ba431;
- struct resource *res;
int ret;
ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
@@ -179,8 +178,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
ba431->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ba431->base = devm_ioremap_resource(&pdev->dev, res);
+ ba431->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ba431->base))
return PTR_ERR(ba431->base);
@@ -193,7 +191,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ba431);
- ret = hwrng_register(&ba431->rng);
+ ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
if (ret) {
dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
return ret;
@@ -204,15 +202,6 @@ static int ba431_trng_probe(struct platform_device *pdev)
return 0;
}
-static int ba431_trng_remove(struct platform_device *pdev)
-{
- struct ba431_trng *ba431 = platform_get_drvdata(pdev);
-
- hwrng_unregister(&ba431->rng);
-
- return 0;
-}
-
static const struct of_device_id ba431_trng_dt_ids[] = {
{ .compatible = "silex-insight,ba431-rng", .data = NULL },
{ /* sentinel */ }
@@ -225,7 +214,6 @@ static struct platform_driver ba431_trng_driver = {
.of_match_table = ba431_trng_dt_ids,
},
.probe = ba431_trng_probe,
- .remove = ba431_trng_remove,
};
module_platform_driver(ba431_trng_driver);
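The ba431 conversion above is the managed probe pattern: devm_platform_ioremap_resource() folds the platform_get_resource()/devm_ioremap_resource() pair into one call, and devm_hwrng_register() ties the hwrng lifetime to the device so the .remove callback can be deleted. A minimal sketch under a hypothetical example_trng driver (the .read callback and other rng fields are elided):

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical driver state; mirrors the ba431 layout above. */
struct example_trng {
	void __iomem *base;
	struct hwrng rng;
};

static int example_trng_probe(struct platform_device *pdev)
{
	struct example_trng *trng;

	trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
	if (!trng)
		return -ENOMEM;

	/* One call replaces platform_get_resource() + devm_ioremap_resource(). */
	trng->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(trng->base))
		return PTR_ERR(trng->base);

	trng->rng.name = pdev->name;
	/* .read callback omitted in this sketch */

	/*
	 * The managed variant unregisters the hwrng on driver unbind,
	 * so no .remove callback is needed.
	 */
	return devm_hwrng_register(&pdev->dev, &trng->rng);
}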
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 1a7c43b43c6b..e7dd457e9b22 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -32,6 +33,7 @@ struct bcm2835_rng_priv {
void __iomem *base;
bool mask_interrupts;
struct clk *clk;
+ struct reset_control *reset;
};
static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
@@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng)
int ret = 0;
u32 val;
- if (!IS_ERR(priv->clk)) {
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
if (priv->mask_interrupts) {
/* mask the interrupt */
@@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng)
/* disable rng hardware */
rng_writel(priv, 0, RNG_CTRL);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
struct bcm2835_rng_of_data {
@@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
/* Clock is optional on most platforms */
- priv->clk = devm_clk_get(dev, NULL);
- if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
priv->rng.name = pdev->name;
priv->rng.init = bcm2835_rng_init;
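The bcm2835 hunks above rely on two properties of the *_optional getters: they return NULL, not an error pointer, when the firmware describes no clock or reset, and the consumer calls (clk_prepare_enable(), reset_control_reset(), clk_disable_unprepare()) all accept NULL as a no-op. A condensed sketch of the pattern, with a hypothetical example_rng_priv:

#include <linux/clk.h>
#include <linux/reset.h>

/* Hypothetical private struct mirroring the bcm2835 one. */
struct example_rng_priv {
	struct clk *clk;
	struct reset_control *reset;
};

static int example_rng_get_resources(struct device *dev,
				     struct example_rng_priv *priv)
{
	/* Returns NULL, not an error, when the DT describes no clock. */
	priv->clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);	/* covers -EPROBE_DEFER too */

	/* Likewise for an optional reset line. */
	priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(priv->reset))
		return PTR_ERR(priv->reset);

	return 0;
}

static int example_rng_init(struct example_rng_priv *priv)
{
	int ret;

	/* Both consumer calls treat a NULL handle as a no-op, so no
	 * IS_ERR()/NULL guards are needed here. */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	return reset_control_reset(priv->reset);
}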
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 7a293f2147a0..302ffa354c2f 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -486,7 +486,6 @@ static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
static int cctrng_probe(struct platform_device *pdev)
{
- struct resource *req_mem_cc_regs = NULL;
struct cctrng_drvdata *drvdata;
struct device *dev = &pdev->dev;
int rc = 0;
@@ -510,27 +509,16 @@ static int cctrng_probe(struct platform_device *pdev)
drvdata->circ.buf = (char *)drvdata->data_buf;
- /* Get device resources */
- /* First CC registers space */
- req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Map registers space */
- drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->cc_base)) {
dev_err(dev, "Failed to ioremap registers");
return PTR_ERR(drvdata->cc_base);
}
- dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
- req_mem_cc_regs);
- dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
- &req_mem_cc_regs->start, drvdata->cc_base);
-
/* Then IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Failed getting IRQ resource\n");
+ if (irq < 0)
return irq;
- }
/* parse sampling rate from device tree */
rc = cc_trng_parse_sampling_ratio(drvdata);
@@ -585,7 +573,7 @@ static int cctrng_probe(struct platform_device *pdev)
atomic_set(&drvdata->pending_hw, 1);
/* registration of the hwrng device */
- rc = hwrng_register(&drvdata->rng);
+ rc = devm_hwrng_register(dev, &drvdata->rng);
if (rc) {
dev_err(dev, "Could not register hwrng device.\n");
goto post_pm_err;
@@ -618,8 +606,6 @@ static int cctrng_remove(struct platform_device *pdev)
dev_dbg(dev, "Releasing cctrng resources...\n");
- hwrng_unregister(&drvdata->rng);
-
cc_trng_pm_fini(drvdata);
cc_trng_clk_fini(drvdata);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 8c1c47dd9f46..adb3c2bd7783 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -396,7 +396,7 @@ static ssize_t hwrng_attr_selected_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+ return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index eb7db27f9f19..d740b8814bf3 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -25,13 +25,13 @@
*/
#include <linux/hw_random.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
#define PFX KBUILD_MODNAME ": "
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 5cc5fc504968..cede9f159102 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -30,8 +30,7 @@
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#define RNG_REG_STATUS_RDY (1 << 0)
@@ -378,16 +377,13 @@ MODULE_DEVICE_TABLE(of, omap_rng_of_match);
static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
int irq, err;
- match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
- if (!match) {
- dev_err(dev, "no compatible OF match\n");
- return -EINVAL;
- }
- priv->pdata = match->data;
+ priv->pdata = of_device_get_match_data(dev);
+ if (!priv->pdata)
+ return -ENODEV;
+
if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
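The omap-rng change swaps the of_match_device() boilerplate for of_device_get_match_data(), which returns the .data member of the matched of_device_id directly, or NULL when nothing matched. A minimal sketch with a hypothetical example_pdata:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_pdata;	/* hypothetical per-compatible driver data */

static int example_probe(struct platform_device *pdev)
{
	const struct example_pdata *pdata;

	/* One call replaces of_match_device() plus the match->data lookup. */
	pdata = of_device_get_match_data(&pdev->dev);
	if (!pdata)
		return -ENODEV;

	/* ... configure the device from pdata ... */
	return 0;
}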
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index e8210c1715cf..99c8bd0859a1 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev)
priv->rng.name = pdev->name;
priv->rng.read = pic32_rng_read;
- ret = hwrng_register(&priv->rng);
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
if (ret)
goto err_register;
@@ -113,7 +113,6 @@ static int pic32_rng_remove(struct platform_device *pdev)
{
struct pic32_rng *rng = platform_get_drvdata(pdev);
- hwrng_unregister(&rng->rng);
writel(0, rng->base + RNGCON);
clk_disable_unprepare(rng->clk);
return 0;
diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c
index 7bdab8c8a6a8..2a9fea72b2e0 100644
--- a/drivers/char/hw_random/xiphera-trng.c
+++ b/drivers/char/hw_random/xiphera-trng.c
@@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev)
int ret;
struct xiphera_trng *trng;
struct device *dev = &pdev->dev;
- struct resource *res;
trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trng->mem = devm_ioremap_resource(dev, res);
+ trng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->mem))
return PTR_ERR(trng->mem);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0fe9e200e4c8..605969ed0f96 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -500,7 +500,6 @@ struct entropy_store {
unsigned short add_ptr;
unsigned short input_rotate;
int entropy_count;
- unsigned int initialized:1;
unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
};
@@ -660,7 +659,7 @@ static void process_random_ready_list(void)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
- int entropy_count, orig, has_initialized = 0;
+ int entropy_count, orig;
const int pool_size = r->poolinfo->poolfracbits;
int nfrac = nbits << ENTROPY_SHIFT;
@@ -717,23 +716,14 @@ retry:
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
- if (has_initialized) {
- r->initialized = 1;
- kill_fasync(&fasync, SIGIO, POLL_IN);
- }
-
trace_credit_entropy_bits(r->name, nbits,
entropy_count >> ENTROPY_SHIFT, _RET_IP_);
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
- if (crng_init < 2) {
- if (entropy_bits < 128)
- return;
+ if (crng_init < 2 && entropy_bits >= 128)
crng_reseed(&primary_crng, r);
- entropy_bits = ENTROPY_BITS(r);
- }
}
}
@@ -819,7 +809,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
{
- memcpy(&crng->state[0], "expand 32-byte k", 16);
+ chacha_init_consts(crng->state);
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
crng_init_try_arch(crng);
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
@@ -827,7 +817,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
static void __init crng_initialize_primary(struct crng_state *crng)
{
- memcpy(&crng->state[0], "expand 32-byte k", 16);
+ chacha_init_consts(crng->state);
_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
if (crng_init_try_arch_early(crng) && trust_cpu) {
invalidate_batched_entropy();
@@ -1372,8 +1362,7 @@ retry:
}
/*
- * This function does the actual extraction for extract_entropy and
- * extract_entropy_user.
+ * This function does the actual extraction for extract_entropy.
*
* Note: we assume that .poolwords is a multiple of 16 words.
*/
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index 856fb2045656..b8e75210a0e3 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
config CRYPTO_DEV_SUN8I_CE_HASH
bool "Enable support for hash on sun8i-ce"
depends on CRYPTO_DEV_SUN8I_CE
- select MD5
- select SHA1
- select SHA256
- select SHA512
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
help
Say y to enable support for hash algorithms.
@@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
config CRYPTO_DEV_SUN8I_SS_HASH
bool "Enable support for hash on sun8i-ss"
depends on CRYPTO_DEV_SUN8I_SS
- select MD5
- select SHA1
- select SHA256
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
help
Say y to enable support for hash algorithms.
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index c2e6f5ed1d79..dec79fa3ebaf 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
sizeof(struct sun4i_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm));
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0)
goto error_pm;
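This pm_runtime_get_sync() to pm_runtime_resume_and_get() substitution repeats across the allwinner drivers below. The difference: pm_runtime_get_sync() bumps the usage counter even when the resume fails, forcing callers to undo it with pm_runtime_put_noidle() on the error path, whereas pm_runtime_resume_and_get() drops its own reference on failure. A minimal sketch:

#include <linux/pm_runtime.h>

static int example_runtime_get(struct device *dev)
{
	int err;

	/*
	 * On failure the helper has already dropped the usage count it
	 * took, so the caller just propagates the error; with
	 * pm_runtime_get_sync() a pm_runtime_put_noidle() would be
	 * required here.
	 */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;

	/* ... use the now-resumed device ... */

	pm_runtime_put(dev);	/* balance the successful get */
	return 0;
}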
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 709905ec4680..44b8fc4b786d 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev)
{
struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
- if (ss->reset)
- reset_control_assert(ss->reset);
+ reset_control_assert(ss->reset);
clk_disable_unprepare(ss->ssclk);
clk_disable_unprepare(ss->busclk);
@@ -314,12 +313,10 @@ static int sun4i_ss_pm_resume(struct device *dev)
goto err_enable;
}
- if (ss->reset) {
- err = reset_control_deassert(ss->reset);
- if (err) {
- dev_err(ss->dev, "Cannot deassert reset control\n");
- goto err_enable;
- }
+ err = reset_control_deassert(ss->reset);
+ if (err) {
+ dev_err(ss->dev, "Cannot deassert reset control\n");
+ goto err_enable;
}
return err;
@@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
- if (IS_ERR(ss->reset)) {
- if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
- return PTR_ERR(ss->reset);
+ if (IS_ERR(ss->reset))
+ return PTR_ERR(ss->reset);
+ if (!ss->reset)
dev_info(&pdev->dev, "no reset control found\n");
- ss->reset = NULL;
- }
/*
* Check that clock have the correct rates given in the datasheet
@@ -459,7 +454,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
* this info could be useful
*/
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
goto error_pm;
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
index c1b4585e9bbc..d28292762b32 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
@@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
op->ss = algt->ss;
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0)
return err;
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
index 443160a114bb..491fcb7b81b4 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
@@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
return err;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 33707a2e55ff..54ae8d16e493 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -240,11 +240,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
} else {
if (nr_sgs > 0)
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
}
theend_iv:
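The sg_nents() conversions in this and the following cipher/hash hunks follow a DMA-API rule: dma_map_sg() may return fewer entries than it was handed (entries can be coalesced), but dma_unmap_sg() must be called with the original nents, not the mapped count. The drivers keep the mapped count for programming descriptors and recompute the original count with sg_nents() at unmap time. A minimal sketch:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_dma_roundtrip(struct device *dev, struct scatterlist *sg)
{
	/* The mapped count may be smaller than the list's entry count. */
	int nr_sgs = dma_map_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);

	if (nr_sgs <= 0)
		return -EINVAL;

	/* ... program nr_sgs hardware descriptors ... */

	/* Unmap with the ORIGINAL entry count, recomputed via sg_nents(). */
	dma_unmap_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);
	return 0;
}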
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 158422ff5695..00194d1d9ae6 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
if (err)
goto error_alg;
- err = pm_runtime_get_sync(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
if (err < 0)
goto error_alg;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 2f09a37306e2..88194718a806 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -405,7 +405,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index cfde9ee4356b..cd1baee424a1 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, dma_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
+ err = -EFAULT;
goto err_iv;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index ed2a69f82e1c..9ef1c85c4aaa 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -232,10 +232,13 @@ sgd_next:
theend_sgs:
if (areq->src == areq->dst) {
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
+ DMA_FROM_DEVICE);
}
theend_iv:
@@ -351,7 +354,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
- err = pm_runtime_get_sync(op->ss->dev);
+ err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0) {
dev_err(op->ss->dev, "pm error %d\n", err);
goto error_pm;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index e0ddc684798d..80e89066dbd1 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
if (err)
goto error_alg;
- err = pm_runtime_get_sync(ss->dev);
+ err = pm_runtime_resume_and_get(ss->dev);
if (err < 0)
goto error_alg;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 11cbcbc83a7b..3c073eb3db03 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
bf = (__le32 *)pad;
result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result)
+ if (!result) {
+ kfree(pad);
return -ENOMEM;
+ }
for (i = 0; i < MAX_SG; i++) {
rctx->t_dst[i].addr = 0;
@@ -432,14 +434,14 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
- dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+ DMA_TO_DEVICE);
dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- kfree(pad);
-
memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
- kfree(result);
theend:
+ kfree(pad);
+ kfree(result);
crypto_finalize_hash_request(engine, breq, err);
return 0;
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index 08a1473b2145..3191527928e4 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
if (dma_mapping_error(ss->dev, dma_iv)) {
dev_err(ss->dev, "Cannot DMA MAP IV\n");
- return -EFAULT;
+ err = -EFAULT;
+ goto err_free;
}
dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
@@ -167,6 +168,7 @@ err_iv:
memcpy(ctx->seed, d + dlen, ctx->slen);
}
memzero_explicit(d, todo);
+err_free:
kfree(d);
return err;
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a3fa849b139a..ded732242732 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -115,7 +115,7 @@ int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}
-/**
+/*
* AES Functions
*/
static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
@@ -374,7 +374,7 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
}
-/**
+/*
* AES-CCM Functions
*/
@@ -489,7 +489,7 @@ int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
}
-/**
+/*
* AES-GCM Functions
*/
@@ -617,7 +617,7 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
return crypto4xx_crypt_aes_gcm(req, true);
}
-/**
+/*
* HASH SHA1 Functions
*/
static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
@@ -711,7 +711,7 @@ int crypto4xx_hash_digest(struct ahash_request *req)
ctx->sa_len, 0, NULL);
}
-/**
+/*
* SHA1 Algorithm
*/
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 8d1b918a0533..8278d98074e9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -44,7 +44,7 @@
#define PPC4XX_SEC_VERSION_STR "0.5"
-/**
+/*
* PPC4xx Crypto Engine Initialization Routine
*/
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
@@ -159,7 +159,7 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
ctx->sa_len = 0;
}
-/**
+/*
* alloc memory for the gather ring
* no need to alloc buf for the ring
* gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -268,7 +268,7 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
return tail;
}
-/**
+/*
* alloc memory for the gather ring
* no need to alloc buf for the ring
* gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -346,7 +346,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
return &dev->gdr[idx];
}
-/**
+/*
* alloc memory for the scatter ring
* need to alloc buf for the ring
* sdr_tail, sdr_head and sdr_count are initialized by this function
@@ -930,7 +930,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
return is_busy ? -EBUSY : -EINPROGRESS;
}
-/**
+/*
* Algorithm Registration Functions
*/
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
@@ -1097,7 +1097,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
} while (head != tail);
}
-/**
+/*
* Top Half of isr.
*/
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
@@ -1186,7 +1186,7 @@ static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
return 0;
}
-/**
+/*
* Supported Crypto Algorithms
*/
static struct crypto4xx_alg_common crypto4xx_alg[] = {
@@ -1369,7 +1369,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
} },
};
-/**
+/*
* Module Initialization Routine
*/
static int crypto4xx_probe(struct platform_device *ofdev)
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index a4e25b46cd0a..56c10668c0ab 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -188,7 +188,7 @@ int crypto4xx_hash_final(struct ahash_request *req);
int crypto4xx_hash_update(struct ahash_request *req);
int crypto4xx_hash_init(struct ahash_request *req);
-/**
+/*
* Note: Only use this function to copy items that is word aligned.
*/
static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index c4c0a1a75941..1038061224da 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -104,7 +104,7 @@
#define CRYPTO4XX_PRNG_LFSR_L 0x00070030
#define CRYPTO4XX_PRNG_LFSR_H 0x00070034
-/**
+/*
* Initialize CRYPTO ENGINE registers, and memory bases.
*/
#define PPC4XX_PDR_POLL 0x3ff
@@ -123,7 +123,7 @@
#define PPC4XX_INT_TIMEOUT_CNT 0
#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
-/**
+/*
* all follow define are ad hoc
*/
#define PPC4XX_RING_RETRY 100
@@ -131,7 +131,7 @@
#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
-/**
+/*
* Generic Security Association (SA) with all possible fields. These will
* never likely used except for reference purpose. These structure format
* can be not changed as the hardware expects them to be layout as defined.
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index fe756abfc19f..e98e4e7abbad 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -14,7 +14,7 @@
#define AES_IV_SIZE 16
-/**
+/*
* Contents of Dynamic Security Association (SA) with all possible fields
*/
union dynamic_sa_contents {
@@ -122,7 +122,7 @@ union sa_command_0 {
#define SA_AES_KEY_LEN_256 4
#define SA_REV2 1
-/**
+/*
* The follow defines bits sa_command_1
* In Basic hash mode this bit define simple hash or hmac.
* In IPsec mode, this bit define muting control.
@@ -172,7 +172,7 @@ struct dynamic_sa_ctl {
union sa_command_1 sa_command_1;
} __attribute__((packed));
-/**
+/*
* State Record for Security Association (SA)
*/
struct sa_state_record {
@@ -184,7 +184,7 @@ struct sa_state_record {
};
} __attribute__((packed));
-/**
+/*
* Security Association (SA) for AES128
*
*/
@@ -213,7 +213,7 @@ struct dynamic_sa_aes192 {
#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4)
#define SA_AES192_CONTENTS 0x3e000062
-/**
+/*
* Security Association (SA) for AES256
*/
struct dynamic_sa_aes256 {
@@ -228,7 +228,7 @@ struct dynamic_sa_aes256 {
#define SA_AES256_CONTENTS 0x3e000082
#define SA_AES_CONTENTS 0x3e000002
-/**
+/*
* Security Association (SA) for AES128 CCM
*/
struct dynamic_sa_aes128_ccm {
@@ -242,7 +242,7 @@ struct dynamic_sa_aes128_ccm {
#define SA_AES128_CCM_CONTENTS 0x3e000042
#define SA_AES_CCM_CONTENTS 0x3e000002
-/**
+/*
* Security Association (SA) for AES128_GCM
*/
struct dynamic_sa_aes128_gcm {
@@ -258,7 +258,7 @@ struct dynamic_sa_aes128_gcm {
#define SA_AES128_GCM_CONTENTS 0x3e000442
#define SA_AES_GCM_CONTENTS 0x3e000402
-/**
+/*
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
index 3af732f25c1c..7356716274cb 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.h
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index 8b5e07316352..c6865cbd334b 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -236,10 +236,10 @@ static int meson_cipher(struct skcipher_request *areq)
dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
if (areq->src == areq->dst) {
- dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
- dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+ dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
}
if (areq->iv && ivsize > 0) {
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 5bbeff433c8c..6e7ae896717c 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
struct meson_dev *mc;
int err, i;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 9bd8e5167be3..333fbefbbccb 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -26,7 +26,7 @@
static struct atmel_ecc_driver_data driver_data;
/**
- * atmel_ecdh_ctx - transformation context
+ * struct atmel_ecdh_ctx - transformation context
* @client : pointer to i2c client device
* @fallback : used for unsupported curves or when user wants to use its own
* private key.
@@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data;
* of the user to not call set_secret() while
* generate_public_key() or compute_shared_secret() are in flight.
* @curve_id : elliptic curve id
- * @n_sz : size in bytes of the n prime
* @do_fallback: true when the device doesn't support the curve or when the user
* wants to use its own private key.
*/
@@ -43,7 +42,6 @@ struct atmel_ecdh_ctx {
struct crypto_kpp *fallback;
const u8 *public_key;
unsigned int curve_id;
- size_t n_sz;
bool do_fallback;
};
@@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
int status)
{
struct kpp_request *req = areq;
- struct atmel_ecdh_ctx *ctx = work_data->ctx;
struct atmel_i2c_cmd *cmd = &work_data->cmd;
size_t copied, n_sz;
@@ -59,7 +56,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
goto free_work_data;
/* might want less than we've got */
- n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+ n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len);
/* copy the shared secret */
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
@@ -73,14 +70,6 @@ free_work_data:
kpp_request_complete(req, status);
}
-static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
-{
- if (curve_id == ECC_CURVE_NIST_P256)
- return ATMEL_ECC_NIST_P256_N_SIZE;
-
- return 0;
-}
-
/*
* A random private key is generated and stored in the device. The device
* returns the pair public key.
@@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
- if (!ctx->n_sz || params.key_size) {
+ if (params.key_size) {
/* fallback to ecdh software implementation */
ctx->do_fallback = true;
return crypto_kpp_set_secret(ctx->fallback, buf, len);
@@ -125,7 +113,6 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
goto free_cmd;
ctx->do_fallback = false;
- ctx->curve_id = params.curve_id;
atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
@@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
struct crypto_kpp *fallback;
struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->client = atmel_ecc_i2c_client_alloc();
if (IS_ERR(ctx->client)) {
pr_err("tfm - i2c_client binding failed\n");
@@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
return ATMEL_ECC_PUBKEY_SIZE;
}
-static struct kpp_alg atmel_ecdh = {
+static struct kpp_alg atmel_ecdh_nist_p256 = {
.set_secret = atmel_ecdh_set_secret,
.generate_public_key = atmel_ecdh_generate_public_key,
.compute_shared_secret = atmel_ecdh_compute_shared_secret,
@@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = {
.max_size = atmel_ecdh_max_size,
.base = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
- .cra_name = "ecdh",
+ .cra_name = "ecdh-nist-p256",
.cra_driver_name = "atmel-ecdh",
.cra_priority = ATMEL_ECC_PRIORITY,
.cra_module = THIS_MODULE,
@@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client,
&driver_data.i2c_client_list);
spin_unlock(&driver_data.i2c_list_lock);
- ret = crypto_register_kpp(&atmel_ecdh);
+ ret = crypto_register_kpp(&atmel_ecdh_nist_p256);
if (ret) {
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
dev_err(&client->dev, "%s alg registration failed\n",
- atmel_ecdh.base.cra_driver_name);
+ atmel_ecdh_nist_p256.base.cra_driver_name);
} else {
dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
}
@@ -365,7 +353,7 @@ static int atmel_ecc_remove(struct i2c_client *client)
return -EBUSY;
}
- crypto_unregister_kpp(&atmel_ecdh);
+ crypto_unregister_kpp(&atmel_ecdh_nist_p256);
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index e8e8281e027d..6fd3e969211d 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -339,7 +339,7 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
if (bus_clk_rate > 1000000L) {
- dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+ dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
bus_clk_rate);
return -EINVAL;
}
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 352d80cb5ae9..1b13f601fd95 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -434,7 +434,7 @@ static int atmel_sha_init(struct ahash_request *req)
ctx->flags = 0;
- dev_dbg(dd->dev, "init: digest size: %d\n",
+ dev_dbg(dd->dev, "init: digest size: %u\n",
crypto_ahash_digestsize(tfm));
switch (crypto_ahash_digestsize(tfm)) {
@@ -1102,7 +1102,7 @@ static int atmel_sha_start(struct atmel_sha_dev *dd)
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
int err;
- dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n",
ctx->op, req->nbytes);
err = atmel_sha_hw_init(dd);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 4d63cb13a54f..6f01c51e3c37 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1217,7 +1217,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
- dev_err(dev, "can't ioremap\n");
err = PTR_ERR(tdes_dd->io_base);
goto err_tasklet_kill;
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 851b149f7170..053315e260c2 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1019,6 +1019,7 @@ static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
* a SPU response message for an AEAD request. Includes buffers to catch SPU
* message headers and the response data.
* @mssg: mailbox message containing the receive sg
+ * @req: Crypto API request
* @rctx: crypto request context
* @rx_frag_num: number of scatterlist elements required to hold the
* SPU response message
@@ -2952,9 +2953,9 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
/**
* rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
- * cipher: AEAD structure
- * key: Key followed by 4 bytes of salt
- * keylen: Length of key plus salt, in bytes
+ * @cipher: AEAD structure
+ * @key: Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
*
* Extracts salt from key and stores it to be prepended to IV on each request.
* Digest is always 16 bytes
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index 007abf92cc05..6283e8c6d51d 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -457,7 +457,7 @@ u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
* @cipher_mode: Algo type
* @data_size: Length of plaintext (bytes)
*
- * @Return: Length of padding, in bytes
+ * Return: Length of padding, in bytes
*/
u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
unsigned int data_size)
@@ -510,10 +510,10 @@ u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
}
/**
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+ * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
* in a SPU request after the AAD and before the payload.
* @cipher_mode: cipher mode
- * @iv_ctr_len: initialization vector length in bytes
+ * @iv_len: initialization vector length in bytes
*
* In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
* to include the IV as a separate field in the SPU request msg.
@@ -543,9 +543,9 @@ enum hash_type spum_hash_type(u32 src_sent)
/**
* spum_digest_size() - Determine the size of a hash digest to expect the SPU to
* return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg: The hash algorithm
- * htype: Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg: The hash algorithm
+ * @htype: Type of hash operation (init, update, full, etc)
*
* When doing incremental hashing for an algorithm with a truncated hash
* (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
@@ -580,7 +580,7 @@ u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
* @aead_parms: Parameters related to AEAD operation
* @data_size: Length of data to be encrypted or authenticated. If AEAD, does
* not include length of AAD.
-
+ *
* Return: the length of the SPU header in bytes. 0 if an error occurs.
*/
u32 spum_create_request(u8 *spu_hdr,
@@ -911,7 +911,7 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
* setkey() time in spu_cipher_req_init().
* @spu_hdr: Start of the request message header (MH field)
* @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound: 0 encrypt, 1 decrypt
+ * @is_inbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
* @data_size: Length of the data in the BD field
*
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 2db35b5ccaa2..07989bb8c220 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -543,7 +543,8 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
/**
* spu2_fmd_init() - At setkey time, initialize the fixed meta data for
* subsequent skcipher requests for this context.
- * @spu2_cipher_type: Cipher algorithm
+ * @fmd: Start of FMD field to be written
+ * @spu2_type: Cipher algorithm
* @spu2_mode: Cipher mode
* @cipher_key_len: Length of cipher key, in bytes
* @cipher_iv_len: Length of cipher initialization vector, in bytes
@@ -598,7 +599,7 @@ static int spu2_fmd_init(struct SPU2_FMD *fmd,
* SPU request packet.
* @fmd: Start of FMD field to be written
* @is_inbound: true if decrypting. false if encrypting.
- * @authFirst: true if alg authenticates before encrypting
+ * @auth_first: true if alg authenticates before encrypting
* @protocol: protocol selector
* @cipher_type: cipher algorithm
* @cipher_mode: cipher mode
@@ -640,6 +641,7 @@ static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
* spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
* SPU request packet.
* @fmd: Start of FMD field to be written
+ * @is_inbound: true if decrypting. false if encrypting.
* @assoc_size: Length of additional associated data, in bytes
* @auth_key_len: Length of authentication key, in bytes
* @cipher_key_len: Length of cipher key, in bytes
@@ -793,7 +795,7 @@ u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
}
/**
- * spu_payload_length() - Given a SPU2 message header, extract the payload
+ * spu2_payload_length() - Given a SPU2 message header, extract the payload
* length.
* @spu_hdr: Start of SPU message header (FMD)
*
@@ -812,10 +814,11 @@ u32 spu2_payload_length(u8 *spu_hdr)
}
/**
- * spu_response_hdr_len() - Determine the expected length of a SPU response
+ * spu2_response_hdr_len() - Determine the expected length of a SPU response
* header.
* @auth_key_len: Length of authentication key, in bytes
* @enc_key_len: Length of encryption key, in bytes
+ * @is_hash: Unused
*
* For SPU2, includes just FMD. OMD is never requested.
*
@@ -827,7 +830,7 @@ u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
}
/**
- * spu_hash_pad_len() - Calculate the length of hash padding required to extend
+ * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
* data to a full block size.
* @hash_alg: hash algorithm
* @hash_mode: hash mode
@@ -845,8 +848,10 @@ u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
}
/**
- * spu2_gcm_ccm_padlen() - Determine the length of GCM/CCM padding for either
+ * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
* the AAD field or the data.
+ * @cipher_mode: Unused
+ * @data_size: Unused
*
* Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
*/
@@ -857,7 +862,7 @@ u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
}
/**
- * spu_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
+ * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
* associated data in a SPU2 output packet.
* @cipher_mode: cipher mode
* @assoc_len: length of additional associated data, in bytes
@@ -878,11 +883,11 @@ u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
return resp_len;
}
-/*
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+/**
+ * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
* in a SPU request after the AAD and before the payload.
* @cipher_mode: cipher mode
- * @iv_ctr_len: initialization vector length in bytes
+ * @iv_len: initialization vector length in bytes
*
* For SPU2, AEAD IV is included in OMD and does not need to be repeated
* prior to the payload.
@@ -909,9 +914,9 @@ enum hash_type spu2_hash_type(u32 src_sent)
/**
* spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
* return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg: The hash algorithm
- * htype: Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg: The hash algorithm
+ * @htype: Type of hash operation (init, update, full, etc)
*
*/
u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
@@ -921,7 +926,7 @@ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
}
/**
- * spu_create_request() - Build a SPU2 request message header, includint FMD and
+ * spu2_create_request() - Build a SPU2 request message header, includint FMD and
* OMD.
* @spu_hdr: Start of buffer where SPU request header is to be written
* @req_opts: SPU request message options
@@ -1105,7 +1110,7 @@ u32 spu2_create_request(u8 *spu_hdr,
}
/**
- * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
+ * spu2_cipher_req_init() - Build an skcipher SPU2 request message header,
* including FMD and OMD.
* @spu_hdr: Location of start of SPU request (FMD field)
* @cipher_parms: Parameters describing cipher request
@@ -1162,11 +1167,11 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
}
/**
- * spu_cipher_req_finish() - Finish building a SPU request message header for a
+ * spu2_cipher_req_finish() - Finish building a SPU request message header for a
* block cipher request.
* @spu_hdr: Start of the request message header (MH field)
* @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound: 0 encrypt, 1 decrypt
+ * @is_inbound: 0 encrypt, 1 decrypt
* @cipher_parms: Parameters describing cipher operation to be performed
* @data_size: Length of the data in the BD field
*
@@ -1222,7 +1227,7 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
}
/**
- * spu_request_pad() - Create pad bytes at the end of the data.
+ * spu2_request_pad() - Create pad bytes at the end of the data.
* @pad_start: Start of buffer where pad bytes are to be written
* @gcm_padding: Length of GCM padding, in bytes
* @hash_pad_len: Number of bytes of padding extend data to full block
@@ -1311,7 +1316,7 @@ u8 spu2_rx_status_len(void)
}
/**
- * spu_status_process() - Process the status from a SPU response message.
+ * spu2_status_process() - Process the status from a SPU response message.
* @statp: start of STATUS word
*
* Return: 0 - if status is good and response should be processed
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index c4669a96eaec..d5d9cabea55a 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -119,8 +119,8 @@ int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
* @from_skip: number of bytes to skip in from_sg. Non-zero when previous
* request included part of the buffer in entry in from_sg.
* Assumes from_skip < from_sg->length.
- * @from_nents number of entries in from_sg
- * @length number of bytes to copy. may reach this limit before exhausting
+ * @from_nents: number of entries in from_sg
+ * @length: number of bytes to copy. may reach this limit before exhausting
* from_sg.
*
* Copies the entries themselves, not the data in the entries. Assumes to_sg has
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index a780e627838a..8b8ed77d8715 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -71,6 +71,9 @@ struct caam_skcipher_alg {
* @adata: authentication algorithm details
* @cdata: encryption algorithm details
* @authsize: authentication tag (a.k.a. ICV / MAC) size
+ * @xts_key_fallback: true if fallback tfm needs to be used due
+ * to unsupported xts key lengths
+ * @fallback: xts fallback tfm
*/
struct caam_ctx {
struct caam_flc flc[NUM_OP];
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index dd5f101e43f8..e313233ec6de 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -187,7 +187,8 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
}
/**
- * Count leading zeros, need it to strip, from a given scatterlist
+ * caam_rsa_count_leading_zeros - Count leading zeros, need it to strip,
+ * from a given scatterlist
*
* @sgl : scatterlist to count zeros from
* @nbytes: number of zeros, in bytes, to strip
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 711b1acdd4e0..06ee42e8a245 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -10,7 +10,6 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>
-#include <linux/version.h>
#include "cptpf.h"
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index 99b053094f5a..c288c4b51783 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -10,7 +10,7 @@
#include "nitrox_isr.h"
#include "nitrox_mbx.h"
-/**
+/*
* One vector for each type of ring
* - NPS packet ring, AQMQ ring and ZQMQ ring
*/
@@ -216,7 +216,7 @@ static void nps_core_int_tasklet(unsigned long data)
}
}
-/**
+/*
* nps_core_int_isr - interrupt handler for NITROX errors and
* mailbox communication
*/
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 53ef06792133..df95ba26b414 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -58,14 +58,15 @@ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
struct device *dev = DEV(ndev);
- dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
+ DMA_BIDIRECTIONAL);
dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
DMA_TO_DEVICE);
kfree(sr->in.sgcomp);
sr->in.sg = NULL;
sr->in.sgmap_cnt = 0;
- dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+ dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
DMA_BIDIRECTIONAL);
dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
DMA_TO_DEVICE);
@@ -178,7 +179,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
return 0;
incomp_err:
- dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
sr->in.sgmap_cnt = 0;
return ret;
}
@@ -203,7 +204,7 @@ static int dma_map_outbufs(struct nitrox_softreq *sr,
return 0;
outcomp_map_err:
- dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL);
sr->out.sgmap_cnt = 0;
sr->out.sg = NULL;
return ret;
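These nitrox hunks restore the DMA API's symmetry rule: dma_unmap_sg() must be passed the same entry count that was given to dma_map_sg(), not the possibly smaller value dma_map_sg() returned, and sg_nents() recomputes that original count from the list itself. A minimal sketch of the pattern, with hypothetical variables:

int nr_mapped;

/* map: pass the full entry count; keep the returned count for the hardware */
nr_mapped = dma_map_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
if (!nr_mapped)
	return -EIO;

/* ... the device operates on nr_mapped entries ... */

/* unmap: must use the original entry count, not nr_mapped */
dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);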
diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
index 58fb3ed6e644..54f6fb054119 100644
--- a/drivers/crypto/cavium/zip/common.h
+++ b/drivers/crypto/cavium/zip/common.h
@@ -56,7 +56,6 @@
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/version.h>
/* Device specific zlib function definitions */
#include "zip_device.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 88275b4867ea..5976530c00a8 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -59,7 +59,7 @@ struct ccp_crypto_queue {
#define CCP_CRYPTO_MAX_QLEN 100
static struct ccp_crypto_queue req_queue;
-static spinlock_t req_queue_lock;
+static DEFINE_SPINLOCK(req_queue_lock);
struct ccp_crypto_cmd {
struct list_head entry;
@@ -410,7 +410,6 @@ static int ccp_crypto_init(void)
return ret;
}
- spin_lock_init(&req_queue_lock);
INIT_LIST_HEAD(&req_queue.cmds);
req_queue.backlog = &req_queue.cmds;
req_queue.cmd_count = 0;
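This ccp hunk (and the geode-aes.c hunk later in the series) replaces runtime lock setup with static initialization, removing the window in which the lock exists but is not yet initialized. Both forms side by side, as a sketch:

/* runtime: the lock is unusable until the init function has run */
static spinlock_t req_queue_lock;
/* ... later, in module init ... */
spin_lock_init(&req_queue_lock);

/* static: valid as soon as the module is loaded */
static DEFINE_SPINLOCK(req_queue_lock);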
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 0971ee60f840..6777582aa1ce 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -548,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
-int ccp_dev_suspend(struct sp_device *sp)
+void ccp_dev_suspend(struct sp_device *sp)
{
struct ccp_device *ccp = sp->ccp_data;
unsigned long flags;
@@ -556,7 +556,7 @@ int ccp_dev_suspend(struct sp_device *sp)
/* If there's no device there's nothing to do */
if (!ccp)
- return 0;
+ return;
spin_lock_irqsave(&ccp->cmd_lock, flags);
@@ -572,11 +572,9 @@ int ccp_dev_suspend(struct sp_device *sp)
while (!ccp_queues_suspended(ccp))
wait_event_interruptible(ccp->suspend_queue,
ccp_queues_suspended(ccp));
-
- return 0;
}
-int ccp_dev_resume(struct sp_device *sp)
+void ccp_dev_resume(struct sp_device *sp)
{
struct ccp_device *ccp = sp->ccp_data;
unsigned long flags;
@@ -584,7 +582,7 @@ int ccp_dev_resume(struct sp_device *sp)
/* If there's no device there's nothing to do */
if (!ccp)
- return 0;
+ return;
spin_lock_irqsave(&ccp->cmd_lock, flags);
@@ -597,8 +595,6 @@ int ccp_dev_resume(struct sp_device *sp)
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
}
int ccp_dev_init(struct sp_device *sp)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index d6a8f4e4b14a..bb88198c874e 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -2418,7 +2418,6 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dst.address += CCP_ECC_OUTPUT_SIZE;
ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
CCP_ECC_MODULUS_BYTES);
- dst.address += CCP_ECC_OUTPUT_SIZE;
/* Restore the workarea address */
dst.address = save;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index cb9b4c4e371e..da3872c48308 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -21,6 +21,7 @@
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
+#include <linux/cpufeature.h>
#include <asm/smp.h>
@@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp)
struct sev_device *sev;
int ret = -ENOMEM;
+ if (!boot_cpu_has(X86_FEATURE_SEV)) {
+ dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
+ return 0;
+ }
+
sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
if (!sev)
goto e_err;
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index 6284a15e5047..7eb3e4668286 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -213,12 +213,8 @@ void sp_destroy(struct sp_device *sp)
int sp_suspend(struct sp_device *sp)
{
- int ret;
-
if (sp->dev_vdata->ccp_vdata) {
- ret = ccp_dev_suspend(sp);
- if (ret)
- return ret;
+ ccp_dev_suspend(sp);
}
return 0;
@@ -226,12 +222,8 @@ int sp_suspend(struct sp_device *sp)
int sp_resume(struct sp_device *sp)
{
- int ret;
-
if (sp->dev_vdata->ccp_vdata) {
- ret = ccp_dev_resume(sp);
- if (ret)
- return ret;
+ ccp_dev_resume(sp);
}
return 0;
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 0218d0670eee..20377e67f65d 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -134,8 +134,8 @@ struct sp_device *sp_get_psp_master_device(void);
int ccp_dev_init(struct sp_device *sp);
void ccp_dev_destroy(struct sp_device *sp);
-int ccp_dev_suspend(struct sp_device *sp);
-int ccp_dev_resume(struct sp_device *sp);
+void ccp_dev_suspend(struct sp_device *sp);
+void ccp_dev_resume(struct sp_device *sp);
#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
@@ -144,15 +144,8 @@ static inline int ccp_dev_init(struct sp_device *sp)
return 0;
}
static inline void ccp_dev_destroy(struct sp_device *sp) { }
-
-static inline int ccp_dev_suspend(struct sp_device *sp)
-{
- return 0;
-}
-static inline int ccp_dev_resume(struct sp_device *sp)
-{
- return 0;
-}
+static inline void ccp_dev_suspend(struct sp_device *sp) { }
+static inline void ccp_dev_resume(struct sp_device *sp) { }
#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
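Note the header's usual dual definition: real prototypes when CONFIG_CRYPTO_DEV_SP_CCP is set, empty static inline stubs otherwise. With the return type now void, the stubs collapse to one-liners. The idiom in general form (CONFIG_FOO and foo_suspend are placeholders):

#ifdef CONFIG_FOO
void foo_suspend(struct sp_device *sp);
#else
/* stub: compiles away entirely when the option is disabled */
static inline void foo_suspend(struct sp_device *sp) { }
#endif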
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index f471dbaef1fb..f468594ef8af 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -356,6 +356,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
+ { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
index 5e697a90ea7f..5c9d47f3be37 100644
--- a/drivers/crypto/ccp/tee-dev.c
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -5,7 +5,7 @@
* Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
* Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*/
#include <linux/types.h>
@@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
if (!start_addr)
return -ENOMEM;
+ memset(start_addr, 0x0, ring_size);
rb_mgr->ring_start = start_addr;
rb_mgr->ring_size = ring_size;
rb_mgr->ring_pa = __psp_pa(start_addr);
@@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
void *buf, size_t len, struct tee_ring_cmd **resp)
{
struct tee_ring_cmd *cmd;
- u32 rptr, wptr;
int nloop = 1000, ret = 0;
+ u32 rptr;
*resp = NULL;
mutex_lock(&tee->rb_mgr.mutex);
- wptr = tee->rb_mgr.wptr;
-
- /* Check if ring buffer is full */
+ /* Loop until empty entry found in ring buffer */
do {
+ /* Get pointer to ring buffer command entry */
+ cmd = (struct tee_ring_cmd *)
+ (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
+
rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
- if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
+ /* Check if ring buffer is full or command entry is waiting
+ * for response from TEE
+ */
+ if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+ cmd->flag == CMD_WAITING_FOR_RESPONSE))
break;
- dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
- rptr, wptr);
+ dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+ rptr, tee->rb_mgr.wptr);
- /* Wait if ring buffer is full */
+ /* Wait if ring buffer is full or TEE is processing data */
mutex_unlock(&tee->rb_mgr.mutex);
schedule_timeout_interruptible(msecs_to_jiffies(10));
mutex_lock(&tee->rb_mgr.mutex);
} while (--nloop);
- if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
- dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
- rptr, wptr);
+ if (!nloop &&
+ (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+ cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
+ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
+ rptr, tee->rb_mgr.wptr, cmd->flag);
ret = -EBUSY;
goto unlock;
}
- /* Pointer to empty data entry in ring buffer */
- cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+ /* Do not submit command if PSP got disabled while processing any
+ * command in another thread
+ */
+ if (psp_dead) {
+ ret = -EBUSY;
+ goto unlock;
+ }
/* Write command data into ring buffer */
cmd->cmd_id = cmd_id;
@@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
memset(&cmd->buf[0], 0, sizeof(cmd->buf));
memcpy(&cmd->buf[0], buf, len);
+ /* Indicate driver is waiting for response */
+ cmd->flag = CMD_WAITING_FOR_RESPONSE;
+
/* Update local copy of write pointer */
tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
@@ -309,14 +326,14 @@ static int tee_wait_cmd_completion(struct psp_tee_device *tee,
struct tee_ring_cmd *resp,
unsigned int timeout)
{
- /* ~5ms sleep per loop => nloop = timeout * 200 */
- int nloop = timeout * 200;
+ /* ~1ms sleep per loop => nloop = timeout * 1000 */
+ int nloop = timeout * 1000;
while (--nloop) {
if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
return 0;
- usleep_range(5000, 5100);
+ usleep_range(1000, 1100);
}
dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
@@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
return ret;
ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
- if (ret)
+ if (ret) {
+ resp->flag = CMD_RESPONSE_TIMEDOUT;
return ret;
+ }
memcpy(buf, &resp->buf[0], len);
*status = resp->status;
+ resp->flag = CMD_RESPONSE_COPIED;
+
return 0;
}
EXPORT_SYMBOL(psp_tee_process_cmd);
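Taken together, the tee-dev.c changes give each ring slot a small state machine: tee_submit_cmd() now refuses to reuse a slot whose flag is still CMD_WAITING_FOR_RESPONSE, and psp_tee_process_cmd() moves the flag forward on completion or timeout. The lifecycle, sketched as a C comment:

/*
 * Per-slot response flag lifecycle (driver side):
 *
 *   CMD_RESPONSE_INVALID        slot never used
 *        | tee_submit_cmd() writes the command
 *        v
 *   CMD_WAITING_FOR_RESPONSE    slot must not be reused
 *        |                   \
 *        | response copied    \ timeout
 *        v                     v
 *   CMD_RESPONSE_COPIED     CMD_RESPONSE_TIMEDOUT
 */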
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
index f09960112115..49d26158b71e 100644
--- a/drivers/crypto/ccp/tee-dev.h
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*
* Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
* Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
@@ -18,7 +18,7 @@
#include <linux/mutex.h>
#define TEE_DEFAULT_TIMEOUT 10
-#define MAX_BUFFER_SIZE 992
+#define MAX_BUFFER_SIZE 988
/**
* enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
@@ -82,6 +82,20 @@ enum tee_cmd_state {
};
/**
+ * enum cmd_resp_state - TEE command's response status maintained by driver
+ * @CMD_RESPONSE_INVALID: initial state when no command is written to ring
+ * @CMD_WAITING_FOR_RESPONSE: driver waiting for response from TEE
+ * @CMD_RESPONSE_TIMEDOUT: failed to get response from TEE
+ * @CMD_RESPONSE_COPIED: driver has copied response from TEE
+ */
+enum cmd_resp_state {
+ CMD_RESPONSE_INVALID,
+ CMD_WAITING_FOR_RESPONSE,
+ CMD_RESPONSE_TIMEDOUT,
+ CMD_RESPONSE_COPIED,
+};
+
+/**
* struct tee_ring_cmd - Structure of the command buffer in TEE ring
* @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
* interface
@@ -91,6 +105,7 @@ enum tee_cmd_state {
* @pdata: private data (currently unused)
* @res1: reserved region
* @buf: TEE command specific buffer
+ * @flag: refers to &enum cmd_resp_state
*/
struct tee_ring_cmd {
u32 cmd_id;
@@ -100,6 +115,7 @@ struct tee_ring_cmd {
u64 pdata;
u32 res1[2];
u8 buf[MAX_BUFFER_SIZE];
+ u32 flag;
/* Total size: 1024 bytes */
} __packed;
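The shrink of MAX_BUFFER_SIZE from 992 to 988 exactly offsets the new 4-byte flag, so struct tee_ring_cmd stays at 1024 bytes and the ring geometry is unchanged. A compile-time check would be one way to pin that invariant down (a sketch, not part of the patch):

/* the 4-byte flag is paid for by shrinking buf from 992 to 988 bytes */
static_assert(sizeof(struct tee_ring_cmd) == 1024);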
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index d0e59e942568..e599ac6dc162 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -352,10 +352,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
- if (IS_ERR(new_drvdata->cc_base)) {
- dev_err(dev, "Failed to ioremap registers");
+ if (IS_ERR(new_drvdata->cc_base))
return PTR_ERR(new_drvdata->cc_base);
- }
dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
req_mem_cc_regs);
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index f5a336634daa..6933546f87b1 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -126,11 +126,6 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
return container_of(ctx->dev, struct uld_ctx, dev);
}
-static inline int is_ofld_imm(const struct sk_buff *skb)
-{
- return (skb->len <= SGE_MAX_WR_LEN);
-}
-
static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
@@ -769,13 +764,14 @@ static inline void create_wreq(struct chcr_context *ctx,
struct uld_ctx *u_ctx = ULD_CTX(ctx);
unsigned int tx_channel_id, rx_channel_id;
unsigned int txqidx = 0, rxqidx = 0;
- unsigned int qid, fid;
+ unsigned int qid, fid, portno;
get_qidxs(req, &txqidx, &rxqidx);
qid = u_ctx->lldi.rxq_ids[rxqidx];
fid = u_ctx->lldi.rxq_ids[0];
+ portno = rxqidx / ctx->rxq_perchan;
tx_channel_id = txqidx / ctx->txq_perchan;
- rx_channel_id = rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -797,15 +793,13 @@ static inline void create_wreq(struct chcr_context *ctx,
/**
* create_cipher_wr - form the WR for cipher operations
- * @req: cipher req.
- * @ctx: crypto driver context of the request.
- * @qid: ingress qid where response of this WR should be received.
- * @op_type: encryption or decryption
+ * @wrparam: Container for create_cipher_wr()'s parameters
*/
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
@@ -822,6 +816,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
reqctx->dst_ofst);
dst_size = get_space_for_phys_dsgl(nents);
@@ -1559,7 +1554,8 @@ static inline void chcr_free_shash(struct crypto_shash *base_hash)
/**
* create_hash_wr - Create hash work request
- * @req - Cipher req base
+ * @req: Cipher req base
+ * @param: Container for create_hash_wr()'s parameters
*/
static struct sk_buff *create_hash_wr(struct ahash_request *req,
struct hash_wr_param *param)
@@ -1580,6 +1576,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
int error = 0;
unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
param->sg_len) <= SGE_MAX_WR_LEN;
@@ -2438,6 +2435,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2457,6 +2455,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
if (req->cryptlen == 0)
return NULL;
@@ -2710,9 +2709,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
u32 temp;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
temp = req->assoclen + req->cryptlen +
@@ -2752,9 +2753,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
struct chcr_context *ctx = c_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct dsgl_walk dsgl_walk;
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
dsgl_walk_init(&dsgl_walk, phys_cpl);
dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
reqctx->dst_ofst);
@@ -2958,6 +2961,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
@@ -2967,6 +2971,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen = req->assoclen - 8;
else
@@ -3127,6 +3133,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chcr_context *ctx = a_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
@@ -3143,6 +3150,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct adapter *adap = padap(ctx->dev);
unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
+ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
assoclen = req->assoclen - 8;
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index f91f9d762a45..39c70e6255f9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -1,4 +1,4 @@
-/**
+/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
@@ -184,7 +184,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
struct uld_ctx *u_ctx;
/* Create the device and add it in the device list */
- pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
+ pr_info_once("%s\n", DRV_DESC);
if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
return ERR_PTR(-EOPNOTSUPP);
@@ -309,4 +309,3 @@ module_exit(chcr_crypto_exit);
MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index b02f981e7c32..f7c8bb95a71b 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -44,7 +44,6 @@
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
-#define DRV_VERSION "1.0.0.0-ko"
#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
#define MAX_PENDING_REQ_TO_HW 20
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 4ee010f39912..fa5a9f207bc9 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -21,7 +21,7 @@
/* Static structures */
static void __iomem *_iobase;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
/* Write a 128 bit field (either a writable key or IV) */
static inline void
@@ -383,8 +383,6 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto erequest;
}
- spin_lock_init(&lock);
-
/* Clear any pending activity */
iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 843192666dc3..e572f9982d4e 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -68,6 +68,8 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
+ select CRYPTO_CURVE25519
+ select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)
accelerator, which can accelerate RSA and DH algorithms.
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 181c109b19f7..e0b4a1982ee9 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -10,6 +10,14 @@
#define HPRE_PF_DEF_Q_NUM 64
#define HPRE_PF_DEF_Q_BASE 0
+/*
+ * Type field used in qm SQC DW6.
+ * 0 - algorithms already supported in V2, such as RSA and DH;
+ * 1 - ECC algorithms added in V3.
+ */
+#define HPRE_V2_ALG_TYPE 0
+#define HPRE_V3_ECC_ALG_TYPE 1
+
enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
@@ -18,7 +26,6 @@ enum {
};
enum hpre_ctrl_dbgfs_file {
- HPRE_CURRENT_QM,
HPRE_CLEAR_ENABLE,
HPRE_CLUSTER_CTRL,
HPRE_DEBUG_FILE_NUM,
@@ -75,6 +82,9 @@ enum hpre_alg_type {
HPRE_ALG_KG_CRT = 0x3,
HPRE_ALG_DH_G2 = 0x4,
HPRE_ALG_DH = 0x5,
+ HPRE_ALG_ECC_MUL = 0xD,
+ /* shared by x25519 and x448, but x448 is not supported now */
+ HPRE_ALG_CURVE25519_MUL = 0x10,
};
struct hpre_sqe {
@@ -92,8 +102,8 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hisi_qp *hpre_create_qp(void);
-int hpre_algs_register(void);
-void hpre_algs_unregister(void);
+struct hisi_qp *hpre_create_qp(u8 type);
+int hpre_algs_register(struct hisi_qm *qm);
+void hpre_algs_unregister(struct hisi_qm *qm);
#endif
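hpre_create_qp() now takes the qm SQC DW6 algorithm type, so each tfm init path can request a queue of the right kind. A hypothetical caller, following the constants above (0 for V2-era RSA/DH, 1 for V3 ECC):

struct hisi_qp *qp;

/* ECC tfms need a V3 ECC queue; RSA/DH keep the V2 type */
qp = hpre_create_qp(HPRE_V3_ECC_ALG_TYPE);
if (!qp)
	return ERR_PTR(-ENODEV);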
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a87f9904087a..a380087c83f7 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
+#include <crypto/curve25519.h>
#include <crypto/dh.h>
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
@@ -36,6 +39,13 @@ struct hpre_ctx;
#define HPRE_DFX_SEC_TO_US 1000000
#define HPRE_DFX_US_TO_NS 1000
+/* size in bytes of the curve order n */
+#define HPRE_ECC_NIST_P192_N_SIZE 24
+#define HPRE_ECC_NIST_P256_N_SIZE 32
+
+/* size in bytes */
+#define HPRE_ECC_HW256_KSZ_B 32
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -61,14 +71,35 @@ struct hpre_dh_ctx {
* else if base if the counterpart public key we
* compute the shared secret
* ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
+ * low address: d--->n, please refer to Hisilicon HPRE UM
*/
- char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
+ char *xa_p;
dma_addr_t dma_xa_p;
char *g; /* m */
dma_addr_t dma_g;
};
+struct hpre_ecdh_ctx {
+ /* low address: p->a->k->b */
+ unsigned char *p;
+ dma_addr_t dma_p;
+
+ /* low address: x->y */
+ unsigned char *g;
+ dma_addr_t dma_g;
+};
+
+struct hpre_curve25519_ctx {
+ /* low address: p->a->k */
+ unsigned char *p;
+ dma_addr_t dma_p;
+
+ /* gx coordinate */
+ unsigned char *g;
+ dma_addr_t dma_g;
+};
+
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
@@ -80,7 +111,11 @@ struct hpre_ctx {
union {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
+ struct hpre_ecdh_ctx ecdh;
+ struct hpre_curve25519_ctx curve25519;
};
+ /* for ecc algorithms */
+ unsigned int curve_id;
};
struct hpre_asym_request {
@@ -91,6 +126,8 @@ struct hpre_asym_request {
union {
struct akcipher_request *rsa;
struct kpp_request *dh;
+ struct kpp_request *ecdh;
+ struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -152,12 +189,12 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
}
}
-static struct hisi_qp *hpre_get_qp_and_start(void)
+static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
struct hisi_qp *qp;
int ret;
- qp = hpre_create_qp();
+ qp = hpre_create_qp(type);
if (!qp) {
pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
@@ -261,8 +298,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
- if (unlikely(!tmp))
- return;
if (src) {
if (req->src)
@@ -272,8 +307,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
- if (unlikely(!tmp))
- return;
if (req->dst) {
if (dst)
@@ -288,13 +321,16 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
void **kreq)
{
+ struct device *dev = HPRE_DEV(ctx);
struct hpre_asym_request *req;
- int err, id, done;
+ unsigned int err, done, alg;
+ int id;
#define HPRE_NO_HW_ERR 0
#define HPRE_HW_TASK_DONE 3
#define HREE_HW_ERR_MASK 0x7ff
#define HREE_SQE_DONE_MASK 0x3
+#define HREE_ALG_TYPE_MASK 0x1f
id = (int)le16_to_cpu(sqe->tag);
req = ctx->req_list[id];
hpre_rm_req_from_ctx(req);
@@ -307,7 +343,11 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
HREE_SQE_DONE_MASK;
if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
- return 0;
+ return 0;
+
+ alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
+ dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
+ alg, done, err);
return -EINVAL;
}
@@ -413,7 +453,6 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
struct hpre_sqe *sqe = resp;
struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
-
if (unlikely(!req)) {
atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
return;
@@ -422,18 +461,29 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
req->cb(ctx, resp);
}
-static int hpre_ctx_init(struct hpre_ctx *ctx)
+static void hpre_stop_qp_and_put(struct hisi_qp *qp)
+{
+ hisi_qm_stop_qp(qp);
+ hisi_qm_free_qps(&qp, 1);
+}
+
+static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
struct hisi_qp *qp;
+ int ret;
- qp = hpre_get_qp_and_start();
+ qp = hpre_get_qp_and_start(type);
if (IS_ERR(qp))
return PTR_ERR(qp);
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
- return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ if (ret)
+ hpre_stop_qp_and_put(qp);
+
+ return ret;
}
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
@@ -510,7 +560,6 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
return ret;
}
-#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -674,7 +723,7 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return hpre_ctx_init(ctx);
+ return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}
static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
@@ -683,7 +732,6 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
hpre_dh_clear_ctx(ctx, true);
}
-#endif
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
@@ -1100,7 +1148,7 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->rsa.soft_tfm);
}
- ret = hpre_ctx_init(ctx);
+ ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
if (ret)
crypto_free_akcipher(ctx->rsa.soft_tfm);
@@ -1115,6 +1163,734 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
crypto_free_akcipher(ctx->rsa.soft_tfm);
}
+static void hpre_key_to_big_end(u8 *data, int len)
+{
+ int i, j;
+ u8 tmp;
+
+ for (i = 0; i < len / 2; i++) {
+ j = len - i - 1;
+ tmp = data[j];
+ data[j] = data[i];
+ data[i] = tmp;
+ }
+}
+
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
+ bool is_ecdh)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ if (is_clear_all)
+ hisi_qm_stop_qp(ctx->qp);
+
+ if (is_ecdh && ctx->ecdh.p) {
+ /* ecdh: p->a->k->b */
+ memzero_explicit(ctx->ecdh.p + shift, sz);
+ dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+ ctx->ecdh.p = NULL;
+ } else if (!is_ecdh && ctx->curve25519.p) {
+ /* curve25519: p->a->k */
+ memzero_explicit(ctx->curve25519.p + shift, sz);
+ dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
+ ctx->curve25519.dma_p);
+ ctx->curve25519.p = NULL;
+ }
+
+ hpre_ctx_clear(ctx, is_clear_all);
+}
+
+static unsigned int hpre_ecdh_supported_curve(unsigned short id)
+{
+ switch (id) {
+ case ECC_CURVE_NIST_P192:
+ case ECC_CURVE_NIST_P256:
+ return HPRE_ECC_HW256_KSZ_B;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
+{
+ unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
+ u8 i = 0;
+
+ while (i < ndigits - 1) {
+ memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
+ i++;
+ }
+
+ memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
+ hpre_key_to_big_end((u8 *)addr, cur_sz);
+}
+
+static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
+ unsigned int cur_sz)
+{
+ unsigned int shifta = ctx->key_sz << 1;
+ unsigned int shiftb = ctx->key_sz << 2;
+ void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
+ void *a = ctx->ecdh.p + shifta - cur_sz;
+ void *b = ctx->ecdh.p + shiftb - cur_sz;
+ void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
+ void *y = ctx->ecdh.g + shifta - cur_sz;
+ const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
+ char *n;
+
+ if (unlikely(!curve))
+ return -EINVAL;
+
+ n = kzalloc(ctx->key_sz, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
+ fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
+ fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
+ fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
+ fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
+ fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
+
+ if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
+ kfree(n);
+ return -EINVAL;
+ }
+
+ kfree(n);
+ return 0;
+}
+
+static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
+{
+ switch (id) {
+ case ECC_CURVE_NIST_P192:
+ return HPRE_ECC_NIST_P192_N_SIZE;
+ case ECC_CURVE_NIST_P256:
+ return HPRE_ECC_NIST_P256_N_SIZE;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, shift, curve_sz;
+ int ret;
+
+ ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
+ if (!ctx->key_sz)
+ return -EINVAL;
+
+ curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ if (!curve_sz || params->key_size > curve_sz)
+ return -EINVAL;
+
+ sz = ctx->key_sz;
+
+ if (!ctx->ecdh.p) {
+ ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
+ GFP_KERNEL);
+ if (!ctx->ecdh.p)
+ return -ENOMEM;
+ }
+
+ shift = sz << 2;
+ ctx->ecdh.g = ctx->ecdh.p + shift;
+ ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
+
+ ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
+ if (ret) {
+ dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
+ dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
+ ctx->ecdh.p = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool hpre_key_is_zero(char *key, unsigned short key_sz)
+{
+ int i;
+
+ for (i = 0; i < key_sz; i++)
+ if (key[i])
+ return false;
+
+ return true;
+}
+
+static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, sz_shift;
+ struct ecdh params;
+ int ret;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+ dev_err(dev, "failed to decode ecdh key!\n");
+ return -EINVAL;
+ }
+
+ if (hpre_key_is_zero(params.key, params.key_size)) {
+ dev_err(dev, "Invalid hpre key!\n");
+ return -EINVAL;
+ }
+
+ hpre_ecc_clear_ctx(ctx, false, true);
+
+ ret = hpre_ecdh_set_param(ctx, &params);
+ if (ret < 0) {
+ dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
+ return ret;
+ }
+
+ sz = ctx->key_sz;
+ sz_shift = (sz << 1) + sz - params.key_size;
+ memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
+
+ return 0;
+}
+
+static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
+}
+
+static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
+{
+ unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ char *p;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.ecdh;
+ areq->dst_len = ctx->key_sz << 1;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ p = sg_virt(areq->dst);
+ memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
+ memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
+
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (req->dst_len < ctx->key_sz << 1) {
+ req->dst_len = ctx->key_sz << 1;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_ecdh_cb;
+ h_req->areq.ecdh = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->ecdh.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int tmpshift;
+ dma_addr_t dma = 0;
+ void *ptr;
+ int shift;
+
+ /* Src_data includes gx and gy. */
+ shift = ctx->key_sz - (len >> 1);
+ if (unlikely(shift < 0))
+ return -EINVAL;
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ tmpshift = ctx->key_sz << 1;
+ scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
+ memcpy(ptr + shift, ptr + tmpshift, len >> 1);
+ memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_ecdh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_ecdh_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->ecdh.dma_g);
+ }
+
+ ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
+static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ /* max size is the pub_key_size, including both x and y */
+ return ctx->key_sz << 1;
+}
+
+static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P192;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P256;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, true);
+}
+
+static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ u8 secret[CURVE25519_KEY_SIZE] = { 0 };
+ unsigned int sz = ctx->key_sz;
+ const struct ecc_curve *curve;
+ unsigned int shift = sz << 1;
+ void *p;
+
+ /*
+ * The key from 'buf' is in little-endian; preprocess it as described in
+ * RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"), then convert it
+ * to big-endian. Only then does the result match the software
+ * curve25519 implementation in crypto.
+ */
+ memcpy(secret, buf, len);
+ curve25519_clamp_secret(secret);
+ hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
+
+ p = ctx->curve25519.p + sz - len;
+
+ curve = ecc_get_curve25519();
+
+ /* fill curve parameters */
+ fill_curve_param(p, curve->p, len, curve->g.ndigits);
+ fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
+ memcpy(p + shift, secret, len);
+ fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
+ memzero_explicit(secret, CURVE25519_KEY_SIZE);
+}
+
+static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ /* p->a->k->gx */
+ if (!ctx->curve25519.p) {
+ ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
+ &ctx->curve25519.dma_p,
+ GFP_KERNEL);
+ if (!ctx->curve25519.p)
+ return -ENOMEM;
+ }
+
+ ctx->curve25519.g = ctx->curve25519.p + shift + sz;
+ ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
+
+ hpre_curve25519_fill_curve(ctx, buf, len);
+
+ return 0;
+}
+
+static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ int ret = -EINVAL;
+
+ if (len != CURVE25519_KEY_SIZE ||
+ !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "key is null or key len is not 32bytes!\n");
+ return ret;
+ }
+
+ /* Free old secret if any */
+ hpre_ecc_clear_ctx(ctx, false, false);
+
+ ctx->key_sz = CURVE25519_KEY_SIZE;
+ ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
+ if (ret) {
+ dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
+ hpre_ecc_clear_ctx(ctx, false, false);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
+}
+
+static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.curve25519;
+ areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
+
+ hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (unlikely(req->dst_len < ctx->key_sz)) {
+ req->dst_len = ctx->key_sz;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_curve25519_cb;
+ h_req->areq.curve25519 = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->curve25519.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static void hpre_curve25519_src_modulo_p(u8 *ptr)
+{
+ int i;
+
+ for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
+ ptr[i] = 0;
+
+ /* The result is ptr's last byte minus 0xed (the last byte of p) */
+ ptr[i] -= 0xed;
+}
+
+static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ u8 p[CURVE25519_KEY_SIZE] = { 0 };
+ const struct ecc_curve *curve;
+ dma_addr_t dma = 0;
+ u8 *ptr;
+
+ if (len != CURVE25519_KEY_SIZE) {
+ dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
+ return -EINVAL;
+ }
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(ptr, data, 0, len, 0);
+
+ if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "gx is null!\n");
+ goto err;
+ }
+
+ /*
+ * Src_data (gx) is in little-endian order; the MSB of the final byte
+ * must be masked as described in RFC 7748, and the data then converted
+ * to big-endian form before hisi_hpre can use it.
+ */
+ ptr[31] &= 0x7f;
+ hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
+
+ curve = ecc_get_curve25519();
+
+ fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
+
+ /*
+ * When src_data lies in [2^255 - 19, 2^255 - 1] it is not less than p,
+ * so reduce it modulo p before use.
+ */
+ if (memcmp(ptr, p, ctx->key_sz) >= 0)
+ hpre_curve25519_src_modulo_p(ptr);
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+
+err:
+ dma_free_coherent(dev, ctx->key_sz, ptr, dma);
+ return -EINVAL;
+}
+
+static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (!data || !sg_is_last(data) || len != ctx->key_sz) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_curve25519_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_curve25519_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n",
+ ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->curve25519.dma_g);
+ }
+
+ ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
+static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->key_sz;
+}
+
+static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, false);
+}
+
static struct akcipher_alg rsa = {
.sign = hpre_rsa_dec,
.verify = hpre_rsa_enc,
@@ -1135,7 +1911,6 @@ static struct akcipher_alg rsa = {
},
};
-#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
.set_secret = hpre_dh_set_secret,
.generate_public_key = hpre_dh_compute_value,
@@ -1152,9 +1927,83 @@ static struct kpp_alg dh = {
.cra_module = THIS_MODULE,
},
};
-#endif
-int hpre_algs_register(void)
+static struct kpp_alg ecdh_nist_p192 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p192_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p192",
+ .cra_driver_name = "hpre-ecdh",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg ecdh_nist_p256 = {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p256_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "hpre-ecdh",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static struct kpp_alg curve25519_alg = {
+ .set_secret = hpre_curve25519_set_secret,
+ .generate_public_key = hpre_curve25519_compute_value,
+ .compute_shared_secret = hpre_curve25519_compute_value,
+ .max_size = hpre_curve25519_max_size,
+ .init = hpre_curve25519_init_tfm,
+ .exit = hpre_curve25519_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "curve25519",
+ .cra_driver_name = "hpre-curve25519",
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int hpre_register_ecdh(void)
+{
+ int ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p192);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_kpp(&ecdh_nist_p256);
+ if (ret) {
+ crypto_unregister_kpp(&ecdh_nist_p192);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_unregister_ecdh(void)
+{
+ crypto_unregister_kpp(&ecdh_nist_p256);
+ crypto_unregister_kpp(&ecdh_nist_p192);
+}
+
+int hpre_algs_register(struct hisi_qm *qm)
{
int ret;
@@ -1162,19 +2011,37 @@ int hpre_algs_register(void)
ret = crypto_register_akcipher(&rsa);
if (ret)
return ret;
-#ifdef CONFIG_CRYPTO_DH
+
ret = crypto_register_kpp(&dh);
if (ret)
- crypto_unregister_akcipher(&rsa);
-#endif
+ goto unreg_rsa;
+
+ if (qm->ver >= QM_HW_V3) {
+ ret = hpre_register_ecdh();
+ if (ret)
+ goto unreg_dh;
+ ret = crypto_register_kpp(&curve25519_alg);
+ if (ret)
+ goto unreg_ecdh;
+ }
+ return 0;
+unreg_ecdh:
+ hpre_unregister_ecdh();
+unreg_dh:
+ crypto_unregister_kpp(&dh);
+unreg_rsa:
+ crypto_unregister_akcipher(&rsa);
return ret;
}
-void hpre_algs_unregister(void)
+void hpre_algs_unregister(struct hisi_qm *qm)
{
- crypto_unregister_akcipher(&rsa);
-#ifdef CONFIG_CRYPTO_DH
+ if (qm->ver >= QM_HW_V3) {
+ crypto_unregister_kpp(&curve25519_alg);
+ hpre_unregister_ecdh();
+ }
+
crypto_unregister_kpp(&dh);
-#endif
+ crypto_unregister_akcipher(&rsa);
}
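hpre_algs_register() now unwinds with the kernel's usual goto-label style: each successful registration gains a label that later failures fall through, undoing the work in reverse order. The shape of the pattern, reduced to placeholder names:

ret = register_a();
if (ret)
	return ret;

ret = register_b();
if (ret)
	goto unreg_a;

ret = register_c();
if (ret)
	goto unreg_b;

return 0;

unreg_b:
	unregister_b();
unreg_a:
	unregister_a();
	return ret;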
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index e7a2c70eb9cf..046bc962c8b2 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,7 +13,6 @@
#include <linux/uacce.h>
#include "hpre.h"
-#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_COMM_CNT_CLR_CE 0x0
@@ -119,7 +118,6 @@ static struct hisi_qm_list hpre_devices = {
};
static const char * const hpre_debug_file_name[] = {
- [HPRE_CURRENT_QM] = "current_qm",
[HPRE_CLEAR_ENABLE] = "rdclr_en",
[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
@@ -226,41 +224,44 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
-struct hisi_qp *hpre_create_qp(void)
+struct hisi_qp *hpre_create_qp(u8 type)
{
int node = cpu_to_node(smp_processor_id());
struct hisi_qp *qp = NULL;
int ret;
- ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
+ if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
+ return NULL;
+
+ /*
+ * type: 0 - RSA/DH algorithms supported in V2;
+ *       1 - ECC algorithms in V3.
+ */
+ ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
if (!ret)
return qp;
return NULL;
}
-static void hpre_pasid_enable(struct hisi_qm *qm)
+static void hpre_config_pasid(struct hisi_qm *qm)
{
- u32 val;
-
- val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
- val |= BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
- val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
- val |= BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
-}
+ u32 val1, val2;
-static void hpre_pasid_disable(struct hisi_qm *qm)
-{
- u32 val;
+ if (qm->ver >= QM_HW_V3)
+ return;
- val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
- val &= ~BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
- val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
- val &= ~BIT(HPRE_PASID_EN_BIT);
- writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+ val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+ val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+ if (qm->use_sva) {
+ val1 |= BIT(HPRE_PASID_EN_BIT);
+ val2 |= BIT(HPRE_PASID_EN_BIT);
+ } else {
+ val1 &= ~BIT(HPRE_PASID_EN_BIT);
+ val2 &= ~BIT(HPRE_PASID_EN_BIT);
+ }
+ writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
+ writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}
static int hpre_cfg_by_dsm(struct hisi_qm *qm)
@@ -320,7 +321,7 @@ static int hpre_set_cluster(struct hisi_qm *qm)
}
/*
- * For Kunpeng 920, we shoul disable FLR triggered by hardware (BME/PM/SRIOV).
+ * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
* Or it may stay in D3 state when we bind and unbind hpre quickly,
* as it does FLR triggered by hardware.
*/
@@ -383,15 +384,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
if (qm->ver == QM_HW_V2) {
ret = hpre_cfg_by_dsm(qm);
if (ret)
- dev_err(dev, "acpi_evaluate_dsm err.\n");
+ return ret;
disable_flr_of_bme(qm);
-
- /* Enable data buffer pasid */
- if (qm->use_sva)
- hpre_pasid_enable(qm);
}
+ /* Config data buffer pasid needed by Kunpeng 920 */
+ hpre_config_pasid(qm);
+
return ret;
}
@@ -401,10 +401,6 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
unsigned long offset;
int i;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* clear clusterX/cluster_ctrl */
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
@@ -456,49 +452,6 @@ static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
return &hpre->qm;
}
-static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
-{
- struct hisi_qm *qm = hpre_file_to_qm(file);
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
-{
- struct hisi_qm *qm = hpre_file_to_qm(file);
- u32 num_vfs = qm->vfs_num;
- u32 vfq_num, tmp;
-
- if (val > num_vfs)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
- if (val == num_vfs) {
- qm->debug.curr_qm_qp_num =
- qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
- } else {
- qm->debug.curr_qm_qp_num = vfq_num;
- }
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
-}
-
static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
@@ -519,7 +472,7 @@ static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
- return 0;
+ return 0;
}
static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
@@ -541,7 +494,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
- return 0;
+ return 0;
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -554,9 +507,6 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->type) {
- case HPRE_CURRENT_QM:
- val = hpre_current_qm_read(file);
- break;
case HPRE_CLEAR_ENABLE:
val = hpre_clear_enable_read(file);
break;
@@ -597,11 +547,6 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
spin_lock_irq(&file->lock);
switch (file->type) {
- case HPRE_CURRENT_QM:
- ret = hpre_current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case HPRE_CLEAR_ENABLE:
ret = hpre_clear_enable_write(file, val);
if (ret)
@@ -740,11 +685,6 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
int ret;
- ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
- HPRE_CURRENT_QM);
- if (ret)
- return ret;
-
ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
HPRE_CLEAR_ENABLE);
if (ret)
@@ -812,9 +752,9 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
}
if (pdev->revision >= QM_HW_V3)
- qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
+ qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
else
- qm->algs = "rsa\ndh\n";
+ qm->algs = "rsa\ndh";
qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
@@ -867,6 +807,20 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}
+static void hpre_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR;
+ err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+ err_info->msi_wr_port = HPRE_WR_MSI_PORT;
+ err_info->acpi_rst = "HRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+}
+
static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_init = hpre_set_user_domain_and_cache,
.hw_err_enable = hpre_hw_error_enable,
@@ -875,16 +829,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR,
- .dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE,
- .msi_wr_port = HPRE_WR_MSI_PORT,
- .acpi_rst = "HRST",
- }
+ .err_info_init = hpre_err_info_init,
};
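
The hunk above replaces the err_info table embedded in each driver's ops struct with an err_info_init() callback that fills the shared qm->err_info at probe time, so the QM core no longer dereferences qm->err_ini->err_info. A minimal stand-alone C sketch of that callback pattern follows; struct layout and mask values here are illustrative, not the driver's real ones:

#include <stdio.h>

struct err_info { unsigned int ce, nfe; };
struct qm { struct err_info err_info; };
struct err_ini { void (*err_info_init)(struct qm *qm); };

static void demo_err_info_init(struct qm *qm)
{
	/* placeholder masks, not the driver's real values */
	qm->err_info.ce = 0x1;
	qm->err_info.nfe = 0xfe;
}

int main(void)
{
	struct err_ini ini = { .err_info_init = demo_err_info_init };
	struct qm qm;

	ini.err_info_init(&qm);
	printf("ce=0x%x nfe=0x%x\n", qm.err_info.ce, qm.err_info.nfe);
	return 0;
}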
static int hpre_pf_probe_init(struct hpre *hpre)
@@ -892,13 +837,12 @@ static int hpre_pf_probe_init(struct hpre *hpre)
struct hisi_qm *qm = &hpre->qm;
int ret;
- qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
-
ret = hpre_set_user_domain_and_cache(qm);
if (ret)
return ret;
qm->err_ini = &hpre_err_ini;
+ qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
return 0;
@@ -1006,8 +950,6 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF) {
- if (qm->use_sva && qm->ver == QM_HW_V2)
- hpre_pasid_disable(qm);
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
hisi_qm_dev_err_uninit(qm);
@@ -1016,7 +958,6 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_uninit(qm);
}
-
static const struct pci_error_handlers hpre_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
.slot_reset = hisi_qm_dev_slot_reset,
@@ -1075,4 +1016,5 @@ module_exit(hpre_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 13cb4216561a..ce439a0c66c9 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -38,6 +38,7 @@
#define QM_MB_CMD_SQC_BT 0x4
#define QM_MB_CMD_CQC_BT 0x5
#define QM_MB_CMD_SQC_VFT_V2 0x6
+#define QM_MB_CMD_STOP_QP 0x8
#define QM_MB_CMD_SEND_BASE 0x300
#define QM_MB_EVENT_SHIFT 8
@@ -93,6 +94,12 @@
#define QM_DB_PRIORITY_SHIFT_V1 48
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
+#define QM_QUE_ISO_CFG_V 0x0030
+#define QM_QUE_ISO_EN 0x100154
+#define QM_CAPBILITY 0x100158
+#define QM_QP_NUN_MASK GENMASK(10, 0)
+#define QM_QP_DB_INTERVAL 0x10000
+#define QM_QP_MAX_NUM_SHIFT 11
#define QM_DB_CMD_SHIFT_V2 12
#define QM_DB_RAND_SHIFT_V2 16
#define QM_DB_INDEX_SHIFT_V2 32
@@ -129,9 +136,9 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0)
+#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
-#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff
+#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
#define QM_ABNORMAL_INT_STATUS 0x100008
#define QM_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
@@ -164,6 +171,14 @@
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
+#define QM_DFX_MB_CNT_VF 0x104010
+#define QM_DFX_DB_CNT_VF 0x104020
+#define QM_DFX_SQE_CNT_VF_SQN 0x104030
+#define QM_DFX_CQE_CNT_VF_CQN 0x104040
+#define QM_DFX_QN_SHIFT 16
+#define CURRENT_FUN_MASK GENMASK(5, 0)
+#define CURRENT_Q_MASK GENMASK(31, 16)
+
#define POLL_PERIOD 10
#define POLL_TIMEOUT 1000
#define WAIT_PERIOD_US_MAX 200
@@ -173,6 +188,7 @@
#define QM_CACHE_WB_DONE 0x208
#define PCI_BAR_2 2
+#define PCI_BAR_4 4
#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)
@@ -334,6 +350,7 @@ struct hisi_qm_hw_ops {
void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
+ int (*stop_qp)(struct hisi_qp *qp);
};
struct qm_dfx_item {
@@ -350,6 +367,7 @@ static struct qm_dfx_item qm_dfx_files[] = {
};
static const char * const qm_debug_file_name[] = {
+ [CURRENT_QM] = "current_qm",
[CURRENT_Q] = "current_q",
[CLEAR_ENABLE] = "clear_enable",
};
@@ -373,6 +391,8 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
+ { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
+ { .int_msk = BIT(14), .msg = "qm_flr_timeout" },
{ /* sentinel */ }
};
@@ -557,21 +577,22 @@ static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
- u64 doorbell;
- u64 dbase;
+ void __iomem *io_base = qm->io_base;
u16 randata = 0;
+ u64 doorbell;
if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
- dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
+ io_base = qm->db_io_base + (u64)qn * qm->db_interval +
+ QM_DOORBELL_SQ_CQ_BASE_V2;
else
- dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
+ io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
((u64)randata << QM_DB_RAND_SHIFT_V2) |
((u64)index << QM_DB_INDEX_SHIFT_V2) |
((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
- writeq(doorbell, qm->io_base + dbase);
+ writeq(doorbell, io_base);
}
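
qm_db_v2() now derives the SQ/CQ doorbell address from a per-queue window (qn * db_interval) inside the doorbell BAR, while EQ/AEQ doorbells stay in the main region. A runnable user-space sketch of the 64-bit doorbell word and the isolated per-queue offset, using the constants from this patch (randata fixed to 0 for brevity):

#include <stdint.h>
#include <stdio.h>

#define QM_DB_CMD_SHIFT_V2	12
#define QM_DB_RAND_SHIFT_V2	16
#define QM_DB_INDEX_SHIFT_V2	32
#define QM_DB_PRIORITY_SHIFT_V2	48
#define QM_QP_DB_INTERVAL	0x10000
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000

int main(void)
{
	uint16_t qn = 3, index = 42;
	uint8_t cmd = 0, priority = 0, randata = 0;

	uint64_t doorbell = qn |
		((uint64_t)cmd << QM_DB_CMD_SHIFT_V2) |
		((uint64_t)randata << QM_DB_RAND_SHIFT_V2) |
		((uint64_t)index << QM_DB_INDEX_SHIFT_V2) |
		((uint64_t)priority << QM_DB_PRIORITY_SHIFT_V2);

	/* with use_db_isolation, each qp owns its own 64 KB window */
	uint64_t offset = (uint64_t)qn * QM_QP_DB_INTERVAL +
			  QM_DOORBELL_SQ_CQ_BASE_V2;

	printf("doorbell word: 0x%016llx, offset: 0x%llx\n",
	       (unsigned long long)doorbell, (unsigned long long)offset);
	return 0;
}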
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
@@ -865,6 +886,26 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
return 0;
}
+static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
+{
+ u32 remain_q_num, vfq_num;
+ u32 num_vfs = qm->vfs_num;
+
+ vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
+ if (vfq_num >= qm->max_qp_num)
+ return qm->max_qp_num;
+
+ remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
+ if (vfq_num + remain_q_num <= qm->max_qp_num)
+ return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
+
+	/*
+	 * If vfq_num + remain_q_num > max_qp_num, the last
+	 * remain_q_num VFs each carry one extra queue.
+	 */
+ return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
+}
+
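
qm_get_vf_qp_num() answers "how many queues does VF fun_num own" under the same distribution rule applied at SR-IOV enable time below. A stand-alone sketch with made-up numbers (96 controller queues, 64 kept by the PF, 5 VFs, max_qp_num of 7) that exercises the uneven-remainder branch:

#include <stdio.h>

static unsigned int vf_qp_num(unsigned int ctrl_qp_num, unsigned int pf_qp_num,
			      unsigned int num_vfs, unsigned int max_qp_num,
			      unsigned int fun_num)
{
	unsigned int vfq_num = (ctrl_qp_num - pf_qp_num) / num_vfs;
	unsigned int remain = (ctrl_qp_num - pf_qp_num) % num_vfs;

	if (vfq_num >= max_qp_num)
		return max_qp_num;
	if (vfq_num + remain <= max_qp_num)
		return fun_num == num_vfs ? vfq_num + remain : vfq_num;
	/* the last 'remain' VFs carry one extra queue each */
	return fun_num + remain > num_vfs ? vfq_num + 1 : vfq_num;
}

int main(void)
{
	unsigned int vf;

	for (vf = 1; vf <= 5; vf++)
		printf("VF%u -> %u qps\n", vf, vf_qp_num(96, 64, 5, 7, vf));
	return 0;
}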
static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
struct qm_debug *debug = file->debug;
@@ -918,6 +959,41 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
return 0;
}
+static u32 current_qm_read(struct debugfs_file *file)
+{
+ struct hisi_qm *qm = file_to_qm(file);
+
+ return readl(qm->io_base + QM_DFX_MB_CNT_VF);
+}
+
+static int current_qm_write(struct debugfs_file *file, u32 val)
+{
+ struct hisi_qm *qm = file_to_qm(file);
+ u32 tmp;
+
+ if (val > qm->vfs_num)
+ return -EINVAL;
+
+	/* Calculate curr_qm_qp_num from the PF or VF function ID and store it */
+ if (!val)
+ qm->debug.curr_qm_qp_num = qm->qp_num;
+ else
+ qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
+
+ writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
+
+ tmp = val |
+ (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
+ writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
+
+ return 0;
+}
+
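
current_qm_write() selects which function's counters the DFX SQE/CQE registers report: the new function ID lands in the low bits while CURRENT_Q_MASK (GENMASK(31, 16)) preserves the queue number already selected by current_q_write(). The read-modify-write, modelled in user space with an invented register value:

#include <stdint.h>
#include <stdio.h>

#define CURRENT_Q_MASK 0xffff0000u /* GENMASK(31, 16) */

static uint32_t select_fun(uint32_t reg, uint32_t fun)
{
	/* keep the selected queue, replace the selected function */
	return fun | (reg & CURRENT_Q_MASK);
}

int main(void)
{
	uint32_t reg = 0x002a0003; /* queue 0x2a selected, function 3 */

	printf("0x%08x\n", select_fun(reg, 5)); /* -> 0x002a0005 */
	return 0;
}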
static ssize_t qm_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
@@ -929,6 +1005,9 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
mutex_lock(&file->lock);
switch (index) {
+ case CURRENT_QM:
+ val = current_qm_read(file);
+ break;
case CURRENT_Q:
val = current_q_read(file);
break;
@@ -971,27 +1050,24 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
mutex_lock(&file->lock);
switch (index) {
+ case CURRENT_QM:
+ ret = current_qm_write(file, val);
+ break;
case CURRENT_Q:
ret = current_q_write(file, val);
- if (ret)
- goto err_input;
break;
case CLEAR_ENABLE:
ret = clear_enable_write(file, val);
- if (ret)
- goto err_input;
break;
default:
ret = -EINVAL;
- goto err_input;
}
mutex_unlock(&file->lock);
- return count;
+ if (ret)
+ return ret;
-err_input:
- mutex_unlock(&file->lock);
- return ret;
+ return count;
}
static const struct file_operations qm_debug_fops = {
@@ -1529,12 +1605,12 @@ static const struct file_operations qm_cmd_fops = {
.write = qm_cmd_write,
};
-static void qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
+static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
+ enum qm_debug_file index)
{
- struct dentry *qm_d = qm->debug.qm_d;
struct debugfs_file *file = qm->debug.files + index;
- debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
+ debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
&qm_debug_fops);
file->index = index;
@@ -1628,7 +1704,7 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
writel(error_status, qm->io_base +
QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_ini->err_info.nfe,
+ writel(qm->err_info.nfe,
qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_RECOVERED;
}
@@ -1639,6 +1715,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
return ACC_ERR_RECOVERED;
}
+static int qm_stop_qp(struct hisi_qp *qp)
+{
+ return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
+}
+
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
.get_irq_num = qm_get_irq_num_v1,
@@ -1654,6 +1735,16 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.hw_error_handle = qm_hw_error_handle_v2,
};
+static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
+ .get_vft = qm_get_vft_v2,
+ .qm_db = qm_db_v2,
+ .get_irq_num = qm_get_irq_num_v2,
+ .hw_error_init = qm_hw_error_init_v2,
+ .hw_error_uninit = qm_hw_error_uninit_v2,
+ .hw_error_handle = qm_hw_error_handle_v2,
+ .stop_qp = qm_stop_qp,
+};
+
static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1933,6 +2024,14 @@ static int qm_drain_qp(struct hisi_qp *qp)
if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
return 0;
+	/* Kunpeng 930 supports draining a qp by the device itself */
+ if (qm->ops->stop_qp) {
+ ret = qm->ops->stop_qp(qp);
+ if (ret)
+ dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
+ return ret;
+ }
+
addr = qm_ctx_alloc(qm, size, &dma_addr);
if (IS_ERR(addr)) {
dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
@@ -2132,6 +2231,8 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
{
struct hisi_qp *qp = q->priv;
struct hisi_qm *qm = qp->qm;
+ resource_size_t phys_base = qm->db_phys_base +
+ qp->qp_id * qm->db_interval;
size_t sz = vma->vm_end - vma->vm_start;
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
@@ -2143,16 +2244,19 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
if (qm->ver == QM_HW_V1) {
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
- } else {
+ } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
+ } else {
+ if (sz > qm->db_interval)
+ return -EINVAL;
}
vma->vm_flags |= VM_IO;
return remap_pfn_range(vma, vma->vm_start,
- qm->phys_base >> PAGE_SHIFT,
+ phys_base >> PAGE_SHIFT,
sz, pgprot_noncached(vma->vm_page_prot));
case UACCE_QFRT_DUS:
if (sz != qp->qdma.size)
@@ -2267,14 +2371,20 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->priv = qm;
uacce->algs = qm->algs;
- if (qm->ver == QM_HW_V1) {
- mmio_page_nr = QM_DOORBELL_PAGE_NR;
+ if (qm->ver == QM_HW_V1)
uacce->api_ver = HISI_QM_API_VER_BASE;
- } else {
+ else if (qm->ver == QM_HW_V2)
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ else
+ uacce->api_ver = HISI_QM_API_VER3_BASE;
+
+ if (qm->ver == QM_HW_V1)
+ mmio_page_nr = QM_DOORBELL_PAGE_NR;
+ else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
mmio_page_nr = QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
- uacce->api_ver = HISI_QM_API_VER2_BASE;
- }
+ else
+ mmio_page_nr = qm->db_interval / PAGE_SIZE;
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
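
With doorbell isolation, the UACCE MMIO region shrinks to a single per-queue doorbell window instead of the whole doorbell page range. A sketch of the three-way page budget chosen above; QM_DOORBELL_PAGE_NR's value is assumed here, since its definition is not visible in this hunk:

#include <stdio.h>

#define PAGE_SIZE 4096			/* assumed 4 KB pages */
#define QM_DOORBELL_PAGE_NR 9		/* assumed value from qm.h */
#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
#define QM_QP_DB_INTERVAL 0x10000

static unsigned int mmio_page_nr(int hw_ver, int use_db_isolation)
{
	if (hw_ver == 1)
		return QM_DOORBELL_PAGE_NR;
	if (hw_ver == 2 || !use_db_isolation)
		return QM_DOORBELL_PAGE_NR +
		       QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
	/* V3 with isolation: one 64 KB doorbell window per queue pair */
	return QM_QP_DB_INTERVAL / PAGE_SIZE;
}

int main(void)
{
	printf("v1=%u v2=%u v3+iso=%u pages\n",
	       mmio_page_nr(1, 0), mmio_page_nr(2, 0), mmio_page_nr(3, 1));
	return 0;
}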
@@ -2482,8 +2592,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1)
qm->ops = &qm_hw_ops_v1;
- else
+ else if (qm->ver == QM_HW_V2)
qm->ops = &qm_hw_ops_v2;
+ else
+ qm->ops = &qm_hw_ops_v3;
pci_set_drvdata(pdev, qm);
mutex_init(&qm->mailbox_lock);
@@ -2492,13 +2604,23 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
qm->misc_ctl = false;
}
-static void hisi_qm_pci_uninit(struct hisi_qm *qm)
+static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
- pci_free_irq_vectors(pdev);
+ if (qm->use_db_isolation)
+ iounmap(qm->db_io_base);
+
iounmap(qm->io_base);
pci_release_mem_regions(pdev);
+}
+
+static void hisi_qm_pci_uninit(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ pci_free_irq_vectors(pdev);
+ qm_put_pci_res(qm);
pci_disable_device(pdev);
}
@@ -2527,7 +2649,6 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
qm->qdma.va, qm->qdma.dma);
- memset(&qm->qdma, 0, sizeof(qm->qdma));
}
qm_irq_unregister(qm);
@@ -2681,7 +2802,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
{
int ret;
- WARN_ON(!qm->qdma.dma);
+ WARN_ON(!qm->qdma.va);
if (qm->fun_type == QM_HW_PF) {
ret = qm_dev_mem_reset(qm);
@@ -2930,9 +3051,11 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
qm->debug.qm_d = qm_d;
/* only show this in PF */
- if (qm->fun_type == QM_HW_PF)
+ if (qm->fun_type == QM_HW_PF) {
+ qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
- qm_create_debugfs_file(qm, i);
+ qm_create_debugfs_file(qm, qm_d, i);
+ }
debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
@@ -2960,6 +3083,10 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
struct qm_dfx_registers *regs;
int i;
+ /* clear current_qm */
+ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
+ writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+
/* clear current_q */
writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
@@ -2982,7 +3109,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
static void qm_hw_error_init(struct hisi_qm *qm)
{
- const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
+ struct hisi_qm_err_info *err_info = &qm->err_info;
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
@@ -3175,30 +3302,46 @@ EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
- u32 remain_q_num, q_num, i, j;
+ u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
+ u32 max_qp_num = qm->max_qp_num;
u32 q_base = qm->qp_num;
int ret;
if (!num_vfs)
return -EINVAL;
- remain_q_num = qm->ctrl_qp_num - qm->qp_num;
+ vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
- /* If remain queues not enough, return error. */
- if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
+ /* If vfs_q_num is less than num_vfs, return error. */
+ if (vfs_q_num < num_vfs)
return -EINVAL;
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ q_num = vfs_q_num / num_vfs;
+ remain_q_num = vfs_q_num % num_vfs;
+
+ for (i = num_vfs; i > 0; i--) {
+		/*
+		 * If q_num + remain_q_num would exceed max_qp_num on the
+		 * last VF, spread the remaining queues one per VF instead.
+		 */
+ if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
+ act_q_num = q_num + remain_q_num;
+ remain_q_num = 0;
+ } else if (remain_q_num > 0) {
+ act_q_num = q_num + 1;
+ remain_q_num--;
+ } else {
+ act_q_num = q_num;
+ }
+
+ act_q_num = min_t(int, act_q_num, max_qp_num);
+ ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
if (ret) {
- for (j = i; j > 0; j--)
+ for (j = num_vfs; j > i; j--)
hisi_qm_set_vft(qm, j, 0, 0);
return ret;
}
- q_base += q_num;
+ q_base += act_q_num;
}
return 0;
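
The rewritten loop walks VFs from the highest index down: the last VF absorbs the whole remainder if that fits under max_qp_num, otherwise the remainder is handed out one queue per VF. Modelled stand-alone with the same numbers as the query-side sketch above, so the two stay consistent:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int num_vfs = 5, vfs_q_num = 32, max_qp_num = 7;
	unsigned int q_num = vfs_q_num / num_vfs;	/* 6 */
	unsigned int remain = vfs_q_num % num_vfs;	/* 2 */
	unsigned int q_base = 64;			/* PF owns 0..63 */
	unsigned int i;

	for (i = num_vfs; i > 0; i--) {
		unsigned int act;

		if (i == num_vfs && q_num + remain <= max_qp_num) {
			act = q_num + remain;	/* last VF takes it all */
			remain = 0;
		} else if (remain > 0) {
			act = q_num + 1;	/* one extra queue */
			remain--;
		} else {
			act = q_num;
		}
		act = MIN(act, max_qp_num);
		printf("VF%u: base %u, %u queues\n", i, q_base, act);
		q_base += act;
	}
	return 0;
}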
@@ -3318,15 +3461,15 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
/* get device hardware error status */
err_sts = qm->err_ini->get_dev_hw_err_status(qm);
if (err_sts) {
- if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
+ if (err_sts & qm->err_info.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
if (qm->err_ini->log_dev_hw_err)
qm->err_ini->log_dev_hw_err(qm, err_sts);
/* ce error does not need to be reset */
- if ((err_sts | qm->err_ini->err_info.dev_ce_mask) ==
- qm->err_ini->err_info.dev_ce_mask) {
+ if ((err_sts | qm->err_info.dev_ce_mask) ==
+ qm->err_info.dev_ce_mask) {
if (qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm,
err_sts);
@@ -3639,7 +3782,7 @@ static int qm_soft_reset(struct hisi_qm *qm)
acpi_status s;
s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
- qm->err_ini->err_info.acpi_rst,
+ qm->err_info.acpi_rst,
NULL, &value);
if (ACPI_FAILURE(s)) {
pci_err(pdev, "NO controller reset method!\n");
@@ -3707,12 +3850,11 @@ static void qm_restart_prepare(struct hisi_qm *qm)
/* temporarily close the OOO port used for PEH to write out MSI */
value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
- writel(value & ~qm->err_ini->err_info.msi_wr_port,
+ writel(value & ~qm->err_info.msi_wr_port,
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
/* clear dev ecc 2bit error source if having */
- value = qm_get_dev_err_status(qm) &
- qm->err_ini->err_info.ecc_2bits_mask;
+ value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
if (value && qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm, value);
@@ -3736,7 +3878,7 @@ static void qm_restart_done(struct hisi_qm *qm)
/* open the OOO port for PEH to write out MSI */
value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
- value |= qm->err_ini->err_info.msi_wr_port;
+ value |= qm->err_info.msi_wr_port;
writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
qm->err_status.is_qm_ecc_mbit = false;
@@ -3875,8 +4017,7 @@ static int qm_check_dev_error(struct hisi_qm *qm)
if (ret)
return ret;
- return (qm_get_dev_err_status(qm) &
- qm->err_ini->err_info.ecc_2bits_mask);
+ return (qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask);
}
void hisi_qm_reset_prepare(struct pci_dev *pdev)
@@ -4084,7 +4225,7 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
mutex_unlock(&qm_list->lock);
if (flag) {
- ret = qm_list->register_to_crypto();
+ ret = qm_list->register_to_crypto(qm);
if (ret) {
mutex_lock(&qm_list->lock);
list_del(&qm->list);
@@ -4115,59 +4256,134 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
mutex_unlock(&qm_list->lock);
if (list_empty(&qm_list->list))
- qm_list->unregister_from_crypto();
+ qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
-static int hisi_qm_pci_init(struct hisi_qm *qm)
+static int qm_get_qp_num(struct hisi_qm *qm)
+{
+ if (qm->ver == QM_HW_V1)
+ qm->ctrl_qp_num = QM_QNUM_V1;
+ else if (qm->ver == QM_HW_V2)
+ qm->ctrl_qp_num = QM_QNUM_V2;
+ else
+ qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
+ QM_QP_NUN_MASK;
+
+ if (qm->use_db_isolation)
+ qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
+ QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
+ else
+ qm->max_qp_num = qm->ctrl_qp_num;
+
+ /* check if qp number is valid */
+ if (qm->qp_num > qm->max_qp_num) {
+ dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
+ qm->qp_num, qm->max_qp_num);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
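
On QM_HW_V3 the queue budget is no longer hard-coded: bits [10:0] of QM_CAPBILITY carry ctrl_qp_num and, with doorbell isolation, bits [21:11] carry the per-function max_qp_num. A decoding sketch with a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

#define QM_QP_NUN_MASK		0x7ffu	/* GENMASK(10, 0) */
#define QM_QP_MAX_NUM_SHIFT	11

int main(void)
{
	/* hypothetical capability value: max 256 qps/function, 1024 total */
	uint32_t cap = (256u << QM_QP_MAX_NUM_SHIFT) | 1024u;
	uint32_t ctrl_qp_num = cap & QM_QP_NUN_MASK;
	uint32_t max_qp_num = (cap >> QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;

	printf("ctrl_qp_num=%u max_qp_num=%u\n", ctrl_qp_num, max_qp_num);
	return 0;
}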
+static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev;
- unsigned int num_vec;
int ret;
- ret = pci_enable_device_mem(pdev);
- if (ret < 0) {
- dev_err(dev, "Failed to enable device mem!\n");
- return ret;
- }
-
ret = pci_request_mem_regions(pdev, qm->dev_name);
if (ret < 0) {
dev_err(dev, "Failed to request mem regions!\n");
- goto err_disable_pcidev;
+ return ret;
}
qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
- qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
- qm->io_base = ioremap(qm->phys_base, qm->phys_size);
+ qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
if (!qm->io_base) {
ret = -EIO;
- goto err_release_mem_regions;
+ goto err_request_mem_regions;
+ }
+
+ if (qm->ver > QM_HW_V2) {
+ if (qm->fun_type == QM_HW_PF)
+ qm->use_db_isolation = readl(qm->io_base +
+ QM_QUE_ISO_EN) & BIT(0);
+ else
+ qm->use_db_isolation = readl(qm->io_base +
+ QM_QUE_ISO_CFG_V) & BIT(0);
+ }
+
+ if (qm->use_db_isolation) {
+ qm->db_interval = QM_QP_DB_INTERVAL;
+ qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
+ qm->db_io_base = ioremap(qm->db_phys_base,
+ pci_resource_len(pdev, PCI_BAR_4));
+ if (!qm->db_io_base) {
+ ret = -EIO;
+ goto err_ioremap;
+ }
+ } else {
+ qm->db_phys_base = qm->phys_base;
+ qm->db_io_base = qm->io_base;
+ qm->db_interval = 0;
}
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm_get_qp_num(qm);
+ if (ret)
+ goto err_db_ioremap;
+ }
+
+ return 0;
+
+err_db_ioremap:
+ if (qm->use_db_isolation)
+ iounmap(qm->db_io_base);
+err_ioremap:
+ iounmap(qm->io_base);
+err_request_mem_regions:
+ pci_release_mem_regions(pdev);
+ return ret;
+}
+
+static int hisi_qm_pci_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct device *dev = &pdev->dev;
+ unsigned int num_vec;
+ int ret;
+
+ ret = pci_enable_device_mem(pdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable device mem!\n");
+ return ret;
+ }
+
+ ret = qm_get_pci_res(qm);
+ if (ret)
+ goto err_disable_pcidev;
+
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret < 0)
- goto err_iounmap;
+ goto err_get_pci_res;
pci_set_master(pdev);
if (!qm->ops->get_irq_num) {
ret = -EOPNOTSUPP;
- goto err_iounmap;
+ goto err_get_pci_res;
}
num_vec = qm->ops->get_irq_num(qm);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
- goto err_iounmap;
+ goto err_get_pci_res;
}
return 0;
-err_iounmap:
- iounmap(qm->io_base);
-err_release_mem_regions:
- pci_release_mem_regions(pdev);
+err_get_pci_res:
+ qm_put_pci_res(qm);
err_disable_pcidev:
pci_disable_device(pdev);
return ret;
@@ -4187,28 +4403,28 @@ int hisi_qm_init(struct hisi_qm *qm)
hisi_qm_pre_init(qm);
- ret = qm_alloc_uacce(qm);
- if (ret < 0)
- dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
-
ret = hisi_qm_pci_init(qm);
if (ret)
- goto err_remove_uacce;
+ return ret;
ret = qm_irq_register(qm);
if (ret)
- goto err_pci_uninit;
+ goto err_pci_init;
if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
/* v2 starts to support get vft by mailbox */
ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
if (ret)
- goto err_irq_unregister;
+ goto err_irq_register;
}
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
+
ret = hisi_qm_memory_init(qm);
if (ret)
- goto err_irq_unregister;
+ goto err_alloc_uacce;
INIT_WORK(&qm->work, qm_work_process);
if (qm->fun_type == QM_HW_PF)
@@ -4218,13 +4434,13 @@ int hisi_qm_init(struct hisi_qm *qm)
return 0;
-err_irq_unregister:
- qm_irq_unregister(qm);
-err_pci_uninit:
- hisi_qm_pci_uninit(qm);
-err_remove_uacce:
+err_alloc_uacce:
uacce_remove(qm->uacce);
qm->uacce = NULL;
+err_irq_register:
+ qm_irq_unregister(qm);
+err_pci_init:
+ hisi_qm_pci_uninit(qm);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 54967c6b9c78..acefdf8b3a50 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -51,14 +51,6 @@
#define PEH_AXUSER_CFG 0x401001
#define PEH_AXUSER_CFG_ENABLE 0xffffffff
-#define QM_DFX_MB_CNT_VF 0x104010
-#define QM_DFX_DB_CNT_VF 0x104020
-#define QM_DFX_SQE_CNT_VF_SQN 0x104030
-#define QM_DFX_CQE_CNT_VF_CQN 0x104040
-#define QM_DFX_QN_SHIFT 16
-#define CURRENT_FUN_MASK GENMASK(5, 0)
-#define CURRENT_Q_MASK GENMASK(31, 16)
-
#define QM_AXI_RRESP BIT(0)
#define QM_AXI_BRESP BIT(1)
#define QM_ECC_MBIT BIT(2)
@@ -72,10 +64,13 @@
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
#define QM_DB_RANDOM_INVALID BIT(12)
+#define QM_MAILBOX_TIMEOUT BIT(13)
+#define QM_FLR_TIMEOUT BIT(14)
#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID)
+ QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
+ QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
#define QM_BASE_CE QM_ECC_1BIT
#define QM_Q_DEPTH 1024
@@ -123,6 +118,7 @@ enum qm_fun_type {
};
enum qm_debug_file {
+ CURRENT_QM,
CURRENT_Q,
CLEAR_ENABLE,
DEBUG_FILE_NUM,
@@ -193,14 +189,14 @@ struct hisi_qm_err_ini {
void (*open_axi_master_ooo)(struct hisi_qm *qm);
void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
- struct hisi_qm_err_info err_info;
+ void (*err_info_init)(struct hisi_qm *qm);
};
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
- int (*register_to_crypto)(void);
- void (*unregister_from_crypto)(void);
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
};
struct hisi_qm {
@@ -209,12 +205,15 @@ struct hisi_qm {
const char *dev_name;
struct pci_dev *pdev;
void __iomem *io_base;
+ void __iomem *db_io_base;
u32 sqe_size;
u32 qp_base;
u32 qp_num;
u32 qp_in_used;
u32 ctrl_qp_num;
+ u32 max_qp_num;
u32 vfs_num;
+ u32 db_interval;
struct list_head list;
struct hisi_qm_list *qm_list;
@@ -230,6 +229,7 @@ struct hisi_qm {
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_info err_info;
struct hisi_qm_err_status err_status;
unsigned long misc_ctl; /* driver removing and reset sched */
@@ -252,8 +252,11 @@ struct hisi_qm {
const char *algs;
bool use_sva;
bool is_frozen;
+
+ /* doorbell isolation enable */
+ bool use_db_isolation;
resource_size_t phys_base;
- resource_size_t phys_size;
+ resource_size_t db_phys_base;
struct uacce_device *uacce;
int mode;
};
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 8ca945ac297e..0a3c8f019b02 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index 91ee2bb575df..c8de1b51c843 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for the Hisilicon SEC units found on Hip06 Hip07
+ * Driver for the HiSilicon SEC units found on Hip06 Hip07
*
- * Copyright (c) 2016-2017 Hisilicon Limited.
+ * Copyright (c) 2016-2017 HiSilicon Limited.
*/
#include <linux/acpi.h>
#include <linux/atomic.h>
@@ -233,7 +233,7 @@ static int sec_queue_map_io(struct sec_queue *queue)
IORESOURCE_MEM,
2 + queue->queue_id);
if (!res) {
- dev_err(dev, "Failed to get queue %d memory resource\n",
+ dev_err(dev, "Failed to get queue %u memory resource\n",
queue->queue_id);
return -ENOMEM;
}
@@ -653,12 +653,12 @@ static int sec_queue_free(struct sec_queue *queue)
struct sec_dev_info *info = queue->dev_info;
if (queue->queue_id >= SEC_Q_NUM) {
- dev_err(info->dev, "No queue %d\n", queue->queue_id);
+ dev_err(info->dev, "No queue %u\n", queue->queue_id);
return -ENODEV;
}
if (!queue->in_use) {
- dev_err(info->dev, "Queue %d is idle\n", queue->queue_id);
+ dev_err(info->dev, "Queue %u is idle\n", queue->queue_id);
return -ENODEV;
}
@@ -834,6 +834,7 @@ int sec_queue_stop_release(struct sec_queue *queue)
/**
* sec_queue_empty() - Is this hardware queue currently empty.
+ * @queue: The queue to test
*
* We need to know if we have an empty queue for some of the chaining modes
* as if it is not empty we may need to hold the message in a software queue
@@ -1315,6 +1316,6 @@ static struct platform_driver sec_driver = {
module_platform_driver(sec_driver);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Hisilicon Security Accelerators");
+MODULE_DESCRIPTION("HiSilicon Security Accelerators");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com");
MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h
index 4d9063a8b10b..179a8250d691 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.h
+++ b/drivers/crypto/hisilicon/sec/sec_drv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2017 Hisilicon Limited. */
+/* Copyright (c) 2016-2017 HiSilicon Limited. */
#ifndef _SEC_DRV_H_
#define _SEC_DRV_H_
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 08491912afd5..dfdce2f21e65 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -4,8 +4,6 @@
#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H
-#include <linux/list.h>
-
#include "../qm.h"
#include "sec_crypto.h"
@@ -50,7 +48,7 @@ struct sec_req {
int err_type;
int req_id;
- int flag;
+ u32 flag;
/* Status of the SEC request */
bool fake_busy;
@@ -139,6 +137,7 @@ struct sec_ctx {
bool pbuf_supported;
struct sec_cipher_ctx c_ctx;
struct sec_auth_ctx a_ctx;
+ struct device *dev;
};
enum sec_endian {
@@ -148,7 +147,6 @@ enum sec_endian {
};
enum sec_debug_file_index {
- SEC_CURRENT_QM,
SEC_CLEAR_ENABLE,
SEC_DEBUG_FILE_NUM,
};
@@ -183,6 +181,6 @@ struct sec_dev {
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 2eaa516b3231..133aede8bf07 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -7,6 +7,7 @@
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
@@ -43,7 +44,6 @@
#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR 128
-#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
@@ -96,7 +96,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
0, QM_Q_DEPTH, GFP_ATOMIC);
mutex_unlock(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
- dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ dev_err(req->ctx->dev, "alloc req id fail!\n");
return req_id;
}
@@ -112,7 +112,7 @@ static void sec_free_req_id(struct sec_req *req)
int req_id = req->req_id;
if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
- dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
+ dev_err(req->ctx->dev, "free request id invalid!\n");
return;
}
@@ -138,7 +138,7 @@ static int sec_aead_verify(struct sec_req *req)
aead_req->cryptlen + aead_req->assoclen -
authsize);
if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
- dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
+ dev_err(req->ctx->dev, "aead verify failure!\n");
return -EBADMSG;
}
@@ -177,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
if (unlikely(req->err_type || done != SEC_SQE_DONE ||
(ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
(ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
- dev_err(SEC_CTX_DEV(ctx),
+ dev_err_ratelimited(ctx->dev,
"err_type[%d],done[%d],flag[%d]\n",
req->err_type, done, flag);
err = -EIO;
@@ -326,8 +326,8 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
static int sec_alg_resource_alloc(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
struct sec_alg_res *res = qp_ctx->res;
+ struct device *dev = ctx->dev;
int ret;
ret = sec_alloc_civ_resource(dev, res);
@@ -360,7 +360,7 @@ alloc_fail:
static void sec_alg_resource_free(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
sec_free_civ_resource(dev, qp_ctx->res);
@@ -373,7 +373,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
int qp_ctx_id, int alg_type)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
struct sec_qp_ctx *qp_ctx;
struct hisi_qp *qp;
int ret = -ENOMEM;
@@ -428,7 +428,7 @@ err_destroy_idr:
static void sec_release_qp_ctx(struct sec_ctx *ctx,
struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
hisi_qm_stop_qp(qp_ctx->qp);
sec_alg_resource_free(ctx, qp_ctx);
@@ -452,6 +452,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
+ ctx->dev = &sec->qm.pdev->dev;
ctx->hlf_q_num = sec->ctx_q_num >> 1;
ctx->pbuf_supported = ctx->sec->iommu_used;
@@ -476,11 +477,9 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
-
kfree(ctx->qp_ctx);
err_destroy_qps:
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
-
return ret;
}
@@ -499,7 +498,7 @@ static int sec_cipher_init(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&c_ctx->c_key_dma, GFP_KERNEL);
if (!c_ctx->c_key)
return -ENOMEM;
@@ -512,7 +511,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx)
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
c_ctx->c_key, c_ctx->c_key_dma);
}
@@ -520,7 +519,7 @@ static int sec_auth_init(struct sec_ctx *ctx)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
- a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&a_ctx->a_key_dma, GFP_KERNEL);
if (!a_ctx->a_key)
return -ENOMEM;
@@ -533,7 +532,7 @@ static void sec_auth_uninit(struct sec_ctx *ctx)
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
a_ctx->a_key, a_ctx->a_key_dma);
}
@@ -546,7 +545,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ pr_err("get error skcipher iv size!\n");
return -EINVAL;
}
@@ -573,10 +572,18 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
sec_ctx_base_uninit(ctx);
}
-static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
const u32 keylen,
const enum sec_cmode c_mode)
{
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ int ret;
+
+ ret = verify_skcipher_des3_key(tfm, key);
+ if (ret)
+ return ret;
+
switch (keylen) {
case SEC_DES3_2KEY_SIZE:
c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
@@ -633,12 +640,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
{
struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
int ret;
if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
+ dev_err(dev, "xts mode key err!\n");
return ret;
}
}
@@ -648,7 +656,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
switch (c_alg) {
case SEC_CALG_3DES:
- ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
+ ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
break;
case SEC_CALG_AES:
case SEC_CALG_SM4:
@@ -659,7 +667,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
+ dev_err(dev, "set sec key err!\n");
return ret;
}
@@ -691,7 +699,7 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -701,21 +709,14 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
copy_size = c_req->c_len;
pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
- qp_ctx->res[req_id].pbuf,
- copy_size);
-
+ qp_ctx->res[req_id].pbuf,
+ copy_size);
if (unlikely(pbuf_length != copy_size)) {
dev_err(dev, "copy src data to pbuf error!\n");
return -EINVAL;
}
c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
-
- if (!c_req->c_in_dma) {
- dev_err(dev, "fail to set pbuffer address!\n");
- return -ENOMEM;
- }
-
c_req->c_out_dma = c_req->c_in_dma;
return 0;
@@ -727,7 +728,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct aead_request *aead_req = req->aead_req.aead_req;
struct sec_cipher_req *c_req = &req->c_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int copy_size, pbuf_length;
int req_id = req->req_id;
@@ -739,7 +740,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
qp_ctx->res[req_id].pbuf,
copy_size);
-
if (unlikely(pbuf_length != copy_size))
dev_err(dev, "copy pbuf data to dst error!\n");
}
@@ -751,7 +751,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
struct sec_aead_req *a_req = &req->aead_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
struct sec_alg_res *res = &qp_ctx->res[req->req_id];
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
int ret;
if (req->use_pbuf) {
@@ -806,7 +806,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
struct scatterlist *src, struct scatterlist *dst)
{
struct sec_cipher_req *c_req = &req->c_req;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
if (req->use_pbuf) {
sec_cipher_pbuf_unmap(ctx, req, dst);
@@ -891,6 +891,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
{
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
int ret;
@@ -904,13 +905,13 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
ret = sec_aead_aes_set_key(c_ctx, &keys);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
+ dev_err(dev, "set sec cipher key err!\n");
goto bad_key;
}
ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
if (ret) {
- dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
+ dev_err(dev, "set sec auth key err!\n");
goto bad_key;
}
@@ -1062,7 +1063,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
cryptlen - iv_size);
if (unlikely(sz != iv_size))
- dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
+ dev_err(req->ctx->dev, "copy output iv error!\n");
}
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
@@ -1160,7 +1161,7 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
ret = sec_skcipher_bd_fill(ctx, req);
if (unlikely(ret)) {
- dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
+ dev_err(ctx->dev, "skcipher bd fill is error!\n");
return ret;
}
@@ -1194,7 +1195,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
a_req->assoclen);
if (unlikely(sz != authsize)) {
- dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
+ dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
}
}
@@ -1259,7 +1260,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
ret = ctx->req_op->bd_send(ctx, req);
if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
- dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
+ dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
goto err_send_req;
}
@@ -1325,7 +1326,7 @@ static int sec_aead_init(struct crypto_aead *tfm)
ctx->alg_type = SEC_AEAD;
ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
- dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
+ dev_err(ctx->dev, "get error aead iv size!\n");
return -EINVAL;
}
@@ -1374,7 +1375,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(auth_ctx->hash_tfm)) {
- dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
+ dev_err(ctx->dev, "aead alloc shash error!\n");
sec_aead_exit(tfm);
return PTR_ERR(auth_ctx->hash_tfm);
}
@@ -1405,10 +1406,40 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
+
+static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
+ struct sec_req *sreq)
+{
+ u32 cryptlen = sreq->c_req.sk_req->cryptlen;
+ struct device *dev = ctx->dev;
+ u8 c_mode = ctx->c_ctx.c_mode;
+ int ret = 0;
+
+ switch (c_mode) {
+ case SEC_CMODE_XTS:
+ if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
+ dev_err(dev, "skcipher XTS mode input length error!\n");
+ ret = -EINVAL;
+ }
+ break;
+ case SEC_CMODE_ECB:
+ case SEC_CMODE_CBC:
+ if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher AES input length error!\n");
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
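
The new helper tightens skcipher input validation per cipher mode: XTS only requires at least one AES block, while ECB/CBC still demand block-aligned lengths. A user-space restatement with a few probe values:

#include <stdio.h>

#define AES_BLOCK_SIZE 16

enum { CMODE_ECB, CMODE_CBC, CMODE_XTS };

static int cryptlen_ok(unsigned int cryptlen, int c_mode)
{
	switch (c_mode) {
	case CMODE_XTS:
		/* ciphertext stealing allows any length >= one block */
		return cryptlen >= AES_BLOCK_SIZE;
	case CMODE_ECB:
	case CMODE_CBC:
		return (cryptlen & (AES_BLOCK_SIZE - 1)) == 0;
	default:
		return 0;
	}
}

int main(void)
{
	printf("xts(15)=%d xts(17)=%d cbc(32)=%d cbc(33)=%d\n",
	       cryptlen_ok(15, CMODE_XTS), cryptlen_ok(17, CMODE_XTS),
	       cryptlen_ok(32, CMODE_CBC), cryptlen_ok(33, CMODE_CBC));
	return 0;
}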
static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
struct skcipher_request *sk_req = sreq->c_req.sk_req;
- struct device *dev = SEC_CTX_DEV(ctx);
+ struct device *dev = ctx->dev;
u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!sk_req->src || !sk_req->dst)) {
@@ -1429,12 +1460,9 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
}
return 0;
} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
- if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
- dev_err(dev, "skcipher aes input length error!\n");
- return -EINVAL;
- }
- return 0;
+ return sec_skcipher_cryptlen_ckeck(ctx, sreq);
}
+
dev_err(dev, "skcipher algorithm error!\n");
return -EINVAL;
@@ -1531,14 +1559,15 @@ static struct skcipher_alg sec_skciphers[] = {
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
- u8 c_alg = ctx->c_ctx.c_alg;
struct aead_request *req = sreq->aead_req.aead_req;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
size_t authsize = crypto_aead_authsize(tfm);
+ struct device *dev = ctx->dev;
+ u8 c_alg = ctx->c_ctx.c_alg;
if (unlikely(!req->src || !req->dst || !req->cryptlen ||
req->assoclen > SEC_MAX_AAD_LEN)) {
- dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+ dev_err(dev, "aead input param error!\n");
return -EINVAL;
}
@@ -1550,7 +1579,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
/* Support AES only */
if (unlikely(c_alg != SEC_CALG_AES)) {
- dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+ dev_err(dev, "aead crypto alg error!\n");
return -EINVAL;
}
if (sreq->c_req.encrypt)
@@ -1559,7 +1588,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
sreq->c_req.c_len = req->cryptlen - authsize;
if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
- dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+ dev_err(dev, "aead crypto length error!\n");
return -EINVAL;
}
@@ -1634,7 +1663,7 @@ static struct aead_alg sec_aeads[] = {
AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};
-int sec_register_to_crypto(void)
+int sec_register_to_crypto(struct hisi_qm *qm)
{
int ret;
@@ -1651,7 +1680,7 @@ int sec_register_to_crypto(void)
return ret;
}
-void sec_unregister_from_crypto(void)
+void sec_unregister_from_crypto(struct hisi_qm *qm)
{
crypto_unregister_skciphers(sec_skciphers,
ARRAY_SIZE(sec_skciphers));
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index b2786e17d8fe..9c78edac56a4 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -64,7 +64,6 @@ enum sec_addr_type {
};
struct sec_sqe_type2 {
-
/*
* mac_len: 0~4 bits
* a_key_len: 5~10 bits
@@ -120,7 +119,6 @@ struct sec_sqe_type2 {
/* c_pad_len_field: 0~1 bits */
__le16 c_pad_len_field;
-
__le64 long_a_data_len;
__le64 a_ivin_addr;
__le64 a_key_addr;
@@ -211,6 +209,6 @@ struct sec_sqe {
struct sec_sqe_type2 type2;
};
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index dc68ba76f65e..6f0062d4408c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -19,7 +19,6 @@
#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
-#define SEC_QUEUE_NUM_V2 1024
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
@@ -35,18 +34,16 @@
#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
-#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
-#define SEC_ENGINE_PF_CFG_OFF 0x300000
-#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
-#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF)
-#define SEC_ECC_ADDR(err) ((err) >> 0)
+#define SEC_ECC_NUM 16
+#define SEC_ECC_MASH 0xFF
#define SEC_CORE_INT_DISABLE 0x0
-#define SEC_CORE_INT_ENABLE 0x1ff
-#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_CORE_INT_ENABLE 0x7c1ff
+#define SEC_CORE_INT_CLEAR 0x7c1ff
#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
@@ -54,24 +51,24 @@
#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
-#define SEC_RAS_NFE_ENB_MSK 0x177
-#define SEC_RAS_DISABLE 0x0
-#define SEC_MEM_START_INIT_REG 0x0100
-#define SEC_MEM_INIT_DONE_REG 0x0104
+#define SEC_RAS_NFE_ENB_MSK 0x7c177
+#define SEC_RAS_DISABLE 0x0
+#define SEC_MEM_START_INIT_REG 0x301100
+#define SEC_MEM_INIT_DONE_REG 0x301104
-#define SEC_CONTROL_REG 0x0200
+#define SEC_CONTROL_REG 0x301200
#define SEC_TRNG_EN_SHIFT 8
#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
-#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
-#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
-#define SEC_SAA_EN_REG 0x0270
-#define SEC_BD_ERR_CHK_EN_REG0 0x0380
-#define SEC_BD_ERR_CHK_EN_REG1 0x0384
-#define SEC_BD_ERR_CHK_EN_REG3 0x038c
+#define SEC_INTERFACE_USER_CTRL0_REG 0x301220
+#define SEC_INTERFACE_USER_CTRL1_REG 0x301224
+#define SEC_SAA_EN_REG 0x301270
+#define SEC_BD_ERR_CHK_EN_REG0 0x301380
+#define SEC_BD_ERR_CHK_EN_REG1 0x301384
+#define SEC_BD_ERR_CHK_EN_REG3 0x30138c
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
@@ -95,9 +92,6 @@
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
-#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
- SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
-
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -117,20 +111,66 @@ static struct hisi_qm_list sec_devices = {
};
static const struct sec_hw_error sec_hw_errors[] = {
- {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
- {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
- {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
- {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
- {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
- {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
- {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
- {.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
- {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
- { /* sentinel */ }
+ {
+ .int_msk = BIT(0),
+ .msg = "sec_axi_rresp_err_rint"
+ },
+ {
+ .int_msk = BIT(1),
+ .msg = "sec_axi_bresp_err_rint"
+ },
+ {
+ .int_msk = BIT(2),
+ .msg = "sec_ecc_2bit_err_rint"
+ },
+ {
+ .int_msk = BIT(3),
+ .msg = "sec_ecc_1bit_err_rint"
+ },
+ {
+ .int_msk = BIT(4),
+ .msg = "sec_req_trng_timeout_rint"
+ },
+ {
+ .int_msk = BIT(5),
+ .msg = "sec_fsm_hbeat_rint"
+ },
+ {
+ .int_msk = BIT(6),
+ .msg = "sec_channel_req_rng_timeout_rint"
+ },
+ {
+ .int_msk = BIT(7),
+ .msg = "sec_bd_err_rint"
+ },
+ {
+ .int_msk = BIT(8),
+ .msg = "sec_chain_buff_err_rint"
+ },
+ {
+ .int_msk = BIT(14),
+ .msg = "sec_no_secure_access"
+ },
+ {
+ .int_msk = BIT(15),
+ .msg = "sec_wrapping_key_auth_err"
+ },
+ {
+ .int_msk = BIT(16),
+ .msg = "sec_km_key_crc_fail"
+ },
+ {
+ .int_msk = BIT(17),
+ .msg = "sec_axi_poison_err"
+ },
+ {
+ .int_msk = BIT(18),
+ .msg = "sec_sva_err"
+ },
+ {}
};
static const char * const sec_dbg_file_name[] = {
- [SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
};
@@ -277,9 +317,7 @@ static u8 sec_get_endian(struct hisi_qm *qm)
"cannot access a register in VF!\n");
return SEC_LE;
}
- reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
-
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
/* BD little endian mode */
if (!(reg & BIT(0)))
return SEC_LE;
@@ -299,13 +337,13 @@ static int sec_engine_init(struct hisi_qm *qm)
u32 reg;
/* disable clock gate control */
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg &= SEC_CLK_GATE_DISABLE;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+ writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);
- ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
reg, reg & 0x1, SEC_DELAY_10_US,
SEC_POLL_TIMEOUT_US);
if (ret) {
@@ -313,40 +351,40 @@ static int sec_engine_init(struct hisi_qm *qm)
return ret;
}
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
- reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
reg |= SEC_USER0_SMMU_NORMAL;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
+ writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG);
- reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
reg &= SEC_USER1_SMMU_MASK;
if (qm->use_sva && qm->ver == QM_HW_V2)
reg |= SEC_USER1_SMMU_SVA;
else
reg |= SEC_USER1_SMMU_NORMAL;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
+ writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
/* Enable sm4 extra mode, as ctr/ecb */
writel_relaxed(SEC_BD_ERR_CHK_EN0,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG0);
/* Enable sm4 xts mode multiple iv */
writel_relaxed(SEC_BD_ERR_CHK_EN1,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
writel_relaxed(SEC_BD_ERR_CHK_EN3,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
+ qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
/* config endian */
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg |= sec_get_endian(qm);
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
return 0;
}
@@ -381,10 +419,6 @@ static void sec_debug_regs_clear(struct hisi_qm *qm)
{
int i;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* clear sec dfx regs */
writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
@@ -406,7 +440,7 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
return;
}
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
/* clear SEC hw error source if having */
writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
@@ -422,14 +456,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
/* enable SEC block master OOO when m-bit error occur */
val = val | SEC_AXI_SHUTDOWN_ENABLE;
- writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val, qm->io_base + SEC_CONTROL_REG);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
{
u32 val;
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
/* disable RAS int */
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
@@ -442,51 +476,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
/* disable SEC block master OOO when m-bit error occur */
val = val & SEC_AXI_SHUTDOWN_DISABLE;
- writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
-}
-
-static u32 sec_current_qm_read(struct sec_debug_file *file)
-{
- struct hisi_qm *qm = file->qm;
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
-{
- struct hisi_qm *qm = file->qm;
- u32 vfq_num;
- u32 tmp;
-
- if (val > qm->vfs_num)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (!val) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
-
- if (val == qm->vfs_num)
- qm->debug.curr_qm_qp_num =
- qm->ctrl_qp_num - qm->qp_num -
- (qm->vfs_num - 1) * vfq_num;
- else
- qm->debug.curr_qm_qp_num = vfq_num;
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
+ writel(val, qm->io_base + SEC_CONTROL_REG);
}
static u32 sec_clear_enable_read(struct sec_debug_file *file)
@@ -523,9 +513,6 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case SEC_CURRENT_QM:
- val = sec_current_qm_read(file);
- break;
case SEC_CLEAR_ENABLE:
val = sec_clear_enable_read(file);
break;
@@ -566,11 +553,6 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case SEC_CURRENT_QM:
- ret = sec_current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case SEC_CLEAR_ENABLE:
ret = sec_clear_enable_write(file, val);
if (ret)
@@ -655,7 +637,7 @@ static int sec_debug_init(struct hisi_qm *qm)
int i;
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+ for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
spin_lock_init(&sec->debug.files[i].lock);
sec->debug.files[i].index = i;
sec->debug.files[i].qm = qm;
@@ -712,7 +694,8 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
err_val = readl(qm->io_base +
SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
- SEC_ECC_NUM(err_val));
+ ((err_val) >> SEC_ECC_NUM) &
+ SEC_ECC_MASH);
}
}
errs++;
@@ -733,9 +716,23 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
- val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ val = readl(qm->io_base + SEC_CONTROL_REG);
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
+}
+
+static void sec_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
+ err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+ err_info->msi_wr_port = BIT(0);
+ err_info->acpi_rst = "SRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+ QM_ACC_WB_NOT_READY_TIMEOUT;
}
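
sec_err_info_init() converts the error-info table from static data (removed just below) into a callback, so the masks can be computed once the hardware revision is known; the ZIP counterpart later in this series uses exactly that freedom to OR in an extra nfe bit on QM_HW_V3. A sketch of the consuming side, assuming the shared qm code invokes the hook during PF probe:

	/* Sketch (assumed call site in the shared qm code): */
	static void hisi_qm_init_err_info(struct hisi_qm *qm)
	{
		if (qm->err_ini && qm->err_ini->err_info_init)
			qm->err_ini->err_info_init(qm); /* fills qm->err_info */
	}
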
static const struct hisi_qm_err_ini sec_err_ini = {
@@ -746,16 +743,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.clear_dev_hw_err_status = sec_clear_hw_err_status,
.log_dev_hw_err = sec_log_hw_error,
.open_axi_master_ooo = sec_open_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
- .dev_ce_mask = SEC_RAS_CE_ENB_MSK,
- .msi_wr_port = BIT(0),
- .acpi_rst = "SRST",
- }
+ .err_info_init = sec_err_info_init,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -763,12 +751,8 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
- else
- qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
-
qm->err_ini = &sec_err_ini;
+ qm->err_ini->err_info_init(qm);
ret = sec_set_user_domain_and_cache(qm);
if (ret)
@@ -786,7 +770,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "cipher\ndigest\naead\n";
+ qm->algs = "cipher\ndigest\naead";
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -909,10 +893,15 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
- ret = hisi_qm_alg_register(qm, &sec_devices);
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_qm_stop;
+ if (qm->qp_num >= ctx_q_num) {
+ ret = hisi_qm_alg_register(qm, &sec_devices);
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_qm_stop;
+ }
+ } else {
+ pci_warn(qm->pdev,
+			"Not enough queue pairs for kernel mode, skipping crypto registration!\n");
}
if (qm->uacce) {
@@ -948,7 +937,9 @@ static void sec_remove(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
hisi_qm_wait_task_finish(qm, &sec_devices);
- hisi_qm_alg_unregister(qm, &sec_devices);
+ if (qm->qp_num >= ctx_q_num)
+ hisi_qm_alg_unregister(qm, &sec_devices);
+
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, true);
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 3bff6394acaf..057273769f26 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -56,7 +56,7 @@ struct hisi_acc_sgl_pool {
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr)
{
- u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
+ u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
struct hisi_acc_sgl_pool *pool;
struct mem_block *block;
u32 i, j;
@@ -66,6 +66,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
sizeof(struct hisi_acc_hw_sgl);
+
+ /*
+	 * the pool may allocate a block of memory of size
+	 * PAGE_SIZE * 2^(MAX_ORDER - 1); on ia64 this can exceed 2^31,
+	 * so the block size is capped at 2^31
+ */
block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
PAGE_SHIFT + MAX_ORDER - 1 : 31);
sgl_num_per_block = block_size / sgl_size;
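
The clamped shift keeps block_size representable in a u32 on configurations where PAGE_SHIFT + MAX_ORDER would overflow it. Worked example under common x86-64 assumptions:

	/* Assuming PAGE_SHIFT = 12 and MAX_ORDER = 11 (typical x86-64):
	 * 12 + 11 = 23 <= 32, so block_size = 1 << 22 = 4 MiB, i.e. the
	 * largest buddy allocation PAGE_SIZE * 2^(MAX_ORDER - 1).
	 * Where PAGE_SHIFT + MAX_ORDER > 32 the shift is clamped to 31,
	 * capping block_size at 2^31 as the new comment says.
	 */
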
@@ -85,8 +90,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
block[i].sgl = dma_alloc_coherent(dev, block_size,
&block[i].sgl_dma,
GFP_KERNEL);
- if (!block[i].sgl)
+ if (!block[i].sgl) {
+		dev_err(dev, "Failed to allocate hw SG buffer!\n");
goto err_free_mem;
+ }
block[i].size = block_size;
}
@@ -95,8 +102,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
&block[i].sgl_dma,
GFP_KERNEL);
- if (!block[i].sgl)
+ if (!block[i].sgl) {
+		dev_err(dev, "Failed to allocate remaining hw SG buffer!\n");
goto err_free_mem;
+ }
block[i].size = remain_sgl * sgl_size;
}
@@ -167,6 +176,7 @@ static void sg_map_to_hw_sg(struct scatterlist *sgl,
{
hw_sge->buf = sg_dma_address(sgl);
hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
+ hw_sge->page_ctrl = sg_virt(sgl);
}
static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
@@ -182,6 +192,18 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}
+static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
+{
+ struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
+ int i;
+
+ for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
+ hw_sge[i].page_ctrl = NULL;
+ hw_sge[i].buf = 0;
+ hw_sge[i].len = 0;
+ }
+}
+
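
clear_hw_sgl_sge() scrubs every SGE the previous mapping filled, so a recycled pool entry can never hand a stale DMA address or page_ctrl pointer back to the device. For orientation, the descriptor it clears has roughly this shape (inferred from the fields touched here; the authoritative struct acc_hw_sge sits at the top of sgl.c):

	/* Assumed layout, inferred from the accesses in this file: */
	struct acc_hw_sge {
		dma_addr_t buf;		/* DMA address of the segment */
		void *page_ctrl;	/* CPU-side cookie from sg_virt() */
		__le32 len;		/* segment length */
		__le32 pad;
		__le32 pad0;
		__le32 pad1;
	};
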
/**
* hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl.
* @dev: The device which hw sgl belongs to.
@@ -211,16 +233,19 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
sg_n = sg_nents(sgl);
sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
- if (!sg_n_mapped)
+ if (!sg_n_mapped) {
+		dev_err(dev, "DMA mapping of the SG list failed!\n");
return ERR_PTR(-EINVAL);
+ }
if (sg_n_mapped > pool->sge_nr) {
- dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
+		dev_err(dev, "the number of entries in the input scatterlist exceeds the SGL pool setting.\n");
return ERR_PTR(-EINVAL);
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
+		dev_err(dev, "Failed to get an SGL from the pool!\n");
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-ENOMEM);
@@ -256,7 +281,7 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
return;
dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
-
+ clear_hw_sgl_sge(hw_sgl);
hw_sgl->entry_sum_in_chain = 0;
hw_sgl->entry_sum_in_sgl = 0;
hw_sgl->entry_length_in_sgl = 0;
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 29712685498a..829f2caf0f67 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -18,6 +18,8 @@
#define HISI_TRNG_REG 0x00F0
#define HISI_TRNG_BYTES 4
#define HISI_TRNG_QUALITY 512
+#define HISI_TRNG_VERSION 0x01B8
+#define HISI_TRNG_VER_V1 GENMASK(31, 0)
#define SLEEP_US 10
#define TIMEOUT_US 10000
#define SW_DRBG_NUM_SHIFT 2
@@ -50,6 +52,7 @@ struct hisi_trng {
struct hisi_trng_list *trng_list;
struct list_head list;
struct hwrng rng;
+ u32 ver;
bool is_used;
struct mutex mutex;
};
@@ -260,6 +263,7 @@ static int hisi_trng_probe(struct platform_device *pdev)
return PTR_ERR(trng->base);
trng->is_used = false;
+ trng->ver = readl(trng->base + HISI_TRNG_VERSION);
if (!trng_devices.is_init) {
INIT_LIST_HEAD(&trng_devices.list);
mutex_init(&trng_devices.lock);
@@ -267,7 +271,8 @@ static int hisi_trng_probe(struct platform_device *pdev)
}
hisi_trng_add_to_list(trng);
- if (atomic_inc_return(&trng_active_devs) == 1) {
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_inc_return(&trng_active_devs) == 1) {
ret = crypto_register_rng(&hisi_trng_alg);
if (ret) {
dev_err(&pdev->dev,
@@ -289,7 +294,8 @@ static int hisi_trng_probe(struct platform_device *pdev)
return ret;
err_crypto_unregister:
- if (atomic_dec_return(&trng_active_devs) == 0)
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_dec_return(&trng_active_devs) == 0)
crypto_unregister_rng(&hisi_trng_alg);
err_remove_from_list:
@@ -305,7 +311,8 @@ static int hisi_trng_remove(struct platform_device *pdev)
while (hisi_trng_del_from_list(trng))
;
- if (atomic_dec_return(&trng_active_devs) == 0)
+ if (trng->ver != HISI_TRNG_VER_V1 &&
+ atomic_dec_return(&trng_active_devs) == 0)
crypto_unregister_rng(&hisi_trng_alg);
return 0;
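
HISI_TRNG_VER_V1 is GENMASK(31, 0), i.e. 0xFFFFFFFF, so V1 parts are presumably identified by an all-ones read from the version register; only newer parts join the shared crypto_rng registration refcount, while the hwrng interface stays available on both. Condensed guard pattern from the hunks above:

	/* Condensed from this patch: DRBG registration is version gated. */
	trng->ver = readl(trng->base + HISI_TRNG_VERSION);
	if (trng->ver != HISI_TRNG_VER_V1 &&
	    atomic_inc_return(&trng_active_devs) == 1)
		ret = crypto_register_rng(&hisi_trng_alg);
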
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 92397f993e23..517fdbdff3ea 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -33,35 +33,55 @@ struct hisi_zip_sqe {
u32 consumed;
u32 produced;
u32 comp_data_length;
+ /*
+ * status: 0~7 bits
+ * rsvd: 8~31 bits
+ */
u32 dw3;
u32 input_data_length;
- u32 lba_l;
- u32 lba_h;
+ u32 dw5;
+ u32 dw6;
+ /*
+ * in_sge_data_offset: 0~23 bits
+ * rsvd: 24~27 bits
+ * sqe_type: 29~31 bits
+ */
u32 dw7;
+ /*
+ * out_sge_data_offset: 0~23 bits
+ * rsvd: 24~31 bits
+ */
u32 dw8;
+ /*
+ * request_type: 0~7 bits
+ * buffer_type: 8~11 bits
+ * rsvd: 13~31 bits
+ */
u32 dw9;
u32 dw10;
- u32 priv_info;
+ u32 dw11;
u32 dw12;
- u32 tag;
+ /* tag: in sqe type 0 */
+ u32 dw13;
u32 dest_avail_out;
- u32 rsvd0;
- u32 comp_head_addr_l;
- u32 comp_head_addr_h;
+ u32 dw15;
+ u32 dw16;
+ u32 dw17;
u32 source_addr_l;
u32 source_addr_h;
u32 dest_addr_l;
u32 dest_addr_h;
- u32 stream_ctx_addr_l;
- u32 stream_ctx_addr_h;
- u32 cipher_key1_addr_l;
- u32 cipher_key1_addr_h;
- u32 cipher_key2_addr_l;
- u32 cipher_key2_addr_h;
+ u32 dw22;
+ u32 dw23;
+ u32 dw24;
+ u32 dw25;
+ /* tag: in sqe type 3 */
+ u32 dw26;
+ u32 dw27;
u32 rsvd1[4];
};
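
Renaming the version-specific SQE members to bare dwN words with bit-range comments lets one struct serve both hardware generations: the v1 tag lives in dw13, the v2 tag in dw26, and individual fields are carved out with the masks defined in zip_crypto.c. A hedged sketch of reading the status field:

	#include <linux/bitfield.h>

	/* dw3 carries the BD status in bits 0-7; HZIP_BD_STATUS_M is
	 * GENMASK(7, 0) in zip_crypto.c, so this is equivalent to the
	 * sqe->dw3 & HZIP_BD_STATUS_M used there.
	 */
	u32 status = FIELD_GET(GENMASK(7, 0), sqe->dw3);
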
int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
-int hisi_zip_register_to_crypto(void);
-void hisi_zip_unregister_from_crypto(void);
+int hisi_zip_register_to_crypto(struct hisi_qm *qm);
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 08b4660b014c..9520a4113c81 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -10,6 +10,7 @@
#define HZIP_BD_STATUS_M GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
+#define HZIP_SQE_TYPE_M GENMASK(31, 28)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
@@ -91,8 +92,22 @@ struct hisi_zip_qp_ctx {
struct hisi_zip_ctx *ctx;
};
+struct hisi_zip_sqe_ops {
+ u8 sqe_type;
+ void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
+ void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
+ void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
+ void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
+ u32 (*get_tag)(struct hisi_zip_sqe *sqe);
+ u32 (*get_status)(struct hisi_zip_sqe *sqe);
+ u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
+};
+
struct hisi_zip_ctx {
struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
+ const struct hisi_zip_sqe_ops *ops;
};
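
hisi_zip_sqe_ops is a small vtable that confines every layout difference between SQE generations to fill/get callbacks; the context binds one table at init time and the hot path never branches on the hardware version again. Usage sketch mirroring hisi_zip_fill_sqe() below:

	/* Sketch: version differences are one pointer dereference away. */
	const struct hisi_zip_sqe_ops *ops = ctx->ops;

	ops->fill_tag(sqe, req);		/* dw13 on v1, dw26 on v2 */
	ops->fill_sqe_type(sqe, ops->sqe_type);	/* 0 on v1, 0x3 on v2 */
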
static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
@@ -119,35 +134,367 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
-static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
+static u16 get_extra_field_size(const u8 *start)
+{
+ return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
+}
+
+static u32 get_name_field_size(const u8 *start)
+{
+ return strlen(start) + 1;
+}
+
+static u32 get_comment_field_size(const u8 *start)
+{
+ return strlen(start) + 1;
+}
+
+static u32 __get_gzip_head_size(const u8 *src)
+{
+ u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
+ u32 size = GZIP_HEAD_FEXTRA_SHIFT;
+
+ if (head_flg & GZIP_HEAD_FEXTRA_BIT)
+ size += get_extra_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FNAME_BIT)
+ size += get_name_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
+ size += get_comment_field_size(src + size);
+ if (head_flg & GZIP_HEAD_FHCRC_BIT)
+ size += GZIP_HEAD_FHCRC_SIZE;
+
+ return size;
+}
+
+static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+{
+ char buf[HZIP_GZIP_HEAD_BUF];
+
+ sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
+
+ return __get_gzip_head_size(buf);
+}
+
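
__get_gzip_head_size() walks the optional gzip header fields in wire order (FEXTRA, FNAME, FCOMMENT, FHCRC) starting after the fixed portion. A worked example, assuming GZIP_HEAD_FEXTRA_SHIFT is the 10-byte fixed header of RFC 1952:

	/* Assumed example: header with only FNAME set, name "a.txt":
	 *   size = 10                  fixed header (assumption)
	 *        + strlen("a.txt") + 1 FNAME incl. its NUL terminator
	 *        = 16 bytes to skip before the deflate stream begins.
	 */
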
+static int add_comp_head(struct scatterlist *dst, u8 req_type)
+{
+ int head_size = TO_HEAD_SIZE(req_type);
+ const u8 *head = TO_HEAD(req_type);
+ int ret;
+
+ ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
+ if (ret != head_size) {
+		pr_err("wrong header size copied to the buffer (%d)!\n", ret);
+ return -ENOMEM;
+ }
+
+ return head_size;
+}
+
+static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
+{
+ if (!acomp_req->src || !acomp_req->slen)
+ return -EINVAL;
+
+ if (req_type == HZIP_ALG_TYPE_GZIP &&
+ acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+ return -EINVAL;
+
+ switch (req_type) {
+ case HZIP_ALG_TYPE_ZLIB:
+ return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
+ case HZIP_ALG_TYPE_GZIP:
+ return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
+ default:
+		pr_err("request type is not supported!\n");
+ return -EINVAL;
+ }
+}
+
+static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
+ struct hisi_zip_qp_ctx *qp_ctx,
+ size_t head_size, bool is_comp)
+{
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+ struct hisi_zip_req *q = req_q->q;
+ struct hisi_zip_req *req_cache;
+ int req_id;
+
+ write_lock(&req_q->req_lock);
+
+ req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
+ if (req_id >= req_q->size) {
+ write_unlock(&req_q->req_lock);
+ dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
+ return ERR_PTR(-EAGAIN);
+ }
+ set_bit(req_id, req_q->req_bitmap);
+
+ req_cache = q + req_id;
+ req_cache->req_id = req_id;
+ req_cache->req = req;
+
+ if (is_comp) {
+ req_cache->sskip = 0;
+ req_cache->dskip = head_size;
+ } else {
+ req_cache->sskip = head_size;
+ req_cache->dskip = 0;
+ }
+
+ write_unlock(&req_q->req_lock);
+
+ return req_cache;
+}
+
+static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
+ struct hisi_zip_req *req)
+{
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+
+ write_lock(&req_q->req_lock);
+ clear_bit(req->req_id, req_q->req_bitmap);
+ memset(req, 0, sizeof(struct hisi_zip_req));
+ write_unlock(&req_q->req_lock);
+}
+
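
The request cache is a fixed array guarded by req_lock, and the slot index does double duty: it is written into the SQE as the hardware tag, so the completion handler can map a finished SQE straight back to its request. Condensed from hisi_zip_acomp_cb() below:

	/* Completion side, condensed: tag == request-cache slot. */
	u32 tag = ops->get_tag(sqe);
	struct hisi_zip_req *req = req_q->q + tag;
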
+static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ sqe->source_addr_l = lower_32_bits(req->dma_src);
+ sqe->source_addr_h = upper_32_bits(req->dma_src);
+ sqe->dest_addr_l = lower_32_bits(req->dma_dst);
+ sqe->dest_addr_h = upper_32_bits(req->dma_dst);
+}
+
+static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ struct acomp_req *a_req = req->req;
+
+ sqe->input_data_length = a_req->slen - req->sskip;
+ sqe->dest_avail_out = a_req->dlen - req->dskip;
+ sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip);
+ sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip);
+}
+
+static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
u32 val;
- val = (sqe->dw9) & ~HZIP_BUF_TYPE_M;
+ val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
sqe->dw9 = val;
}
-static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
+static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
- sqe->tag = tag;
+ u32 val;
+
+ val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
+ val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
+ sqe->dw9 = val;
}
-static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
- dma_addr_t s_addr, dma_addr_t d_addr, u32 slen,
- u32 dlen, u32 sskip, u32 dskip)
+static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
+ sqe->dw13 = req->req_id;
+}
+
+static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+ sqe->dw26 = req->req_id;
+}
+
+static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
+{
+ u32 val;
+
+ val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
+ val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
+ sqe->dw7 = val;
+}
+
+static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
+ u8 req_type, struct hisi_zip_req *req)
+{
+ const struct hisi_zip_sqe_ops *ops = ctx->ops;
+
memset(sqe, 0, sizeof(struct hisi_zip_sqe));
- sqe->input_data_length = slen - sskip;
- sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
- sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
- sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
- sqe->dest_avail_out = dlen - dskip;
- sqe->source_addr_l = lower_32_bits(s_addr);
- sqe->source_addr_h = upper_32_bits(s_addr);
- sqe->dest_addr_l = lower_32_bits(d_addr);
- sqe->dest_addr_h = upper_32_bits(d_addr);
+ ops->fill_addr(sqe, req);
+ ops->fill_buf_size(sqe, req);
+ ops->fill_buf_type(sqe, HZIP_SGL);
+ ops->fill_req_type(sqe, req_type);
+ ops->fill_tag(sqe, req);
+ ops->fill_sqe_type(sqe, ops->sqe_type);
+}
+
+static int hisi_zip_do_work(struct hisi_zip_req *req,
+ struct hisi_zip_qp_ctx *qp_ctx)
+{
+ struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct acomp_req *a_req = req->req;
+ struct hisi_qp *qp = qp_ctx->qp;
+ struct device *dev = &qp->qm->pdev->dev;
+ struct hisi_zip_sqe zip_sqe;
+ int ret;
+
+ if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+ return -EINVAL;
+
+ req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
+ req->req_id << 1, &req->dma_src);
+ if (IS_ERR(req->hw_src)) {
+ dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
+ PTR_ERR(req->hw_src));
+ return PTR_ERR(req->hw_src);
+ }
+
+ req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
+ (req->req_id << 1) + 1,
+ &req->dma_dst);
+ if (IS_ERR(req->hw_dst)) {
+ ret = PTR_ERR(req->hw_dst);
+		dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
+ ret);
+ goto err_unmap_input;
+ }
+
+ hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
+
+ /* send command to start a task */
+ atomic64_inc(&dfx->send_cnt);
+ ret = hisi_qp_send(qp, &zip_sqe);
+ if (ret < 0) {
+ atomic64_inc(&dfx->send_busy_cnt);
+ ret = -EAGAIN;
+ dev_dbg_ratelimited(dev, "failed to send request!\n");
+ goto err_unmap_output;
+ }
+
+ return -EINPROGRESS;
+
+err_unmap_output:
+ hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+err_unmap_input:
+ hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+ return ret;
+}
+
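
Each request claims an adjacent pair of pool SGLs: index req_id << 1 maps the source and (req_id << 1) + 1 the destination, so a request cache of N entries needs a pool created with 2 * N SGLs. Sketch of the implied pool sizing (using QM_Q_DEPTH as the cache bound is an assumption):

	/* Implied sizing: two SGLs per in-flight request (assumption:
	 * QM_Q_DEPTH bounds the request cache).
	 */
	pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1, sgl_sge_nr);
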
+static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw13;
+}
+
+static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw26;
+}
+
+static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
+{
+ return sqe->dw3 & HZIP_BD_STATUS_M;
+}
+
+static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
+{
+ return sqe->produced;
+}
+
+static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
+{
+ struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+ const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
+ struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+ struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+ struct device *dev = &qp->qm->pdev->dev;
+ struct hisi_zip_sqe *sqe = data;
+ u32 tag = ops->get_tag(sqe);
+ struct hisi_zip_req *req = req_q->q + tag;
+ struct acomp_req *acomp_req = req->req;
+ u32 status, dlen, head_size;
+ int err = 0;
+
+ atomic64_inc(&dfx->recv_cnt);
+ status = ops->get_status(sqe);
+ if (status != 0 && status != HZIP_NC_ERR) {
+ dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
+ (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
+ sqe->produced);
+ atomic64_inc(&dfx->err_bd_cnt);
+ err = -EIO;
+ }
+
+ dlen = ops->get_dstlen(sqe);
+
+ hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+ hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+
+ head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
+ acomp_req->dlen = dlen + head_size;
+
+ if (acomp_req->base.complete)
+ acomp_request_complete(acomp_req, err);
+
+ hisi_zip_remove_req(qp_ctx, req);
+}
+
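
On the compress path the header bytes come from software via add_comp_head(), not from the engine, so the callback adds them back before completing the request. Example arithmetic with assumed numbers:

	/* Assumed example, gzip compression: hardware reports
	 * produced = 100 bytes and TO_HEAD_SIZE() yields 10, so the
	 * caller sees dlen = 110: software header plus hardware payload.
	 * Decompression uses head_size = 0, the header having been
	 * consumed on the input side instead.
	 */
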
+static int hisi_zip_acompress(struct acomp_req *acomp_req)
+{
+ struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
+ struct hisi_zip_req *req;
+ int head_size;
+ int ret;
+
+ /* let's output compression head now */
+ head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
+ head_size);
+ return head_size;
+ }
+
+ req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = hisi_zip_do_work(req, qp_ctx);
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
+ hisi_zip_remove_req(qp_ctx, req);
+ }
+
+ return ret;
+}
+
+static int hisi_zip_adecompress(struct acomp_req *acomp_req)
+{
+ struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
+ struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
+ struct device *dev = &qp_ctx->qp->qm->pdev->dev;
+ struct hisi_zip_req *req;
+ int head_size, ret;
+
+ head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
+ if (head_size < 0) {
+ dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
+ head_size);
+ return head_size;
+ }
+
+ req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = hisi_zip_do_work(req, qp_ctx);
+ if (ret != -EINPROGRESS) {
+ dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
+ ret);
+ hisi_zip_remove_req(qp_ctx, req);
+ }
+
+ return ret;
}
static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
@@ -177,9 +524,36 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
hisi_qm_release_qp(ctx->qp);
}
+static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
+ .sqe_type = 0,
+ .fill_addr = hisi_zip_fill_addr,
+ .fill_buf_size = hisi_zip_fill_buf_size,
+ .fill_buf_type = hisi_zip_fill_buf_type,
+ .fill_req_type = hisi_zip_fill_req_type,
+ .fill_tag = hisi_zip_fill_tag_v1,
+ .fill_sqe_type = hisi_zip_fill_sqe_type,
+ .get_tag = hisi_zip_get_tag_v1,
+ .get_status = hisi_zip_get_status,
+ .get_dstlen = hisi_zip_get_dstlen,
+};
+
+static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = {
+ .sqe_type = 0x3,
+ .fill_addr = hisi_zip_fill_addr,
+ .fill_buf_size = hisi_zip_fill_buf_size,
+ .fill_buf_type = hisi_zip_fill_buf_type,
+ .fill_req_type = hisi_zip_fill_req_type,
+ .fill_tag = hisi_zip_fill_tag_v2,
+ .fill_sqe_type = hisi_zip_fill_sqe_type,
+ .get_tag = hisi_zip_get_tag_v2,
+ .get_status = hisi_zip_get_status,
+ .get_dstlen = hisi_zip_get_dstlen,
+};
+
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
+ struct hisi_zip_qp_ctx *qp_ctx;
struct hisi_zip *hisi_zip;
int ret, i, j;
@@ -193,8 +567,9 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
- ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
- req_type);
+ qp_ctx = &hisi_zip_ctx->qp_ctx[i];
+ qp_ctx->ctx = hisi_zip_ctx;
+ ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
if (ret) {
for (j = i - 1; j >= 0; j--)
hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
@@ -203,9 +578,14 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
return ret;
}
- hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip;
+ qp_ctx->zip_dev = hisi_zip;
}
+ if (hisi_zip->qm.ver < QM_HW_V3)
+ hisi_zip_ctx->ops = &hisi_zip_ops_v1;
+ else
+ hisi_zip_ctx->ops = &hisi_zip_ops_v2;
+
return 0;
}
@@ -217,38 +597,6 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}
-static u16 get_extra_field_size(const u8 *start)
-{
- return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
-}
-
-static u32 get_name_field_size(const u8 *start)
-{
- return strlen(start) + 1;
-}
-
-static u32 get_comment_field_size(const u8 *start)
-{
- return strlen(start) + 1;
-}
-
-static u32 __get_gzip_head_size(const u8 *src)
-{
- u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
- u32 size = GZIP_HEAD_FEXTRA_SHIFT;
-
- if (head_flg & GZIP_HEAD_FEXTRA_BIT)
- size += get_extra_field_size(src + size);
- if (head_flg & GZIP_HEAD_FNAME_BIT)
- size += get_name_field_size(src + size);
- if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
- size += get_comment_field_size(src + size);
- if (head_flg & GZIP_HEAD_FHCRC_BIT)
- size += GZIP_HEAD_FHCRC_SIZE;
-
- return size;
-}
-
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
struct hisi_zip_req_q *req_q;
@@ -336,52 +684,6 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
ctx->qp_ctx[i].sgl_pool);
}
-static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
- struct hisi_zip_req *req)
-{
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
-
- write_lock(&req_q->req_lock);
- clear_bit(req->req_id, req_q->req_bitmap);
- memset(req, 0, sizeof(struct hisi_zip_req));
- write_unlock(&req_q->req_lock);
-}
-
-static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
-{
- struct hisi_zip_sqe *sqe = data;
- struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
- struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- struct hisi_zip_req *req = req_q->q + sqe->tag;
- struct acomp_req *acomp_req = req->req;
- struct device *dev = &qp->qm->pdev->dev;
- u32 status, dlen, head_size;
- int err = 0;
-
- atomic64_inc(&dfx->recv_cnt);
- status = sqe->dw3 & HZIP_BD_STATUS_M;
- if (status != 0 && status != HZIP_NC_ERR) {
- dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
- (qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
- sqe->produced);
- atomic64_inc(&dfx->err_bd_cnt);
- err = -EIO;
- }
- dlen = sqe->produced;
-
- hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
- hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
-
- head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
- acomp_req->dlen = dlen + head_size;
-
- if (acomp_req->base.complete)
- acomp_request_complete(acomp_req, err);
-
- hisi_zip_remove_req(qp_ctx, req);
-}
-
static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
void (*fn)(struct hisi_qp *, void *))
{
@@ -439,204 +741,6 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
hisi_zip_ctx_exit(ctx);
}
-static int add_comp_head(struct scatterlist *dst, u8 req_type)
-{
- int head_size = TO_HEAD_SIZE(req_type);
- const u8 *head = TO_HEAD(req_type);
- int ret;
-
- ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size) {
- pr_err("the head size of buffer is wrong (%d)!\n", ret);
- return -ENOMEM;
- }
-
- return head_size;
-}
-
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
-{
- char buf[HZIP_GZIP_HEAD_BUF];
-
- sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
-
- return __get_gzip_head_size(buf);
-}
-
-static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
-{
- if (!acomp_req->src || !acomp_req->slen)
- return -EINVAL;
-
- if ((req_type == HZIP_ALG_TYPE_GZIP) &&
- (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
- return -EINVAL;
-
- switch (req_type) {
- case HZIP_ALG_TYPE_ZLIB:
- return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
- case HZIP_ALG_TYPE_GZIP:
- return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
- default:
- pr_err("request type does not support!\n");
- return -EINVAL;
- }
-}
-
-static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
- struct hisi_zip_qp_ctx *qp_ctx,
- size_t head_size, bool is_comp)
-{
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
- struct hisi_zip_req *q = req_q->q;
- struct hisi_zip_req *req_cache;
- int req_id;
-
- write_lock(&req_q->req_lock);
-
- req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
- if (req_id >= req_q->size) {
- write_unlock(&req_q->req_lock);
- dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
- return ERR_PTR(-EAGAIN);
- }
- set_bit(req_id, req_q->req_bitmap);
-
- req_cache = q + req_id;
- req_cache->req_id = req_id;
- req_cache->req = req;
-
- if (is_comp) {
- req_cache->sskip = 0;
- req_cache->dskip = head_size;
- } else {
- req_cache->sskip = head_size;
- req_cache->dskip = 0;
- }
-
- write_unlock(&req_q->req_lock);
-
- return req_cache;
-}
-
-static int hisi_zip_do_work(struct hisi_zip_req *req,
- struct hisi_zip_qp_ctx *qp_ctx)
-{
- struct acomp_req *a_req = req->req;
- struct hisi_qp *qp = qp_ctx->qp;
- struct device *dev = &qp->qm->pdev->dev;
- struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
- struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_sqe zip_sqe;
- dma_addr_t input, output;
- int ret;
-
- if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
- return -EINVAL;
-
- req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
- req->req_id << 1, &input);
- if (IS_ERR(req->hw_src)) {
- dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
- PTR_ERR(req->hw_src));
- return PTR_ERR(req->hw_src);
- }
- req->dma_src = input;
-
- req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
- (req->req_id << 1) + 1,
- &output);
- if (IS_ERR(req->hw_dst)) {
- ret = PTR_ERR(req->hw_dst);
- dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
- ret);
- goto err_unmap_input;
- }
- req->dma_dst = output;
-
- hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen,
- a_req->dlen, req->sskip, req->dskip);
- hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL);
- hisi_zip_config_tag(&zip_sqe, req->req_id);
-
- /* send command to start a task */
- atomic64_inc(&dfx->send_cnt);
- ret = hisi_qp_send(qp, &zip_sqe);
- if (ret < 0) {
- atomic64_inc(&dfx->send_busy_cnt);
- ret = -EAGAIN;
- dev_dbg_ratelimited(dev, "failed to send request!\n");
- goto err_unmap_output;
- }
-
- return -EINPROGRESS;
-
-err_unmap_output:
- hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
-err_unmap_input:
- hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
- return ret;
-}
-
-static int hisi_zip_acompress(struct acomp_req *acomp_req)
-{
- struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
- struct device *dev = &qp_ctx->qp->qm->pdev->dev;
- struct hisi_zip_req *req;
- int head_size;
- int ret;
-
- /* let's output compression head now */
- head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0) {
- dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
- head_size);
- return head_size;
- }
-
- req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
- dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
- hisi_zip_remove_req(qp_ctx, req);
- }
-
- return ret;
-}
-
-static int hisi_zip_adecompress(struct acomp_req *acomp_req)
-{
- struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
- struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
- struct device *dev = &qp_ctx->qp->qm->pdev->dev;
- struct hisi_zip_req *req;
- int head_size, ret;
-
- head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
- if (head_size < 0) {
- dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
- head_size);
- return head_size;
- }
-
- req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
- dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
- ret);
- hisi_zip_remove_req(qp_ctx, req);
- }
-
- return ret;
-}
-
static struct acomp_alg hisi_zip_acomp_zlib = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
@@ -665,7 +769,7 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
}
};
-int hisi_zip_register_to_crypto(void)
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
int ret;
@@ -684,7 +788,7 @@ int hisi_zip_register_to_crypto(void)
return ret;
}
-void hisi_zip_unregister_from_crypto(void)
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
crypto_unregister_acomp(&hisi_zip_acomp_gzip);
crypto_unregister_acomp(&hisi_zip_acomp_zlib);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 02c445722445..2178b40e9f82 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -18,7 +18,6 @@
#define PCI_DEVICE_ID_ZIP_VF 0xa251
#define HZIP_QUEUE_NUM_V1 4096
-#define HZIP_QUEUE_NUM_V2 1024
#define HZIP_CLOCK_GATE_CTRL 0x301004
#define COMP0_ENABLE BIT(0)
@@ -69,10 +68,10 @@
#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
-#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
+#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
-#define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0)
+#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
#define HZIP_COMP_CORE_NUM 2
#define HZIP_DECOMP_CORE_NUM 6
#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
@@ -134,17 +133,17 @@ static const struct hisi_zip_hw_error zip_hw_error[] = {
{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
+ { .int_msk = BIT(11), .msg = "zip_axi_poison_err" },
+ { .int_msk = BIT(12), .msg = "zip_sva_err" },
{ /* sentinel */ }
};
enum ctrl_debug_file_index {
- HZIP_CURRENT_QM,
HZIP_CLEAR_ENABLE,
HZIP_DEBUG_FILE_NUM,
};
static const char * const ctrl_debug_file_name[] = {
- [HZIP_CURRENT_QM] = "current_qm",
[HZIP_CLEAR_ENABLE] = "clear_enable",
};
@@ -363,48 +362,6 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
return &hisi_zip->qm;
}
-static u32 current_qm_read(struct ctrl_debug_file *file)
-{
- struct hisi_qm *qm = file_to_qm(file);
-
- return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int current_qm_write(struct ctrl_debug_file *file, u32 val)
-{
- struct hisi_qm *qm = file_to_qm(file);
- u32 vfq_num;
- u32 tmp;
-
- if (val > qm->vfs_num)
- return -EINVAL;
-
- /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
- qm->debug.curr_qm_qp_num = qm->qp_num;
- } else {
- vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
- if (val == qm->vfs_num)
- qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
- qm->qp_num - (qm->vfs_num - 1) * vfq_num;
- else
- qm->debug.curr_qm_qp_num = vfq_num;
- }
-
- writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
- tmp = val |
- (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
- writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
- return 0;
-}
-
static u32 clear_enable_read(struct ctrl_debug_file *file)
{
struct hisi_qm *qm = file_to_qm(file);
@@ -438,9 +395,6 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
spin_lock_irq(&file->lock);
switch (file->index) {
- case HZIP_CURRENT_QM:
- val = current_qm_read(file);
- break;
case HZIP_CLEAR_ENABLE:
val = clear_enable_read(file);
break;
@@ -478,11 +432,6 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
spin_lock_irq(&file->lock);
switch (file->index) {
- case HZIP_CURRENT_QM:
- ret = current_qm_write(file, val);
- if (ret)
- goto err_input;
- break;
case HZIP_CLEAR_ENABLE:
ret = clear_enable_write(file, val);
if (ret)
@@ -580,7 +529,7 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
int i;
- for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
+ for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) {
spin_lock_init(&zip->ctrl->files[i].lock);
zip->ctrl->files[i].ctrl = zip->ctrl;
zip->ctrl->files[i].index = i;
@@ -627,10 +576,6 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
int i, j;
- /* clear current_qm */
- writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
- writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
/* enable register read_clear bit */
writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
@@ -714,6 +659,22 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HZIP_CORE_INT_SET);
}
+static void hisi_zip_err_info_init(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ err_info->ce = QM_BASE_CE;
+ err_info->fe = 0;
+ err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+ err_info->msi_wr_port = HZIP_WR_PORT;
+ err_info->acpi_rst = "ZRST";
+ err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
+
+ if (qm->ver >= QM_HW_V3)
+ err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
+}
+
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.hw_init = hisi_zip_set_user_domain_and_cache,
.hw_err_enable = hisi_zip_hw_error_enable,
@@ -723,16 +684,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.log_dev_hw_err = hisi_zip_log_hw_error,
.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
- .err_info = {
- .ce = QM_BASE_CE,
- .nfe = QM_BASE_NFE |
- QM_ACC_WB_NOT_READY_TIMEOUT,
- .fe = 0,
- .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
- .dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE,
- .msi_wr_port = HZIP_WR_PORT,
- .acpi_rst = "ZRST",
- }
+ .err_info_init = hisi_zip_err_info_init,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -746,13 +698,8 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
-
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
- else
- qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
-
qm->err_ini = &hisi_zip_err_ini;
+ qm->err_ini->err_info_init(qm);
hisi_zip_set_user_domain_and_cache(qm);
hisi_qm_dev_err_init(qm);
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index e813115d5432..aa4c7b2af3e2 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -963,8 +963,6 @@ static int img_hash_probe(struct platform_device *pdev)
hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdev->io_base)) {
err = PTR_ERR(hdev->io_base);
- dev_err(dev, "can't ioremap, returned %d\n", err);
-
goto res_err;
}
@@ -972,7 +970,6 @@ static int img_hash_probe(struct platform_device *pdev)
hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
if (IS_ERR(hdev->cpu_addr)) {
- dev_err(dev, "can't ioremap write port\n");
err = PTR_ERR(hdev->cpu_addr);
goto res_err;
}
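
devm_platform_ioremap_resource() and devm_ioremap_resource() already print a diagnostic on failure, so the driver-local dev_err() calls removed here were pure duplication. The resulting idiom (a sketch; the real function keeps its goto-based cleanup):

	/* Sketch of the idiom after the change: the helper logs, the
	 * caller just propagates the error.
	 */
	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);
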
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 6364583b88b2..9ff885d50edf 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -688,7 +688,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
/* Leave the DSE threads reset state */
writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
- /* Configure the procesing engine thresholds */
+ /* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 8b0f17fc09fb..0616e369522e 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -265,7 +265,7 @@ static int setup_crypt_desc(void)
return 0;
}
-static spinlock_t desc_lock;
+static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
int i;
@@ -293,7 +293,7 @@ static struct crypt_ctl *get_crypt_desc(void)
}
}
-static spinlock_t emerg_lock;
+static DEFINE_SPINLOCK(emerg_lock);
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
int i;
@@ -1379,9 +1379,6 @@ static int __init ixp_module_init(void)
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- spin_lock_init(&desc_lock);
- spin_lock_init(&emerg_lock);
-
err = init_ixp_crypto(&pdev->dev);
if (err) {
platform_device_unregister(pdev);
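
DEFINE_SPINLOCK() initializes the lock at compile time, removing the window in which a file-scope spinlock_t exists but is unusable until ixp_module_init() has run its spin_lock_init() calls. The two forms side by side:

	/* Before: zero-filled lock, valid only after runtime init. */
	static spinlock_t desc_lock;
	/* ... sometime later, before first use: */
	spin_lock_init(&desc_lock);

	/* After: statically initialized, valid from the first use. */
	static DEFINE_SPINLOCK(desc_lock);
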
diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
index b6b25d994af3..e2a39fdaf623 100644
--- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
@@ -1623,10 +1623,8 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
}
aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem);
- if (IS_ERR(aes_dev->base_reg)) {
- dev_err(dev, "Failed to get base address\n");
+ if (IS_ERR(aes_dev->base_reg))
return PTR_ERR(aes_dev->base_reg);
- }
/* Get and request IRQ */
aes_dev->irq = platform_get_irq(pdev, 0);
@@ -1649,8 +1647,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
/* Initialize crypto engine */
aes_dev->engine = crypto_engine_alloc_init(dev, true);
- if (!aes_dev->engine)
+ if (!aes_dev->engine) {
+ rc = -ENOMEM;
goto list_del;
+ }
rc = crypto_engine_start(aes_dev->engine);
if (rc) {
diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
index c4b97b4160e9..0379dbf32a4c 100644
--- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
@@ -1192,10 +1192,8 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
}
hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
- if (IS_ERR(hcu_dev->io_base)) {
- dev_err(dev, "Could not io-remap mem resource.\n");
+ if (IS_ERR(hcu_dev->io_base))
return PTR_ERR(hcu_dev->io_base);
- }
init_completion(&hcu_dev->irq_done);
@@ -1220,8 +1218,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
/* Initialize crypto engine */
hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
- if (!hcu_dev->engine)
+ if (!hcu_dev->engine) {
+ rc = -ENOMEM;
goto list_del;
+ }
rc = crypto_engine_start(hcu_dev->engine);
if (rc) {
diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/keembay/ocs-hcu.c
index 81eecacf603a..deb9bd460ee6 100644
--- a/drivers/crypto/keembay/ocs-hcu.c
+++ b/drivers/crypto/keembay/ocs-hcu.c
@@ -93,7 +93,7 @@
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000
/**
- * struct ocs_hcu_dma_list - An entry in an OCS DMA linked list.
+ * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
* @src_addr: Source address of the data.
* @src_len: Length of data to be fetched.
* @nxt_desc: Next descriptor to fetch.
@@ -107,7 +107,7 @@ struct ocs_hcu_dma_entry {
};
/**
- * struct ocs_dma_list - OCS-specific DMA linked list.
+ * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
* @head: The head of the list (points to the array backing the list).
* @tail: The current tail of the list; NULL if the list is empty.
* @dma_addr: The DMA address of @head (i.e., the DMA address of the backing
@@ -597,7 +597,7 @@ int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
}
/**
- * ocs_hcu_digest() - Perform a hashing iteration.
+ * ocs_hcu_hash_update() - Perform a hashing iteration.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dma_list: The OCS DMA list mapping the input data to process.
@@ -632,7 +632,7 @@ int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
}
/**
- * ocs_hcu_hash_final() - Update and finalize hash computation.
+ * ocs_hcu_hash_finup() - Update and finalize hash computation.
* @hcu_dev: The OCS HCU device to use.
* @ctx: The OCS HCU hashing context.
* @dma_list: The OCS DMA list mapping the input data to process.
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 3518fac29834..ecedd91a8d85 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -121,14 +121,14 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
struct pci_dev *pdev);
-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
- struct pci_dev *pdev, u64 reg, u64 *val);
+int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ u64 reg, u64 *val, int blkaddr);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val);
+ u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val);
+ u64 reg, u64 *val, int blkaddr);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val);
+ u64 reg, u64 val, int blkaddr);
struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 51cb6404ded7..9074876d38e5 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -43,7 +43,7 @@ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val)
+ u64 reg, u64 *val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@@ -62,12 +62,13 @@ int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
reg_msg->is_write = 0;
reg_msg->reg_offset = reg;
reg_msg->ret_val = val;
+ reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val)
+ u64 reg, u64 val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@@ -86,16 +87,17 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
reg_msg->is_write = 1;
reg_msg->reg_offset = reg;
reg_msg->val = val;
+ reg_msg->blkaddr = blkaddr;
return 0;
}
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val)
+ u64 reg, u64 *val, int blkaddr)
{
int ret;
- ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
+ ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
@@ -103,11 +105,11 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
}
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 val)
+ u64 reg, u64 val, int blkaddr)
{
int ret;
- ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
+ ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 823a4571fd67..34aba1532761 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -56,7 +56,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- &lf_ctrl.u);
+ &lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
@@ -64,7 +64,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- lf_ctrl.u);
+ lf_ctrl.u, lfs->blkaddr);
return ret;
}
@@ -77,7 +77,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- &lf_ctrl.u);
+ &lf_ctrl.u, lfs->blkaddr);
if (ret)
return ret;
@@ -85,7 +85,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
CPT_AF_LFX_CTL(lf->slot),
- lf_ctrl.u);
+ lf_ctrl.u, lfs->blkaddr);
return ret;
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 314e97354100..ab1678fc564d 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -95,6 +95,7 @@ struct otx2_cptlfs_info {
u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
+ int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
};
static inline void otx2_cpt_free_instruction_queues(
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index 8c899ad531a5..e19af1356f12 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -51,6 +51,7 @@ struct otx2_cptpf_dev {
u8 max_vfs; /* Maximum number of VFs supported by CPT */
u8 enabled_vfs; /* Number of enabled VFs */
u8 kvf_limits; /* Kernel crypto limits */
+ bool has_cpt1;
};
irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index 5277e04badd9..58f47e3ab62e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -451,19 +451,19 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
return 0;
}
-static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
int timeout = 10, ret;
u64 reg = 0;
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, 0x1);
+ CPT_AF_BLK_RST, 0x1, blkaddr);
if (ret)
return ret;
do {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, &reg);
+ CPT_AF_BLK_RST, &reg, blkaddr);
if (ret)
return ret;
@@ -478,11 +478,35 @@ static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
return ret;
}
+static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
+{
+ int ret = 0;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_device_reset(cptpf, BLKADDR_CPT0);
+}
+
+static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
+{
+ u64 cfg;
+
+ cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
+ RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
+ if (cfg & BIT_ULL(11))
+ cptpf->has_cpt1 = true;
+}
+
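
cptpf_check_block_implemented() reads the RVU block-discovery register for BLKADDR_CPT1; per this patch, bit 11 flags the block as implemented. Once has_cpt1 is known, every dual-block helper follows the same shape, which is why blkaddr is now threaded through all the AF register accessors:

	/* The recurring pattern in this patch (cptx_do_thing() is a
	 * hypothetical stand-in for the various cptx_*() helpers):
	 */
	if (cptpf->has_cpt1) {
		ret = cptx_do_thing(cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_do_thing(cptpf, BLKADDR_CPT0);	/* always present */
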
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
union otx2_cptx_af_constants1 af_cnsts1 = {0};
int ret = 0;
+ /* check if 'implemented' bit is set for block BLKADDR_CPT1 */
+ cptpf_check_block_implemented(cptpf);
/* Reset the CPT PF device */
ret = cptpf_device_reset(cptpf);
if (ret)
@@ -490,7 +514,8 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* Get number of SE, IE and AE engines */
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_CONSTANTS1, &af_cnsts1.u);
+ CPT_AF_CONSTANTS1, &af_cnsts1.u,
+ BLKADDR_CPT0);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1dc3ba298139..a531f4c8b441 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -153,16 +153,16 @@ static int get_ucode_type(struct device *dev,
}
static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
- dma_addr_t dma_addr)
+ dma_addr_t dma_addr, int blkaddr)
{
return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_UCODE_BASE(eng),
- (u64)dma_addr);
+ (u64)dma_addr, blkaddr);
}
-static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
+static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf, int blkaddr)
{
- struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_engs_rsvd *engs;
dma_addr_t dma_addr;
int i, bit, ret;
@@ -170,7 +170,7 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
- cptpf->pf_id << RVU_PFVF_PF_SHIFT);
+ cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
if (ret)
return ret;
@@ -187,7 +187,8 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
*/
for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
if (!eng_grp->g->eng_ref_cnt[bit]) {
- ret = __write_ucode_base(cptpf, bit, dma_addr);
+ ret = __write_ucode_base(cptpf, bit, dma_addr,
+ blkaddr);
if (ret)
return ret;
}
@@ -195,23 +196,32 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
return 0;
}
-static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
- void *obj)
+static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
- struct otx2_cpt_bitmap bmap;
+ int ret;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
+}
+
+static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_bitmap bmap,
+ int blkaddr)
+{
int i, timeout = 10;
int busy, ret;
u64 reg = 0;
- bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
- if (!bmap.size)
- return -EINVAL;
-
/* Detach the cores from group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), &reg);
+ CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
@@ -221,7 +231,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), reg);
+ CPT_AF_EXEX_CTL2(i), reg,
+ blkaddr);
if (ret)
return ret;
}
@@ -237,7 +248,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_STS(i), &reg);
+ CPT_AF_EXEX_STS(i), &reg,
+ blkaddr);
if (ret)
return ret;
@@ -253,7 +265,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
if (!eng_grp->g->eng_ref_cnt[i]) {
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x0);
+ CPT_AF_EXEX_CTL(i), 0x0,
+ blkaddr);
if (ret)
return ret;
}
@@ -262,22 +275,39 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
return 0;
}
-static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
- void *obj)
+static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
{
struct otx2_cptpf_dev *cptpf = obj;
struct otx2_cpt_bitmap bmap;
- u64 reg = 0;
- int i, ret;
+ int ret;
bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
if (!bmap.size)
return -EINVAL;
+ if (cptpf->has_cpt1) {
+ ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT0);
+}
+
+static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ struct otx2_cptpf_dev *cptpf,
+ struct otx2_cpt_bitmap bmap,
+ int blkaddr)
+{
+ u64 reg = 0;
+ int i, ret;
+
/* Attach the cores to the group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), &reg);
+ CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
if (ret)
return ret;
@@ -287,7 +317,8 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), reg);
+ CPT_AF_EXEX_CTL2(i), reg,
+ blkaddr);
if (ret)
return ret;
}
@@ -295,15 +326,33 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
/* Enable the cores */
for_each_set_bit(i, bmap.bits, bmap.size) {
- ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox,
- cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x1);
+ ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
+ CPT_AF_EXEX_CTL(i), 0x1,
+ blkaddr);
if (ret)
return ret;
}
- ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+ return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
+}
- return ret;
+static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
+ void *obj)
+{
+ struct otx2_cptpf_dev *cptpf = obj;
+ struct otx2_cpt_bitmap bmap;
+ int ret;
+
+ bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
+ if (!bmap.size)
+ return -EINVAL;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
+ BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
@@ -1140,20 +1189,18 @@ release_fw:
return ret;
}
-int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
+ int blkaddr)
{
- int i, ret, busy, total_cores;
- int timeout = 10;
- u64 reg = 0;
-
- total_cores = cptpf->eng_grps.avail.max_se_cnt +
- cptpf->eng_grps.avail.max_ie_cnt +
- cptpf->eng_grps.avail.max_ae_cnt;
+ int timeout = 10, ret;
+ int i, busy;
+ u64 reg;
/* Disengage the cores from groups */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL2(i), 0x0);
+ CPT_AF_EXEX_CTL2(i), 0x0,
+ blkaddr);
if (ret)
return ret;
@@ -1173,7 +1220,8 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
- CPT_AF_EXEX_STS(i), &reg);
+ CPT_AF_EXEX_STS(i), &reg,
+ blkaddr);
if (ret)
return ret;
@@ -1187,13 +1235,30 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
/* Disable the cores */
for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_EXEX_CTL(i), 0x0);
+ CPT_AF_EXEX_CTL(i), 0x0,
+ blkaddr);
if (ret)
return ret;
}
return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
+int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
+{
+ int total_cores, ret;
+
+ total_cores = cptpf->eng_grps.avail.max_se_cnt +
+ cptpf->eng_grps.avail.max_ie_cnt +
+ cptpf->eng_grps.avail.max_ae_cnt;
+
+ if (cptpf->has_cpt1) {
+ ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
+ if (ret)
+ return ret;
+ }
+ return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
+}
+
void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps)
{
@@ -1354,6 +1419,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
lfs->pdev = pdev;
lfs->reg_base = cptpf->reg_base;
lfs->mbox = &cptpf->afpf_mbox;
+ lfs->blkaddr = BLKADDR_CPT0;
ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
OTX2_CPT_QUEUE_HI_PRIO, 1);
if (ret)
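
Note on the pattern above: each former cpt_*() callback is split into a
cptx_*() helper that takes an explicit block address, plus a thin wrapper
that runs the helper on CPT1 first (when the part has one) and then on
CPT0. A minimal sketch of that dispatch, assuming the otx2 types shown in
the hunks above (cpt_for_each_blk() and do_op() are illustrative names,
not part of the patch):

	static int cpt_for_each_blk(struct otx2_cptpf_dev *cptpf,
				    int (*do_op)(struct otx2_cptpf_dev *cptpf,
						 int blkaddr))
	{
		int ret;

		/* Some parts expose a second CPT block; handle it first */
		if (cptpf->has_cpt1) {
			ret = do_op(cptpf, BLKADDR_CPT1);
			if (ret)
				return ret;
		}
		/* CPT0 is always present */
		return do_op(cptpf, BLKADDR_CPT0);
	}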
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 92e921eceed7..d6314ea9ae89 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CBC routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 4c9362eebefd..e7384d107573 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CCM routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 6d5ce1a66f1e..13f518802343 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CTR routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 77e338dc33f1..7a729dc2bc17 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES ECB routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 19c6ed5baea4..fc9baca13920 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES GCM routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 48dc1c98ca52..eb5c8f689360 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES XCBC routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 13c65deda8e9..446f611726df 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -932,8 +932,10 @@ static int __init nx_powernv_probe_vas(struct device_node *pn)
ret = find_nx_device_tree(dn, chip_id, vasid,
NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
- if (ret)
+ if (ret) {
+ of_node_put(dn);
return ret;
+ }
}
if (!ct_842 || !ct_gzip) {
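
The hunk above plugs a device_node refcount leak: the OF iterators
(for_each_child_of_node() and friends) take a reference on each node they
hand out, so any early return inside the loop has to drop it. A sketch of
the general shape, assuming such an iterator encloses the call site
(do_probe_step() is a hypothetical stand-in):

	for_each_child_of_node(pn, dn) {
		ret = do_probe_step(dn);
		if (ret) {
			of_node_put(dn);	/* drop the iterator's reference */
			return ret;
		}
	}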
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 90d9a37a57f6..b0ad665e4bda 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* SHA-256 routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index eb8627a0f317..c29103a1a0b6 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* SHA-512 routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 1d0e8a1ba160..010e87d9da36 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
@@ -200,7 +200,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
* @sg: sg list head
 * @end: sg list end
 * @delta: the amount we need to crop in order to bound the list.
- *
+ * @nbytes: length of data in the scatterlists or data length - whichever
+ * is greater.
*/
static long int trim_sg_list(struct nx_sg *sg,
struct nx_sg *end,
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c
index 1975bcbee997..ee7cd88bb10a 100644
--- a/drivers/crypto/nx/nx_debugfs.c
+++ b/drivers/crypto/nx/nx_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* debugfs routines supporting the Power 7+ Nest Accelerators driver
*
* Copyright (C) 2011-2012 International Business Machines Inc.
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a45bdcf3026d..0dd4c6b157de 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
dd->err = 0;
}
- err = pm_runtime_get_sync(dd->dev);
+ err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
- pm_runtime_put_noidle(dd->dev);
dev_err(dd->dev, "failed to get sync: %d\n", err);
return err;
}
@@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- err = pm_runtime_get_sync(dev);
+ err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "%s: failed to get_sync(%d)\n",
__func__, err);
@@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
static int omap_aes_resume(struct device *dev)
{
- pm_runtime_get_sync(dev);
+ pm_runtime_resume_and_get(dev);
return 0;
}
#endif
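
The pm_runtime conversions above are mechanical: pm_runtime_get_sync()
bumps the usage count even when the resume fails, so each error path used
to need a pm_runtime_put_noidle(). pm_runtime_resume_and_get() folds that
cleanup in. Roughly what the helper does (a sketch of its behavior, not a
copy of the kernel source):

	static inline int resume_and_get_sketch(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);	/* count is bumped even on error */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* undo the count */
			return ret;
		}
		return 0;
	}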
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 6a9be01fdf33..3524ddd48930 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -224,6 +224,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->uof_get_name = uof_get_name;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index f5990d042c9a..1dd64af22bea 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -212,6 +212,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1d1532e8fb6d..067ca5e17d38 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index cadcf12884c8..30337390513c 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -214,6 +214,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+ hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 04742a6d91ca..51ea88c0b17d 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5527344546e5..ac435b44f1d2 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -173,6 +173,7 @@ struct adf_hw_device_data {
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
+ void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 1aa17303838d..9e560c7d4163 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -179,3 +179,28 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
+
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+ unsigned long accel_mask = hw_data->accel_mask;
+ void __iomem *pmisc_addr;
+ struct adf_bar *pmisc;
+ int pmisc_id;
+ u32 i = 0;
+
+ pmisc_id = hw_data->get_misc_bar_id(hw_data);
+ pmisc = &GET_BARS(accel_dev)[pmisc_id];
+ pmisc_addr = pmisc->virt_addr;
+
+ /* Configure the WDT timers */
+ for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+ /* Enable WDT for sym and dc */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
+ /* Enable WDT for pke */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index 3816e6500352..756b0ddfac5e 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -113,11 +113,24 @@ do { \
/* Power gating */
#define ADF_POWERGATE_PKE BIT(24)
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but this
+ * value should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x2000000
+#define ADF_SSMWDT_OFFSET 0x54
+#define ADF_SSMWDTPKE_OFFSET 0x58
+#define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
+#define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info);
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
#endif
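
The ADF_SSMWDT(i)/ADF_SSMWDTPKE(i) macros above step through what appears
to be a 0x4000-byte per-accelerator CSR window, e.g.:

	/* ADF_SSMWDT(0)    = 0x54 + 0 * 0x4000 = 0x0054
	 * ADF_SSMWDT(1)    = 0x54 + 1 * 0x4000 = 0x4054
	 * ADF_SSMWDTPKE(1) = 0x58 + 1 * 0x4000 = 0x4058 */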
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
index b72ff58e0bc7..000528327b29 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
@@ -99,3 +99,43 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+
+static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
+ u32 *lower)
+{
+ *lower = lower_32_bits(value);
+ *upper = upper_32_bits(value);
+}
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+ u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+ u32 ssm_wdt_pke_high = 0;
+ u32 ssm_wdt_pke_low = 0;
+ u32 ssm_wdt_high = 0;
+ u32 ssm_wdt_low = 0;
+ void __iomem *pmisc_addr;
+ struct adf_bar *pmisc;
+ int pmisc_id;
+
+ pmisc_id = hw_data->get_misc_bar_id(hw_data);
+ pmisc = &GET_BARS(accel_dev)[pmisc_id];
+ pmisc_addr = pmisc->virt_addr;
+
+ /* Convert the 64-bit WDT timer value into 32-bit halves for
+ * MMIO writes to the 32-bit CSRs.
+ */
+ adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
+ adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
+ &ssm_wdt_pke_low);
+
+ /* Enable WDT for sym and dc */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
+ /* Enable WDT for pke */
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
+ ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
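
The gen4 WDT CSRs are 32 bits wide, so the 64-bit timeout is split with
the standard lower_32_bits()/upper_32_bits() mask-and-shift helpers. For
the PKE default defined below:

	/* timer_val_pke = 0x8000000
	 * lower_32_bits(0x8000000ULL) == 0x08000000  ->  ADF_SSMWDTPKEL_OFFSET
	 * upper_32_bits(0x8000000ULL) == 0x0         ->  ADF_SSMWDTPKEH_OFFSET */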
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 8ab62b2ac311..b8fca1ff7aab 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -94,6 +94,18 @@ do { \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_SRV_ARB_EN, (value))
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products, but this
+ * value should correspond to a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
+#define ADF_SSMWDTL_OFFSET 0x54
+#define ADF_SSMWDTH_OFFSET 0x5C
+#define ADF_SSMWDTPKEL_OFFSET 0x58
+#define ADF_SSMWDTPKEH_OFFSET 0x60
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 42029153408e..744c40351428 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -162,6 +162,10 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ /* Set the SSM watchdog timer */
+ if (hw_data->set_ssm_wdtimer)
+ hw_data->set_ssm_wdtimer(accel_dev);
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index c45853463530..e3ad5587be49 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
ret = adf_isr_alloc_msix_entry_table(accel_dev);
if (ret)
- return ret;
- if (adf_enable_msix(accel_dev))
goto err_out;
- if (adf_setup_bh(accel_dev))
- goto err_out;
+ ret = adf_enable_msix(accel_dev);
+ if (ret)
+ goto err_free_msix_table;
- if (adf_request_irqs(accel_dev))
- goto err_out;
+ ret = adf_setup_bh(accel_dev);
+ if (ret)
+ goto err_disable_msix;
+
+ ret = adf_request_irqs(accel_dev);
+ if (ret)
+ goto err_cleanup_bh;
return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_disable_msix:
+ adf_disable_msix(&accel_dev->accel_pci_dev);
+
+err_free_msix_table:
+ adf_isr_free_msix_entry_table(accel_dev);
+
err_out:
- adf_isr_resource_free(accel_dev);
- return -EFAULT;
+ return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
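
The adf_isr rework above replaces a catch-all adf_isr_resource_free() and
a hardcoded -EFAULT with the usual unwind ladder: each step releases only
what was actually acquired, and the real errno is propagated. The generic
shape of the idiom (a(), b(), undo_a() are hypothetical):

	ret = a();
	if (ret)
		goto err_out;
	ret = b();
	if (ret)
		goto err_undo_a;
	return 0;

err_undo_a:
	undo_a();
err_out:
	return ret;	/* the real error, not -EFAULT */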
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index 8b090b7ae8c6..a1b77bd7a894 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -169,7 +169,7 @@ out:
* @msg: Message to send
* @vf_nr: VF number to which the message will be sent
*
- * Function sends a messge from the PF to a VF
+ * Function sends a message from the PF to a VF
*
* Return: 0 on success, error code otherwise.
*/
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 888c1e047295..8ba28409fb74 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
ring->base_addr, ring->dma_addr);
+ ring->base_addr = NULL;
return -EFAULT;
}
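
Clearing ring->base_addr after the dma_free_coherent() above is the usual
defense against a double free: a later cleanup path that does something
like

	if (ring->base_addr)
		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
				  ring->base_addr, ring->dma_addr);

(the exact site is elsewhere in adf_transport.c, not shown in this hunk)
will then skip the already-freed buffer.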
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index 2c98fb63f7b7..e85bd62d134a 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -8,7 +8,7 @@
* adf_vf2pf_init() - send init msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
- * Function sends an init messge from the VF to a PF
+ * Function sends an init message from the VF to a PF
*
* Return: 0 on success, error code otherwise.
*/
@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_init);
* adf_vf2pf_shutdown() - send shutdown msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
- * Function sends a shutdown messge from the VF to a PF
+ * Function sends a shutdown message from the VF to a PF
*
* Return: void
*/
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 38d316a42ba6..888388acb6bd 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
goto err_out;
if (adf_setup_pf2vf_bh(accel_dev))
- goto err_out;
+ goto err_disable_msi;
if (adf_setup_bh(accel_dev))
- goto err_out;
+ goto err_cleanup_pf2vf_bh;
if (adf_request_msi_irq(accel_dev))
- goto err_out;
+ goto err_cleanup_bh;
return 0;
+
+err_cleanup_bh:
+ adf_cleanup_bh(accel_dev);
+
+err_cleanup_pf2vf_bh:
+ adf_cleanup_pf2vf_bh(accel_dev);
+
+err_disable_msi:
+ adf_disable_msi(accel_dev);
+
err_out:
- adf_vf_isr_resource_free(accel_dev);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index ff78c73c47e3..f998ed58457c 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
int n = sg_nents(sgl);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
- dma_addr_t blp;
- dma_addr_t bloutp = 0;
+ dma_addr_t blp = DMA_MAPPING_ERROR;
+ dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
@@ -731,9 +731,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(!bufl))
return -ENOMEM;
- blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, blp)))
- goto err_in;
+ for_each_sg(sgl, sg, n, i)
+ bufl->bufers[i].addr = DMA_MAPPING_ERROR;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -750,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
+ blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, blp)))
+ goto err_in;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -764,10 +766,11 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err_in;
- bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err_out;
+
bufers = buflout->bufers;
+ for_each_sg(sglout, sg, n, i)
+ bufers[i].addr = DMA_MAPPING_ERROR;
+
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;
@@ -784,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
}
buflout->num_bufs = sg_nctr;
buflout->num_mapped_bufs = sg_nctr;
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, bloutp)))
+ goto err_out;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
qat_req->buf.sz_out = sz_out;
@@ -795,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
return 0;
err_out:
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+
n = sg_nents(sglout);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
kfree(buflout);
err_in:
+ if (!dma_mapping_error(dev, blp))
+ dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -813,8 +823,6 @@ err_in:
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, blp))
- dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n");
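
The qat_algs reordering above has two parts: every descriptor address is
pre-filled with DMA_MAPPING_ERROR, and the descriptor table itself is
mapped only after it is fully populated. The unwind paths can then use
dma_mapping_error() as a "was this ever mapped?" test. Condensed into one
hypothetical helper over the structures shown above:

	static void unmap_mapped_bufs(struct device *dev,
				      struct qat_alg_buf_list *bl, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (!dma_mapping_error(dev, bl->bufers[i].addr))
				dma_unmap_single(dev, bl->bufers[i].addr,
						 bl->bufers[i].len,
						 DMA_BIDIRECTIONAL);
	}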
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index c972554a755e..29999da716cc 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
struct scatterlist result_sg;
struct sg_table dst_tbl;
struct scatterlist *dst_sg;
- struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
struct skcipher_request fallback_req; // keep at the end
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index a73db2a5637f..dceb9579d87a 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
return cfg;
}
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
- u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
struct ahash_request *req = ahash_request_cast(async_req);
struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -295,19 +294,18 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
{
u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
- unsigned int xtsdusize;
qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
enckeylen / 2);
qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
- /* xts du size 512B */
- xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
- qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+ /* Set the data unit size to cryptlen. Anything else causes the
+ * crypto engine to return incorrect results.
+ */
+ qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
}
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
- u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
- qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+ qce_write(qce, REG_ENCR_SEG_START, 0);
if (IS_CTR(flags)) {
qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
qce_write(qce, REG_CNTR_MASK2, ~0);
}
- qce_write(qce, REG_SEG_SIZE, totallen);
+ qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
/* get little endianness */
config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
}
#endif
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
{
switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
- return qce_setup_regs_skcipher(async_req, totallen, offset);
+ return qce_setup_regs_skcipher(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
- return qce_setup_regs_ahash(async_req, totallen, offset);
+ return qce_setup_regs_ahash(async_req);
#endif
default:
return -EINVAL;
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 85ba16418a04..3bc244bcca2d 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -94,7 +94,6 @@ struct qce_alg_template {
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
int qce_check_status(struct qce_device *qce, u32 *status);
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
- u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
#endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 61c418c12345..8e6fcf2c21cc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -12,9 +12,15 @@
#include "core.h"
#include "sha.h"
-/* crypto hw padding constant for first operation */
-#define SHA_PADDING 64
-#define SHA_PADDING_MASK (SHA_PADDING - 1)
+struct qce_sha_saved_state {
+ u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE];
+ u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE];
+ __be32 byte_count[2];
+ unsigned int pending_buflen;
+ unsigned int flags;
+ u64 count;
+ bool first_blk;
+};
static LIST_HEAD(ahash_algs);
@@ -107,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
@@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req)
static int qce_ahash_export(struct ahash_request *req, void *out)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned long flags = rctx->flags;
- unsigned int digestsize = crypto_ahash_digestsize(ahash);
- unsigned int blocksize =
- crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
-
- if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
- struct sha1_state *out_state = out;
-
- out_state->count = rctx->count;
- qce_cpu_to_be32p_array((__be32 *)out_state->state,
- rctx->digest, digestsize);
- memcpy(out_state->buffer, rctx->buf, blocksize);
- } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
- struct sha256_state *out_state = out;
-
- out_state->count = rctx->count;
- qce_cpu_to_be32p_array((__be32 *)out_state->state,
- rctx->digest, digestsize);
- memcpy(out_state->buf, rctx->buf, blocksize);
- } else {
- return -EINVAL;
- }
+ struct qce_sha_saved_state *export_state = out;
- return 0;
-}
-
-static int qce_import_common(struct ahash_request *req, u64 in_count,
- const u32 *state, const u8 *buffer, bool hmac)
-{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned int digestsize = crypto_ahash_digestsize(ahash);
- unsigned int blocksize;
- u64 count = in_count;
-
- blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
- rctx->count = in_count;
- memcpy(rctx->buf, buffer, blocksize);
-
- if (in_count <= blocksize) {
- rctx->first_blk = 1;
- } else {
- rctx->first_blk = 0;
- /*
- * For HMAC, there is a hardware padding done when first block
- * is set. Therefore the byte_count must be incremened by 64
- * after the first block operation.
- */
- if (hmac)
- count += SHA_PADDING;
- }
-
- rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
- rctx->byte_count[1] = (__force __be32)(count >> 32);
- qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
- digestsize);
- rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+ memcpy(export_state->pending_buf, rctx->buf, rctx->buflen);
+ memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest));
+ export_state->byte_count[0] = rctx->byte_count[0];
+ export_state->byte_count[1] = rctx->byte_count[1];
+ export_state->pending_buflen = rctx->buflen;
+ export_state->count = rctx->count;
+ export_state->first_blk = rctx->first_blk;
+ export_state->flags = rctx->flags;
return 0;
}
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
- struct qce_sha_reqctx *rctx;
- unsigned long flags;
- bool hmac;
- int ret;
-
- ret = qce_ahash_init(req);
- if (ret)
- return ret;
-
- rctx = ahash_request_ctx(req);
- flags = rctx->flags;
- hmac = IS_SHA_HMAC(flags);
-
- if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
- const struct sha1_state *state = in;
-
- ret = qce_import_common(req, state->count, state->state,
- state->buffer, hmac);
- } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
- const struct sha256_state *state = in;
+ struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+ const struct qce_sha_saved_state *import_state = in;
- ret = qce_import_common(req, state->count, state->state,
- state->buf, hmac);
- }
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->count = import_state->count;
+ rctx->buflen = import_state->pending_buflen;
+ rctx->first_blk = import_state->first_blk;
+ rctx->flags = import_state->flags;
+ rctx->byte_count[0] = import_state->byte_count[0];
+ rctx->byte_count[1] = import_state->byte_count[1];
+ memcpy(rctx->buf, import_state->pending_buf, rctx->buflen);
+ memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest));
- return ret;
+ return 0;
}
static int qce_ahash_update(struct ahash_request *req)
@@ -270,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req)
/* calculate how many bytes will be hashed later */
hash_later = total % blocksize;
+
+ /*
+ * At this point, there is more than one block size of data. If
+ * the available data to transfer is exactly a multiple of the block
+ * size, save the last block to be transferred in qce_ahash_final
+ * (with the last block bit set) if this is indeed the end of the
+ * data stream. If not, this saved block will be transferred as part
+ * of the next update. If this block is not held back and this is
+ * indeed the end of the data stream, the digest obtained will be
+ * wrong, since qce_ahash_final will see that rctx->buflen is 0 and
+ * return without doing anything, which in turn means that a digest
+ * will not be copied to the destination result buffer.
+ * qce_ahash_final cannot simply be allowed to proceed when
+ * rctx->buflen is 0 because the crypto engine BAM does not allow
+ * zero length transfers.
+ */
+ if (!hash_later)
+ hash_later = blocksize;
+
if (hash_later) {
unsigned int src_offset = req->nbytes - hash_later;
scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
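
A worked example of the hold-back logic above, assuming a 64-byte block
size: with 192 bytes available, hash_later = 192 % 64 = 0, which the new
check bumps to 64; 128 bytes go to the engine now and the last 64 are
kept in rctx->buf, to be flushed either by the next update or by
qce_ahash_final with the last-block bit set.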
@@ -450,7 +415,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "sha1-qce",
.digestsize = SHA1_DIGEST_SIZE,
.blocksize = SHA1_BLOCK_SIZE,
- .statesize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha1,
},
{
@@ -459,7 +424,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "sha256-qce",
.digestsize = SHA256_DIGEST_SIZE,
.blocksize = SHA256_BLOCK_SIZE,
- .statesize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha256,
},
{
@@ -468,7 +433,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "hmac-sha1-qce",
.digestsize = SHA1_DIGEST_SIZE,
.blocksize = SHA1_BLOCK_SIZE,
- .statesize = sizeof(struct sha1_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha1,
},
{
@@ -477,7 +442,7 @@ static const struct qce_ahash_def ahash_def[] = {
.drv_name = "hmac-sha256-qce",
.digestsize = SHA256_DIGEST_SIZE,
.blocksize = SHA256_BLOCK_SIZE,
- .statesize = sizeof(struct sha256_state),
+ .statesize = sizeof(struct qce_sha_saved_state),
.std_iv = std_iv_sha256,
},
};
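
With the export format now the driver-private qce_sha_saved_state, the
.statesize updates above are what guarantee callers a large enough
buffer. The caller-side contract, sketched with the generic ahash API
(the function and flow are illustrative, not qce-specific):

	static int save_and_resume(struct ahash_request *req,
				   struct ahash_request *req2)
	{
		u8 state[HASH_MAX_STATESIZE];	/* >= crypto_ahash_statesize() */
		int ret;

		ret = crypto_ahash_export(req, state);	/* partial state out */
		if (ret)
			return ret;
		return crypto_ahash_import(req2, state); /* resume elsewhere */
	}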
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..c0a0d8c4fce1 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
@@ -143,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+ ret = qce_start(async_req, tmpl->crypto_alg_type);
if (ret)
goto error_terminate;
@@ -167,16 +168,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+ unsigned int __keylen;
int ret;
if (!key || !keylen)
return -EINVAL;
- switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+ /*
+ * AES-XTS with key1 == key2 is not supported by the crypto engine.
+ * Revisit to request a fallback cipher in this case.
+ */
+ if (IS_XTS(flags)) {
+ __keylen = keylen >> 1;
+ if (!memcmp(key, key + __keylen, __keylen))
+ return -ENOKEY;
+ } else {
+ __keylen = keylen;
+ }
+
+ switch (__keylen) {
case AES_KEYSIZE_128:
case AES_KEYSIZE_256:
memcpy(ctx->enc_key, key, keylen);
break;
+ case AES_KEYSIZE_192:
+ break;
+ default:
+ return -EINVAL;
}
ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
@@ -204,12 +222,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
unsigned int keylen)
{
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ u32 _key[6];
int err;
err = verify_skcipher_des3_key(ablk, key);
if (err)
return err;
+ /*
+ * The crypto engine does not support any two keys
+ * being the same for triple DES algorithms. The
+ * verify_skcipher_des3_key() helper does not check for all of
+ * the conditions below. Return -ENOKEY if any two keys
+ * are the same. Revisit to see if a fallback cipher
+ * is needed to handle this condition.
+ */
+ memcpy(_key, key, DES3_EDE_KEY_SIZE);
+ if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+ !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+ !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+ return -ENOKEY;
+
ctx->enc_keylen = keylen;
memcpy(ctx->enc_key, key, keylen);
return 0;
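
The XOR test above is a compact pairwise-equality check on the three
64-bit DES keys, each held as two 32-bit words in _key[]:

	/* K1 == K2  <=>  ((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) == 0
	 * so the three !(...) clauses reject K1 == K2, K2 == K3 and
	 * K1 == K3, the degenerate 3DES keys the engine cannot handle. */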
@@ -221,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ unsigned int blocksize = crypto_skcipher_blocksize(tfm);
int keylen;
int ret;
@@ -228,14 +262,31 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
- /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
- * is not a multiple of it; pass such requests to the fallback
+ /* CE does not handle 0 length messages */
+ if (!req->cryptlen)
+ return 0;
+
+ /*
+ * ECB and CBC algorithms require message lengths to be
+ * multiples of block size.
+ */
+ if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+ if (!IS_ALIGNED(req->cryptlen, blocksize))
+ return -EINVAL;
+
+ /*
+ * Conditions for requesting a fallback cipher:
+ * - AES-192 (not supported by the crypto engine (CE))
+ * - AES-XTS request with len <= 512 bytes (not recommended on the CE)
+ * - AES-XTS request with len > QCE_SECTOR_SIZE that is not a
+ * multiple of it (revisit this condition to check if it is
+ * needed in all versions of the CE)
*/
if (IS_AES(rctx->flags) &&
- (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
- req->cryptlen <= aes_sw_max_len) ||
- (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
- req->cryptlen % QCE_SECTOR_SIZE))) {
+ ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+ (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+ (req->cryptlen > QCE_SECTOR_SIZE &&
+ req->cryptlen % QCE_SECTOR_SIZE))))) {
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
@@ -307,7 +358,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
.name = "ecb(aes)",
.drv_name = "ecb-aes-qce",
.blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = 0,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 81befe7febaa..ed03058497bc 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -48,7 +48,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
struct ahash_request *req = ahash_request_cast(dev->async_req);
struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
- int reg_status = 0;
+ int reg_status;
reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 682c8a450a57..55aa3a71169b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
@@ -401,7 +402,7 @@ static const struct samsung_aes_variant exynos_aes_data = {
static const struct samsung_aes_variant exynos5433_slim_aes_data = {
.aes_offset = 0x400,
.hash_offset = 0x800,
- .clk_names = { "pclk", "aclk", },
+ .clk_names = { "aclk", "pclk", },
};
static const struct of_device_id s5p_sss_dt_match[] = {
@@ -424,13 +425,9 @@ MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
static inline const struct samsung_aes_variant *find_s5p_sss_version
(const struct platform_device *pdev)
{
- if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
- const struct of_device_id *match;
+ if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
+ return of_device_get_match_data(&pdev->dev);
- match = of_match_node(s5p_sss_dt_match,
- pdev->dev.of_node);
- return (const struct samsung_aes_variant *)match->data;
- }
return (const struct samsung_aes_variant *)
platform_get_device_id(pdev)->driver_data;
}
@@ -2159,7 +2156,7 @@ static struct skcipher_alg algs[] = {
static int s5p_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int i, j, err = -ENODEV;
+ int i, j, err;
const struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
@@ -2189,14 +2186,14 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
pdata->res = res;
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ioaddr)) {
if (!pdata->use_hash)
return PTR_ERR(pdata->ioaddr);
/* try AES without HASH */
res->end -= 0x300;
pdata->use_hash = false;
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_ioremap_resource(dev, res);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
}
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index f300b0a5958a..1c6929fb3a13 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -69,8 +69,24 @@
/* Max Authentication tag size */
#define SA_MAX_AUTH_TAG_SZ 64
-#define PRIV_ID 0x1
-#define PRIV 0x1
+enum sa_algo_id {
+ SA_ALG_CBC_AES = 0,
+ SA_ALG_EBC_AES,
+ SA_ALG_CBC_DES3,
+ SA_ALG_ECB_DES3,
+ SA_ALG_SHA1,
+ SA_ALG_SHA256,
+ SA_ALG_SHA512,
+ SA_ALG_AUTHENC_SHA1_AES,
+ SA_ALG_AUTHENC_SHA256_AES,
+};
+
+struct sa_match_data {
+ u8 priv;
+ u8 priv_id;
+ u32 supported_algos;
+ bool skip_engine_control;
+};
static struct device *sa_k3_dev;
@@ -696,8 +712,9 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
}
static
-int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
- u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
+int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
+ const u8 *enc_key, u16 enc_key_sz,
+ const u8 *auth_key, u16 auth_key_sz,
struct algo_data *ad, u8 enc, u32 *swinfo)
{
int enc_sc_offset = 0;
@@ -732,8 +749,8 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
memcpy(&sc_buf[2], &sc_id, 2);
sc_buf[4] = 0x0;
- sc_buf[5] = PRIV_ID;
- sc_buf[6] = PRIV;
+ sc_buf[5] = match_data->priv_id;
+ sc_buf[6] = match_data->priv;
sc_buf[7] = 0x0;
/* Prepare context for encryption engine */
@@ -892,8 +909,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
return ret;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
- &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
+ ad, 1, &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -905,8 +922,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
- &ctx->dec.epib[1]))
+ if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
+ ad, 0, &ctx->dec.epib[1]))
goto badkey;
cfg.enc_eng_id = ad->enc_eng.eng_id;
@@ -1106,7 +1123,7 @@ static int sa_run(struct sa_req *req)
else
dma_rx = pdata->dma_rx1;
- ddev = dma_rx->device->dev;
+ ddev = dmaengine_get_dma_device(pdata->dma_tx);
rxd->ddev = ddev;
memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
@@ -1146,8 +1163,10 @@ static int sa_run(struct sa_req *req)
mapped_sg->sgt.sgl = src;
mapped_sg->sgt.orig_nents = src_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
- if (ret)
+ if (ret) {
+ kfree(rxd);
return ret;
+ }
mapped_sg->dir = dir_src;
mapped_sg->mapped = true;
@@ -1155,8 +1174,10 @@ static int sa_run(struct sa_req *req)
mapped_sg->sgt.sgl = req->src;
mapped_sg->sgt.orig_nents = sg_nents;
ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
- if (ret)
+ if (ret) {
+ kfree(rxd);
return ret;
+ }
mapped_sg->dir = dir_src;
mapped_sg->mapped = true;
@@ -1446,9 +1467,10 @@ static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
cfg.akey = NULL;
cfg.akey_len = 0;
+ ctx->dev_data = dev_get_drvdata(sa_k3_dev);
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
- &ctx->enc.epib[1]))
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
+ ad, 0, &ctx->enc.epib[1]))
goto badkey;
cmdl_len = sa_format_cmdl_gen(&cfg,
@@ -1716,6 +1738,7 @@ static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
int ret;
memzero_explicit(ctx, sizeof(*ctx));
+ ctx->dev_data = data;
ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->shash)) {
@@ -1817,8 +1840,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
cfg.akey_len = keys.authkeylen;
/* Setup Encryption Security Context & Command label template */
- if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
- keys.authkey, keys.authkeylen,
+ if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
+ keys.enckeylen, keys.authkey, keys.authkeylen,
ad, 1, &ctx->enc.epib[1]))
return -EINVAL;
@@ -1831,8 +1854,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc,
ctx->enc.cmdl_size = cmdl_len;
/* Setup Decryption Security Context & Command label template */
- if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
- keys.authkey, keys.authkeylen,
+ if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
+ keys.enckeylen, keys.authkey, keys.authkeylen,
ad, 0, &ctx->dec.epib[1]))
return -EINVAL;
@@ -1950,7 +1973,7 @@ static int sa_aead_decrypt(struct aead_request *req)
}
static struct sa_alg_tmpl sa_algs[] = {
- {
+ [SA_ALG_CBC_AES] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(aes)",
@@ -1973,7 +1996,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_EBC_AES] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(aes)",
@@ -1995,7 +2018,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_CBC_DES3] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "cbc(des3_ede)",
@@ -2018,7 +2041,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_ECB_DES3] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
.base.cra_name = "ecb(des3_ede)",
@@ -2040,7 +2063,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_decrypt,
}
},
- {
+ [SA_ALG_SHA1] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2069,7 +2092,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_SHA256] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2098,7 +2121,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_SHA512] = {
.type = CRYPTO_ALG_TYPE_AHASH,
.alg.ahash = {
.halg.base = {
@@ -2127,7 +2150,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.import = sa_sha_import,
},
},
- {
+ [SA_ALG_AUTHENC_SHA1_AES] = {
.type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2154,7 +2177,7 @@ static struct sa_alg_tmpl sa_algs[] = {
.decrypt = sa_aead_decrypt,
},
},
- {
+ [SA_ALG_AUTHENC_SHA256_AES] = {
.type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2185,13 +2208,19 @@ static struct sa_alg_tmpl sa_algs[] = {
};
/* Register the algorithms in crypto framework */
-static void sa_register_algos(const struct device *dev)
+static void sa_register_algos(struct sa_crypto_data *dev_data)
{
+ const struct sa_match_data *match_data = dev_data->match_data;
+ struct device *dev = dev_data->dev;
char *alg_name;
u32 type;
int i, err;
for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ /* Skip unsupported algos */
+ if (!(match_data->supported_algos & BIT(i)))
+ continue;
+
type = sa_algs[i].type;
if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
alg_name = sa_algs[i].alg.skcipher.base.cra_name;
@@ -2329,14 +2358,39 @@ static int sa_link_child(struct device *dev, void *data)
return 0;
}
+static struct sa_match_data am654_match_data = {
+ .priv = 1,
+ .priv_id = 1,
+ .supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0),
+};
+
+static struct sa_match_data am64_match_data = {
+ .priv = 0,
+ .priv_id = 0,
+ .supported_algos = BIT(SA_ALG_CBC_AES) |
+ BIT(SA_ALG_EBC_AES) |
+ BIT(SA_ALG_SHA256) |
+ BIT(SA_ALG_SHA512) |
+ BIT(SA_ALG_AUTHENC_SHA256_AES),
+ .skip_engine_control = true,
+};
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
+ { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
+ { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
static int sa_ul_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct resource *res;
static void __iomem *saul_base;
struct sa_crypto_data *dev_data;
- u32 val;
int ret;
dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
@@ -2350,7 +2404,7 @@ static int sa_ul_probe(struct platform_device *pdev)
dev_set_drvdata(sa_k3_dev, dev_data);
pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
ret);
@@ -2362,18 +2416,28 @@ static int sa_ul_probe(struct platform_device *pdev)
if (ret)
goto disable_pm_runtime;
+ match = of_match_node(of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ dev_data->match_data = match->data;
+
spin_lock_init(&dev_data->scid_lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
saul_base = devm_ioremap_resource(dev, res);
dev_data->base = saul_base;
- val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
- SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
- SA_EEC_TRNG_EN;
- writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+ if (!dev_data->match_data->skip_engine_control) {
+ u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+ SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+ SA_EEC_TRNG_EN;
- sa_register_algos(dev);
+ writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+ }
+
+ sa_register_algos(dev_data);
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
if (ret)
@@ -2419,13 +2483,6 @@ static int sa_ul_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id of_match[] = {
- {.compatible = "ti,j721e-sa2ul",},
- {.compatible = "ti,am654-sa2ul",},
- {},
-};
-MODULE_DEVICE_TABLE(of, of_match);
-
static struct platform_driver sa_ul_driver = {
.probe = sa_ul_probe,
.remove = sa_ul_remove,
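
For reference, the am654 mask above enables every entry in sa_algs[]:
GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0) sets bits 0..8 of supported_algos,
while am64_match_data sets only five of them, and sa_register_algos()
now skips any sa_algs[i] whose BIT(i) is clear. A minimal check of the
arithmetic:

	/* SA_ALG_AUTHENC_SHA256_AES == 8, so
	 * GENMASK(8, 0) == 0x1ff		(bits 0..8 set)
	 * BIT(SA_ALG_SHA1) == 0x10 is clear in am64_match_data, so
	 * sa_register_algos() skips the SHA1 template on am64. */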
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index f597ddecde34..ed66d1f111db 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -171,9 +171,12 @@ struct sa_tfm_ctx;
#define SA_UNSAFE_DATA_SZ_MIN 240
#define SA_UNSAFE_DATA_SZ_MAX 256
+struct sa_match_data;
+
/**
* struct sa_crypto_data - Crypto driver instance data
* @base: Base address of the register space
+ * @match_data: Pointer to SoC-specific match data
* @pdev: Platform device pointer
* @sc_pool: security context pool
* @dev: Device pointer
@@ -189,6 +192,7 @@ struct sa_tfm_ctx;
*/
struct sa_crypto_data {
void __iomem *base;
+ const struct sa_match_data *match_data;
struct platform_device *pdev;
struct dma_pool *sc_pool;
struct device *dev;
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 2a4793176c71..7389a0536ff0 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
- pm_runtime_get_sync(cryp->dev);
+ pm_runtime_resume_and_get(cryp->dev);
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
if (!cryp)
return -ENODEV;
- ret = pm_runtime_get_sync(cryp->dev);
+ ret = pm_runtime_resume_and_get(cryp->dev);
if (ret < 0)
return ret;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 7ac0573ef663..389de9e3302d 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
@@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
@@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
- pm_runtime_get_sync(hdev->dev);
+ pm_runtime_resume_and_get(hdev->dev);
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
@@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (!hdev)
return -ENODEV;
- ret = pm_runtime_get_sync(hdev->dev);
+ ret = pm_runtime_resume_and_get(hdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 9866c2a5e9a7..759d0d9786fd 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -15,7 +15,7 @@
#include "cryp_p.h"
#include "cryp.h"
-/**
+/*
* cryp_wait_until_done - wait until the device logic is not busy
*/
void cryp_wait_until_done(struct cryp_device_data *device_data)
@@ -285,6 +285,7 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data,
* other device context parameter
* @device_data: Pointer to the device data struct for base address.
* @ctx: Crypto device context
+ * @cryp_mode: Mode of operation: polling, interrupt or DMA
*/
void cryp_save_device_context(struct cryp_device_data *device_data,
struct cryp_device_context *ctx,
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
index 8da7f87b339b..db5713d7c940 100644
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index c3adeb2e5823..30cdd5253929 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
@@ -62,7 +62,7 @@ struct cryp_driver_data {
/**
* struct cryp_ctx - Crypto context
* @config: Crypto mode.
- * @key[CRYP_MAX_KEY_SIZE]: Key.
+ * @key: Key array.
* @keylen: Length of key.
* @iv: Pointer to initialization vector.
* @indata: Pointer to indata.
@@ -73,6 +73,7 @@ struct cryp_driver_data {
* @updated: Updated flag.
* @dev_ctx: Device dependent context.
* @device: Pointer to the device.
+ * @session_id: Atomic session ID.
*/
struct cryp_ctx {
struct cryp_config config;
@@ -608,12 +609,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
chan = ctx->device->dma.chan_mem2cryp;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
- ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
+ ctx->device->dma.nents_src, DMA_TO_DEVICE);
chan = ctx->device->dma.chan_cryp2mem;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
- ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
+ ctx->device->dma.nents_dst, DMA_FROM_DEVICE);
}
static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg,
@@ -1290,7 +1291,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(device_data->base)) {
- dev_err(dev, "[%s]: ioremap failed!", __func__);
ret = PTR_ERR(device_data->base);
goto out;
}
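The sg_len to nents renames in cryp_dma_done() encode a DMA-API rule: dma_map_sg() may merge scatterlist entries and return fewer mapped segments than it was handed, and while the device is programmed with the returned count, dma_unmap_sg() must be called with the original nents. A minimal sketch under hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int demo_dma_map(struct device *dev, struct scatterlist *sg,
			int nents)
{
	int mapped;

	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* ... program the DMA engine with 'mapped' segments ... */

	/* Unmap with the original nents, never the returned count. */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}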
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c
index 7ebde69e8c76..6d2f07bec98a 100644
--- a/drivers/crypto/ux500/cryp/cryp_irq.c
+++ b/drivers/crypto/ux500/cryp/cryp_irq.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h
index 1984f30100ff..da90029ea141 100644
--- a/drivers/crypto/ux500/cryp/cryp_irq.h
+++ b/drivers/crypto/ux500/cryp/cryp_irq.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -19,7 +19,7 @@ enum cryp_irq_src_id {
CRYP_IRQ_SRC_ALL = 0x3
};
-/**
+/*
* M0 Functions
*/
void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src);
diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h
index 879ed68a12d7..4981a3f461e5 100644
--- a/drivers/crypto/ux500/cryp/cryp_irqp.h
+++ b/drivers/crypto/ux500/cryp/cryp_irqp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -13,7 +13,7 @@
#include "cryp_irq.h"
-/**
+/*
*
* CRYP Registers - Offset mapping
* +-----------------+
diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h
index 0df84eaa8531..60b47fe4de35 100644
--- a/drivers/crypto/ux500/cryp/cryp_p.h
+++ b/drivers/crypto/ux500/cryp/cryp_p.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/**
+/*
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
@@ -17,7 +17,7 @@
#include "cryp.h"
#include "cryp_irqp.h"
-/**
+/*
* Generic Macros
*/
#define CRYP_SET_BITS(reg_name, mask) \
@@ -34,7 +34,7 @@
writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \
(((u32)val << shift) & (mask))), reg)
-/**
+/*
* CRYP specific Macros
*/
#define CRYP_PERIPHERAL_ID0 0xE3
@@ -48,7 +48,7 @@
#define CRYP_PCELL_ID2 0x05
#define CRYP_PCELL_ID3 0xB1
-/**
+/*
* CRYP register default values
*/
#define MAX_DEVICE_SUPPORT 2
@@ -62,7 +62,7 @@
#define CRYP_KEY_DEFAULT 0x0
#define CRYP_INIT_VECT_DEFAULT 0x0
-/**
+/*
* CRYP Control register specific mask
*/
#define CRYP_CR_SECURE_MASK BIT(0)
@@ -81,7 +81,6 @@
CRYP_CR_PRLG_MASK |\
CRYP_CR_ALGODIR_MASK |\
CRYP_CR_ALGOMODE_MASK |\
- CRYP_CR_DATATYPE_MASK |\
CRYP_CR_KEYSIZE_MASK |\
CRYP_CR_KEYRDEN_MASK |\
CRYP_CR_DATATYPE_MASK)
@@ -91,7 +90,7 @@
#define CRYP_SR_IFEM_MASK BIT(0)
#define CRYP_SR_BUSY_MASK BIT(4)
-/**
+/*
* Bit position used while setting bits in register
*/
#define CRYP_CR_PRLG_POS 1
@@ -107,7 +106,7 @@
#define CRYP_SR_BUSY_POS 4
-/**
+/*
* CRYP PCRs------PC_NAND control register
* BIT_MASK
*/
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index da284b0ea1b2..ecb7412e84e3 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -190,7 +190,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
chan = ctx->device->dma.chan_mem2hash;
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
- ctx->device->dma.sg_len, DMA_TO_DEVICE);
+ ctx->device->dma.nents, DMA_TO_DEVICE);
}
static int hash_dma_write(struct hash_ctx *ctx,
@@ -356,7 +356,7 @@ out:
/**
* hash_get_device_data - Checks for an available hash device and return it.
- * @hash_ctx: Structure for the hash context.
+ * @ctx: Structure for the hash context.
* @device_data: Structure for the hash device.
*
* This function checks for an available hash device and returns it to
@@ -542,7 +542,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
}
/**
- * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
+ * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*
* Initialize structures.
@@ -585,6 +585,7 @@ static int ux500_hash_init(struct ahash_request *req)
* @device_data: Structure for the hash device.
* @message: Block (512 bits) of message to be written to
* the HASH hardware.
+ * @length: Message length
*
*/
static void hash_processblock(struct hash_device_data *device_data,
@@ -1295,7 +1296,7 @@ void hash_get_digest(struct hash_device_data *device_data,
}
/**
- * hash_update - The hash update function for SHA1/SHA2 (SHA256).
+ * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*/
static int ahash_update(struct ahash_request *req)
@@ -1315,7 +1316,7 @@ static int ahash_update(struct ahash_request *req)
}
/**
- * hash_final - The hash final function for SHA1/SHA2 (SHA256).
+ * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
* @req: The hash request for the job.
*/
static int ahash_final(struct ahash_request *req)
@@ -1615,9 +1616,6 @@ static struct hash_algo_template hash_algs[] = {
}
};
-/**
- * hash_algs_register_all -
- */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
int ret;
@@ -1640,9 +1638,6 @@ unreg:
return ret;
}
-/**
- * hash_algs_unregister_all -
- */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
int i;
@@ -1681,7 +1676,6 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(device_data->base)) {
- dev_err(dev, "%s: ioremap() failed!\n", __func__);
ret = PTR_ERR(device_data->base);
goto out;
}
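The dropped dev_err() calls in both probe functions (here and in cryp_core.c above) are redundant because devm_ioremap_resource() already prints a descriptive diagnostic on every failure path; the caller only needs to propagate the PTR_ERR() value. A minimal probe sketch of the resulting pattern:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* no extra dev_err() needed */

	return 0;
}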
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d05c02baebcf..ec06189fbf99 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index d88084447f1c..ed0debc7acb5 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CBC routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 79ba062ee1c1..9a3da8cd62f3 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES CTR routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 9fee1b1532a4..dabbccb41550 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* AES XTS routines supporting VMX In-core instructions on Power 8
*
* Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 14807ac2e3b9..5bc5710a6de0 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* GHASH routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015, 2019 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index a40d08e75fc0..7eb713cc87c8 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Routines supporting VMX instructions on the Power 8
*
* Copyright (C) 2015 International Business Machines Inc.