author     Linus Torvalds <torvalds@linux-foundation.org>    2018-01-31 14:22:45 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-01-31 14:22:45 -0800
commit     a103950e0dd2058df5e8a8d4a915707bdcf205f0 (patch)
tree       af5d091f768db4ed7a12fc3c5484d3e20ad9d514 /drivers
parent     2cfa1cd3da14814a1e9ec6a4fce8612637d3ee3d (diff)
parent     2d55807b7f7bf62bb05a8b91247c5eb7cd19ac04 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:

 "API:
   - Enforce the setting of keys for keyed aead/hash/skcipher algorithms.
   - Add multibuf speed tests in tcrypt.

  Algorithms:
   - Improve performance of sha3-generic.
   - Add native sha512 support on arm64.
   - Add v8.2 Crypto Extentions version of sha3/sm3 on arm64.
   - Avoid hmac nesting by requiring underlying algorithm to be unkeyed.
   - Add cryptd_max_cpu_qlen module parameter to cryptd.

  Drivers:
   - Add support for EIP97 engine in inside-secure.
   - Add inline IPsec support to chelsio.
   - Add RevB core support to crypto4xx.
   - Fix AEAD ICV check in crypto4xx.
   - Add stm32 crypto driver.
   - Add support for BCM63xx platforms in bcm2835 and remove bcm63xx.
   - Add Derived Key Protocol (DKP) support in caam.
   - Add Samsung Exynos True RNG driver.
   - Add support for Exynos5250+ SoCs in exynos PRNG driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (166 commits)
  crypto: picoxcell - Fix error handling in spacc_probe()
  crypto: arm64/sha512 - fix/improve new v8.2 Crypto Extensions code
  crypto: arm64/sm3 - new v8.2 Crypto Extensions implementation
  crypto: arm64/sha3 - new v8.2 Crypto Extensions implementation
  crypto: testmgr - add new testcases for sha3
  crypto: sha3-generic - export init/update/final routines
  crypto: sha3-generic - simplify code
  crypto: sha3-generic - rewrite KECCAK transform to help the compiler optimize
  crypto: sha3-generic - fixes for alignment and big endian operation
  crypto: aesni - handle zero length dst buffer
  crypto: artpec6 - remove select on non-existing CRYPTO_SHA384
  hwrng: bcm2835 - Remove redundant dev_err call in bcm2835_rng_probe()
  crypto: stm32 - remove redundant dev_err call in stm32_cryp_probe()
  crypto: axis - remove unnecessary platform_get_resource() error check
  crypto: testmgr - test misuse of result in ahash
  crypto: inside-secure - make function safexcel_try_push_requests static
  crypto: aes-generic - fix aes-generic regression on powerpc
  crypto: chelsio - Fix indentation warning
  crypto: arm64/sha1-ce - get rid of literal pool
  crypto: arm64/sha2-ce - move the round constant table to .rodata section
  ...
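For readers of the "enforce the setting of keys" item above, the effect can be sketched with a tiny test module. This is an editorial illustration only and is not part of the merge: it assumes the 4.15/4.16-era shash interface and that a keyed algorithm such as hmac(sha256) now refuses to operate before crypto_shash_setkey() is called, typically returning an error such as -ENOKEY.

/*
 * Hypothetical demo module (not from this merge): shows unkeyed vs. keyed
 * use of hmac(sha256) under the new "enforce setkey" behaviour.  The exact
 * error value printed for the unkeyed call is an assumption (-ENOKEY is
 * expected), not a guarantee.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>

static int __init setkey_demo_init(void)
{
	static const u8 key[32];		/* demo key, all zeroes */
	static const u8 msg[] = "abc";
	u8 digest[32];				/* SHA-256 output size */
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* field present in this kernel generation */

		/* Unkeyed use of a keyed hash: now rejected by the API. */
		ret = crypto_shash_digest(desc, msg, sizeof(msg) - 1, digest);
		pr_info("digest before setkey: %d\n", ret);

		/* The supported path: set a key first, then digest. */
		ret = crypto_shash_setkey(tfm, key, sizeof(key));
		if (!ret)
			ret = crypto_shash_digest(desc, msg, sizeof(msg) - 1,
						  digest);
		pr_info("digest after setkey: %d\n", ret);
	}

	crypto_free_shash(tfm);
	return 0;
}

static void __exit setkey_demo_exit(void)
{
}

module_init(setkey_demo_init);
module_exit(setkey_demo_exit);
MODULE_LICENSE("GPL");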
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/hw_random/Kconfig | 32
-rw-r--r--  drivers/char/hw_random/Makefile | 2
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 169
-rw-r--r--  drivers/char/hw_random/bcm63xx-rng.c | 154
-rw-r--r--  drivers/char/hw_random/core.c | 4
-rw-r--r--  drivers/char/hw_random/exynos-trng.c | 235
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 13
-rw-r--r--  drivers/char/hw_random/mtk-rng.c | 1
-rw-r--r--  drivers/char/random.c | 24
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 6
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 131
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.c | 2
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 8
-rw-r--r--  drivers/crypto/bcm/cipher.c | 1
-rw-r--r--  drivers/crypto/bfin_crc.c | 3
-rw-r--r--  drivers/crypto/caam/caamalg.c | 120
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 182
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 10
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 68
-rw-r--r--  drivers/crypto/caam/caamhash.c | 73
-rw-r--r--  drivers/crypto/caam/ctrl.c | 4
-rw-r--r--  drivers/crypto/caam/desc.h | 29
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 51
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/key_gen.c | 30
-rw-r--r--  drivers/crypto/caam/key_gen.h | 30
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 3
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 10
-rw-r--r--  drivers/crypto/chelsio/Makefile | 1
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 540
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 15
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 14
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 38
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 76
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c | 654
-rw-r--r--  drivers/crypto/exynos-rng.c | 108
-rw-r--r--  drivers/crypto/hifn_795x.c | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 370
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 173
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 53
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 125
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 7
-rw-r--r--  drivers/crypto/marvell/cesa.c | 19
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 4
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 27
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 133
-rw-r--r--  drivers/crypto/s5p-sss.c | 26
-rw-r--r--  drivers/crypto/stm32/Kconfig | 13
-rw-r--r--  drivers/crypto/stm32/Makefile | 5
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 1170
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 23
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 102
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 7
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c | 1
64 files changed, 3918 insertions, 1204 deletions
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 88044eda0ac6..4d0f571c15f9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -73,26 +73,14 @@ config HW_RANDOM_ATMEL
If unsure, say Y.
-config HW_RANDOM_BCM63XX
- tristate "Broadcom BCM63xx Random Number Generator support"
- depends on BCM63XX || BMIPS_GENERIC
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM63xx SoCs.
-
- To compile this driver as a module, choose M here: the
- module will be called bcm63xx-rng
-
- If unusure, say Y.
-
config HW_RANDOM_BCM2835
- tristate "Broadcom BCM2835 Random Number Generator support"
- depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
+ tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
+ depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
+ ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM2835 SoCs.
+ Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs.
To compile this driver as a module, choose M here: the
module will be called bcm2835-rng
@@ -436,6 +424,18 @@ config HW_RANDOM_S390
If unsure, say Y.
+config HW_RANDOM_EXYNOS
+ tristate "Samsung Exynos True Random Number Generator support"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default HW_RANDOM
+ ---help---
+ This driver provides support for the True Random Number
+ Generator available in Exynos SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called exynos-trng.
+
+ If unsure, say Y.
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 0ef05c61d9c8..b780370bd4eb 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -9,11 +9,11 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
-obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 574211a49549..7a84cec30c3a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -15,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <linux/clk.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -29,116 +30,180 @@
#define RNG_INT_OFF 0x1
-static void __init nsp_rng_init(void __iomem *base)
+struct bcm2835_rng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+ bool mask_interrupts;
+ struct clk *clk;
+};
+
+static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
{
- u32 val;
+ return container_of(rng, struct bcm2835_rng_priv, rng);
+}
+
+static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset)
+{
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(priv->base + offset);
+ else
+ return readl(priv->base + offset);
+}
- /* mask the interrupt */
- val = readl(base + RNG_INT_MASK);
- val |= RNG_INT_OFF;
- writel(val, base + RNG_INT_MASK);
+static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val,
+ u32 offset)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, priv->base + offset);
+ else
+ writel(val, priv->base + offset);
}
static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
- void __iomem *rng_base = (void __iomem *)rng->priv;
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
u32 max_words = max / sizeof(u32);
u32 num_words, count;
- while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
if (!wait)
return 0;
cpu_relax();
}
- num_words = readl(rng_base + RNG_STATUS) >> 24;
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
if (num_words > max_words)
num_words = max_words;
for (count = 0; count < num_words; count++)
- ((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
+ ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA);
return num_words * sizeof(u32);
}
-static struct hwrng bcm2835_rng_ops = {
- .name = "bcm2835",
- .read = bcm2835_rng_read,
+static int bcm2835_rng_init(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+ int ret = 0;
+ u32 val;
+
+ if (!IS_ERR(priv->clk)) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->mask_interrupts) {
+ /* mask the interrupt */
+ val = rng_readl(priv, RNG_INT_MASK);
+ val |= RNG_INT_OFF;
+ rng_writel(priv, val, RNG_INT_MASK);
+ }
+
+ /* set warm-up count & enable */
+ rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS);
+ rng_writel(priv, RNG_RBGEN, RNG_CTRL);
+
+ return ret;
+}
+
+static void bcm2835_rng_cleanup(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+
+ /* disable rng hardware */
+ rng_writel(priv, 0, RNG_CTRL);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+}
+
+struct bcm2835_rng_of_data {
+ bool mask_interrupts;
+};
+
+static const struct bcm2835_rng_of_data nsp_rng_of_data = {
+ .mask_interrupts = true,
};
static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm2835-rng"},
- { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
- { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
+ { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm6368-rng"},
{},
};
static int bcm2835_rng_probe(struct platform_device *pdev)
{
+ const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- void (*rng_setup)(void __iomem *base);
const struct of_device_id *rng_id;
- void __iomem *rng_base;
+ struct bcm2835_rng_priv *priv;
+ struct resource *r;
int err;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
/* map peripheral */
- rng_base = of_iomap(np, 0);
- if (!rng_base) {
- dev_err(dev, "failed to remap rng regs");
- return -ENODEV;
- }
- bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ priv->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Clock is optional on most platforms */
+ priv->clk = devm_clk_get(dev, NULL);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = bcm2835_rng_init;
+ priv->rng.read = bcm2835_rng_read;
+ priv->rng.cleanup = bcm2835_rng_cleanup;
rng_id = of_match_node(bcm2835_rng_of_match, np);
- if (!rng_id) {
- iounmap(rng_base);
+ if (!rng_id)
return -EINVAL;
- }
- /* Check for rng init function, execute it */
- rng_setup = rng_id->data;
- if (rng_setup)
- rng_setup(rng_base);
- /* set warm-up count & enable */
- __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
- __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+ /* Check for rng init function, execute it */
+ of_data = rng_id->data;
+ if (of_data)
+ priv->mask_interrupts = of_data->mask_interrupts;
/* register driver */
- err = hwrng_register(&bcm2835_rng_ops);
- if (err) {
+ err = devm_hwrng_register(dev, &priv->rng);
+ if (err)
dev_err(dev, "hwrng registration failed\n");
- iounmap(rng_base);
- } else
+ else
dev_info(dev, "hwrng registered\n");
return err;
}
-static int bcm2835_rng_remove(struct platform_device *pdev)
-{
- void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv;
-
- /* disable rng hardware */
- __raw_writel(0, rng_base + RNG_CTRL);
-
- /* unregister driver */
- hwrng_unregister(&bcm2835_rng_ops);
- iounmap(rng_base);
-
- return 0;
-}
-
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
+static struct platform_device_id bcm2835_rng_devtype[] = {
+ { .name = "bcm2835-rng" },
+ { .name = "bcm63xx-rng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype);
+
static struct platform_driver bcm2835_rng_driver = {
.driver = {
.name = "bcm2835-rng",
.of_match_table = bcm2835_rng_of_match,
},
.probe = bcm2835_rng_probe,
- .remove = bcm2835_rng_remove,
+ .id_table = bcm2835_rng_devtype,
};
module_platform_driver(bcm2835_rng_driver);
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
deleted file mode 100644
index 5132c9cde50d..000000000000
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Broadcom BCM63xx Random Number Generator support
- *
- * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org>
- * Copyright (C) 2009, Broadcom Corporation
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/of.h>
-
-#define RNG_CTRL 0x00
-#define RNG_EN (1 << 0)
-
-#define RNG_STAT 0x04
-#define RNG_AVAIL_MASK (0xff000000)
-
-#define RNG_DATA 0x08
-#define RNG_THRES 0x0c
-#define RNG_MASK 0x10
-
-struct bcm63xx_rng_priv {
- struct hwrng rng;
- struct clk *clk;
- void __iomem *regs;
-};
-
-#define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng)
-
-static int bcm63xx_rng_init(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
- int error;
-
- error = clk_prepare_enable(priv->clk);
- if (error)
- return error;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val |= RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- return 0;
-}
-
-static void bcm63xx_rng_cleanup(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val &= ~RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- clk_disable_unprepare(priv->clk);
-}
-
-static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
-}
-
-static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- *data = __raw_readl(priv->regs + RNG_DATA);
-
- return 4;
-}
-
-static int bcm63xx_rng_probe(struct platform_device *pdev)
-{
- struct resource *r;
- int ret;
- struct bcm63xx_rng_priv *priv;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no iomem resource\n");
- return -ENXIO;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->rng.name = pdev->name;
- priv->rng.init = bcm63xx_rng_init;
- priv->rng.cleanup = bcm63xx_rng_cleanup;
- priv->rng.data_present = bcm63xx_rng_data_present;
- priv->rng.data_read = bcm63xx_rng_data_read;
-
- priv->clk = devm_clk_get(&pdev->dev, "ipsec");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "no clock for device: %d\n", ret);
- return ret;
- }
-
- if (!devm_request_mem_region(&pdev->dev, r->start,
- resource_size(r), pdev->name)) {
- dev_err(&pdev->dev, "request mem failed");
- return -EBUSY;
- }
-
- priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
- resource_size(r));
- if (!priv->regs) {
- dev_err(&pdev->dev, "ioremap failed");
- return -ENOMEM;
- }
-
- ret = devm_hwrng_register(&pdev->dev, &priv->rng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register rng device: %d\n",
- ret);
- return ret;
- }
-
- dev_info(&pdev->dev, "registered RNG driver\n");
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id bcm63xx_rng_of_match[] = {
- { .compatible = "brcm,bcm6368-rng", },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
-#endif
-
-static struct platform_driver bcm63xx_rng_driver = {
- .probe = bcm63xx_rng_probe,
- .driver = {
- .name = "bcm63xx-rng",
- .of_match_table = of_match_ptr(bcm63xx_rng_of_match),
- },
-};
-
-module_platform_driver(bcm63xx_rng_driver);
-
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 657b8770b6b9..91bb98c42a1c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -306,6 +306,10 @@ static int enable_best_rng(void)
ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
if (!ret)
cur_rng_set_by_user = 0;
+ } else {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ ret = 0;
}
return ret;
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
new file mode 100644
index 000000000000..1947aed7c044
--- /dev/null
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RNG driver for Exynos TRNGs
+ *
+ * Author: Łukasz Stelmach <l.stelmach@samsung.com>
+ *
+ * Copyright 2017 (c) Samsung Electronics Software, Inc.
+ *
+ * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by
+ * Krzysztof Kozłowski <krzk@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define EXYNOS_TRNG_CLKDIV (0x0)
+
+#define EXYNOS_TRNG_CTRL (0x20)
+#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
+
+#define EXYNOS_TRNG_POST_CTRL (0x30)
+#define EXYNOS_TRNG_ONLINE_CTRL (0x40)
+#define EXYNOS_TRNG_ONLINE_STAT (0x44)
+#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
+#define EXYNOS_TRNG_FIFO_CTRL (0x50)
+#define EXYNOS_TRNG_FIFO_0 (0x80)
+#define EXYNOS_TRNG_FIFO_1 (0x84)
+#define EXYNOS_TRNG_FIFO_2 (0x88)
+#define EXYNOS_TRNG_FIFO_3 (0x8c)
+#define EXYNOS_TRNG_FIFO_4 (0x90)
+#define EXYNOS_TRNG_FIFO_5 (0x94)
+#define EXYNOS_TRNG_FIFO_6 (0x98)
+#define EXYNOS_TRNG_FIFO_7 (0x9c)
+#define EXYNOS_TRNG_FIFO_LEN (8)
+#define EXYNOS_TRNG_CLOCK_RATE (500000)
+
+
+struct exynos_trng_dev {
+ struct device *dev;
+ void __iomem *mem;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct exynos_trng_dev *trng;
+ int val;
+
+ max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
+
+ trng = (struct exynos_trng_dev *)rng->priv;
+
+ writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
+ val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
+ val == 0, 200, 1000000);
+ if (val < 0)
+ return val;
+
+ memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max);
+
+ return max;
+}
+
+static int exynos_trng_init(struct hwrng *rng)
+{
+ struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
+ unsigned long sss_rate;
+ u32 val;
+
+ sss_rate = clk_get_rate(trng->clk);
+
+ /*
+ * For most TRNG circuits the clock frequency of under 500 kHz
+ * is safe.
+ */
+ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
+ if (val > 0x7fff) {
+ dev_err(trng->dev, "clock divider too large: %d", val);
+ return -ERANGE;
+ }
+ val = val << 1;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV);
+
+ /* Enable the generator. */
+ val = EXYNOS_TRNG_CTRL_RNGEN;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL);
+
+ /*
+ * Disable post-processing. /dev/hwrng is supposed to deliver
+ * unprocessed data.
+ */
+ writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL);
+
+ return 0;
+}
+
+static int exynos_trng_probe(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return ret;
+
+ trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
+ GFP_KERNEL);
+ if (!trng->rng.name)
+ return ret;
+
+ trng->rng.init = exynos_trng_init;
+ trng->rng.read = exynos_trng_do_read;
+ trng->rng.priv = (unsigned long) trng;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not get runtime PM.\n");
+ goto err_pm_get;
+ }
+
+ trng->clk = devm_clk_get(&pdev->dev, "secss");
+ if (IS_ERR(trng->clk)) {
+ ret = PTR_ERR(trng->clk);
+ dev_err(&pdev->dev, "Could not get clock.\n");
+ goto err_clock;
+ }
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable the clk.\n");
+ goto err_clock;
+ }
+
+ ret = hwrng_register(&trng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ goto err_register;
+ }
+
+ dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(trng->clk);
+
+err_clock:
+ pm_runtime_put_sync(&pdev->dev);
+
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int exynos_trng_remove(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&trng->rng);
+ clk_disable_unprepare(trng->clk);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not get runtime PM.\n");
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+ exynos_trng_resume);
+
+static const struct of_device_id exynos_trng_dt_match[] = {
+ {
+ .compatible = "samsung,exynos5250-trng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
+
+static struct platform_driver exynos_trng_driver = {
+ .driver = {
+ .name = "exynos-trng",
+ .pm = &exynos_trng_pm_ops,
+ .of_match_table = exynos_trng_dt_match,
+ },
+ .probe = exynos_trng_probe,
+ .remove = exynos_trng_remove,
+};
+
+module_platform_driver(exynos_trng_driver);
+MODULE_AUTHOR("Łukasz Stelmach");
+MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 88db42d30760..eca87249bcff 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -282,8 +282,7 @@ static int __exit imx_rngc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int imx_rngc_suspend(struct device *dev)
+static int __maybe_unused imx_rngc_suspend(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -292,7 +291,7 @@ static int imx_rngc_suspend(struct device *dev)
return 0;
}
-static int imx_rngc_resume(struct device *dev)
+static int __maybe_unused imx_rngc_resume(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -301,11 +300,7 @@ static int imx_rngc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops imx_rngc_pm_ops = {
- .suspend = imx_rngc_suspend,
- .resume = imx_rngc_resume,
-};
-#endif
+SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb", .data = NULL, },
@@ -316,9 +311,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = "imx_rngc",
-#ifdef CONFIG_PM
.pm = &imx_rngc_pm_ops,
-#endif
.of_match_table = imx_rngc_dt_ids,
},
.remove = __exit_p(imx_rngc_remove),
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 8da7bcf54105..7f99cd52b40e 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -135,6 +135,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
#endif
priv->rng.read = mtk_rng_read;
priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 64b59562c872..80f2c326db47 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -431,9 +431,9 @@ static int crng_init = 0;
static int crng_init_cnt = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE]);
+ __u32 out[CHACHA20_BLOCK_WORDS]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -817,7 +817,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u8 block[CHACHA20_BLOCK_SIZE];
+ __u32 block[CHACHA20_BLOCK_WORDS];
__u32 key[8];
} buf;
@@ -851,7 +851,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE])
+ __u32 out[CHACHA20_BLOCK_WORDS])
{
unsigned long v, flags;
@@ -867,7 +867,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
+static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
{
struct crng_state *crng = NULL;
@@ -885,7 +885,7 @@ static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -897,14 +897,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
+ s = &tmp[used / sizeof(__u32)];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
struct crng_state *crng = NULL;
@@ -920,7 +920,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1507,7 +1507,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -2114,7 +2114,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
+ extract_crng((__u32 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2144,7 +2144,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
+ extract_crng(batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47ec920d5b71..4b741b83e23f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -723,7 +723,6 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- select CRYPTO_SHA384
select CRYPTO_SHA512
help
Enables the driver for the on-chip crypto accelerator
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index eeaf27859d80..ea83d0bff0e9 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -256,10 +256,6 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
return true;
- /* CCM - fix CBC MAC mismatch in special case */
- if (is_ccm && decrypt && !req->assoclen)
- return true;
-
return false;
}
@@ -330,7 +326,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
SA_CIPHER_ALG_AES,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index c44954e274bc..76f459ad2821 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -128,7 +128,14 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ if (dev->is_revb) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else {
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ }
}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
@@ -275,14 +282,12 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
- memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
-
return 0;
}
@@ -570,15 +575,14 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
- struct aead_request *aead_req;
- struct crypto4xx_ctx *ctx;
+ struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
struct scatterlist *dst = pd_uinfo->dest_va;
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
int err = 0;
- aead_req = container_of(pd_uinfo->async_req, struct aead_request,
- base);
- ctx = crypto_tfm_ctx(aead_req->base.tfm);
-
if (pd_uinfo->using_sd) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
@@ -590,38 +594,39 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
- size_t cp_len = crypto_aead_authsize(
- crypto_aead_reqtfm(aead_req));
- u32 icv[cp_len];
-
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
cp_len);
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
+ } else {
+ /* check icv at the end */
+ scatterwalk_map_and_copy(icv, aead_req->src,
+ aead_req->assoclen + aead_req->cryptlen -
+ cp_len, cp_len, 0);
+
+ crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+
+ if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ err = -EBADMSG;
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd->pd_ctl.bf.status & 0xff) {
- if (pd->pd_ctl.bf.status & 0x1) {
- /* authentication error */
- err = -EBADMSG;
- } else {
- if (!__ratelimit(&dev->aead_ratelimit)) {
- if (pd->pd_ctl.bf.status & 2)
- pr_err("pad fail error\n");
- if (pd->pd_ctl.bf.status & 4)
- pr_err("seqnum fail\n");
- if (pd->pd_ctl.bf.status & 8)
- pr_err("error _notify\n");
- pr_err("aead return err status = 0x%02x\n",
- pd->pd_ctl.bf.status & 0xff);
- pr_err("pd pad_ctl = 0x%08x\n",
- pd->pd_ctl.bf.pd_pad_ctl);
- }
- err = -EINVAL;
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
}
+ err = -EINVAL;
}
if (pd_uinfo->state & PD_ENTRY_BUSY)
@@ -1070,21 +1075,29 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
/**
* Top Half of isr.
*/
-static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+ u32 clr_val)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (!core_dev->dev->ce_base)
- return 0;
-
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+ PPC4XX_TMO_ERR_INT);
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1266,6 +1279,8 @@ static int crypto4xx_probe(struct platform_device *ofdev)
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ u32 pvr;
+ bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
@@ -1282,6 +1297,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1304,7 +1320,22 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (!core_dev->dev)
goto err_alloc_dev;
+ /*
+ * Older version of 460EX/GT have a hardware bug.
+ * Hence they do not support H/W based security intr coalescing
+ */
+ pvr = mfspr(SPRN_PVR);
+ if (is_revb && ((pvr >> 4) == 0x130218A)) {
+ u32 min = PVR_MIN(pvr);
+
+ if (min < 4) {
+ dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+ is_revb = false;
+ }
+ }
+
core_dev->dev->core_dev = core_dev;
+ core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
@@ -1325,13 +1356,6 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- /* Register for Crypto isr, Crypto Engine IRQ */
- core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
- if (rc)
- goto err_request_irq;
-
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
@@ -1339,6 +1363,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
goto err_iomap;
}
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ rc = request_irq(core_dev->irq, is_revb ?
+ crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler, 0,
+ KBUILD_MODNAME, dev);
+ if (rc)
+ goto err_request_irq;
+
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1352,11 +1385,11 @@ static int crypto4xx_probe(struct platform_device *ofdev)
return 0;
err_start_dev:
- iounmap(core_dev->dev->ce_base);
-err_iomap:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
@@ -1397,7 +1430,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 8ac3bd37203b..23b726da6534 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -28,8 +28,6 @@
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
-#define MODULE_NAME "crypto4xx"
-
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -82,7 +80,6 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
- char *name;
void __iomem *ce_base;
void __iomem *trng_base;
@@ -109,6 +106,7 @@ struct crypto4xx_device {
struct list_head alg_list; /* List of algorithm supported
by this device */
struct ratelimit_state aead_ratelimit;
+ bool is_revb;
};
struct crypto4xx_core_device {
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 0a22ec5d1a96..472331787e04 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -121,13 +121,15 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
#define PPC4XX_TRNG_EN 0x00020000
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
/**
* all follow define are ad hoc
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 677ca17fd223..5e63742b0d22 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -92,7 +92,7 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
if (!rng)
goto err_out;
- rng->name = MODULE_NAME;
+ rng->name = KBUILD_MODNAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 456278440863..0fb8bbf41a8d 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
// The HW omits the initial increment of the counter field.
- crypto_inc(req_ctx->hw_ctx.J0+12, 4);
+ memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
@@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = {
.setkey = artpec6_crypto_aead_set_key,
.encrypt = artpec6_crypto_aead_encrypt,
.decrypt = artpec6_crypto_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -3041,9 +3042,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
variant = (enum artpec6_crypto_variant)match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index ce70b44d0fb6..2b75f95bbe1b 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -42,7 +42,6 @@
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
-#include <crypto/aes.h>
#include <crypto/sha3.h>
#include "util.h"
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a118b9bed669..bfbf8bf77f03 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -494,7 +494,8 @@ static struct ahash_alg algs = {
.cra_driver_name = DRIVER_NAME,
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
.cra_alignmask = 3,
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index baa8dd52472d..2188235be02d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -108,6 +108,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
- false);
+ false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_enc:
/*
@@ -266,9 +271,9 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
- nonce, ctx1_iv_off, false);
+ nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -300,9 +305,9 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, false);
+ ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_givenc:
return 0;
@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead,
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead);
badkey:
@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead,
#endif
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead);
@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4106_set_sh_desc(aead);
}
@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4543_set_sh_desc(aead);
}
@@ -625,7 +654,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -648,9 +676,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
@@ -658,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -691,23 +718,21 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -979,9 +1004,6 @@ static void init_aead_job(struct aead_request *req,
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
-
- /* REG3 = assoclen */
- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
static void init_gcm_job(struct aead_request *req,
@@ -996,6 +1018,7 @@ static void init_gcm_job(struct aead_request *req,
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
/* BUG This should not be specific to generic GCM. */
last = 0;
@@ -1022,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -1045,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
+ /*
+ * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
+ * having DPOVRD as destination.
+ */
+ if (ctrlpriv->era < 3)
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ else
+ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
@@ -3228,9 +3261,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -3238,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -3269,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3279,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
offsetof(struct caam_ctx, sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 530c14ee32de..ceb93fbb76e6 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type)
* cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
* (non-protocol) with no (null) encryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
- KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* assoclen + cryptlen = seqinlen */
@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
* cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
* (non-protocol) with no (null) decryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* Class 2 operation */
@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
static void init_sh_desc_key_aead(u32 * const desc,
struct alginfo * const cdata,
struct alginfo * const adata,
- const bool is_rfc3686, u32 *nonce)
+ const bool is_rfc3686, u32 *nonce, int era)
{
u32 *key_jump_cmd;
unsigned int enckeylen = cdata->keylen;
@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc,
if (is_rfc3686)
enckeylen -= CTR_RFC3686_NONCE_SIZE;
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, enckeylen,
@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc,
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
+ int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
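
The shared-descriptor side mirrors the job-descriptor change earlier in this series: assoclen is read from REG3 when running under caam/qi or on parts older than Era 3 (where the MATH command cannot use DPOVRD), and from DPOVRD otherwise. A standalone sketch of the selection, with stand-in identifiers:

/*
 * Minimal, standalone sketch (not kernel code) of how the assoclen source
 * is chosen above: DPOVRD is only usable as a MATH operand on SEC Era >= 3,
 * and the caam/qi path keeps using REG3 unconditionally.
 */
#include <stdbool.h>
#include <stdio.h>

enum math_reg { REG3, DPOVRD };		/* stand-ins for the MATH operands */

static enum math_reg assoclen_reg(bool is_qi, int era)
{
	return (is_qi || era < 3) ? REG3 : DPOVRD;
}

int main(void)
{
	printf("qi, era 8 -> %s\n", assoclen_reg(true, 8) == REG3 ? "REG3" : "DPOVRD");
	printf("jr, era 2 -> %s\n", assoclen_reg(false, 2) == REG3 ? "REG3" : "DPOVRD");
	printf("jr, era 8 -> %s\n", assoclen_reg(false, 8) == REG3 ? "REG3" : "DPOVRD");
	return 0;
}
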
@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi)
+ const u32 ctx1_iv_off, const bool is_qi, int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
- else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
+ CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
+ CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi)
+ const bool is_qi, int era)
{
u32 geniv, moveiv;
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
if (is_qi) {
u32 *wait_load_cmd;
@@ -528,8 +561,13 @@ copy_iv:
OP_ALG_ENCRYPT);
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -1075,7 +1113,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1140,7 +1178,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1209,7 +1247,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index e412ec8f7005..5f9445ae2114 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -43,28 +43,28 @@
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi);
+ const u32 ctx1_iv_off, const bool is_qi, int era);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index f9f08fce4356..4aecc9435f69 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -53,6 +53,7 @@ struct caam_ctx {
u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_enc:
/* aead_decrypt shared descriptor */
@@ -149,7 +151,8 @@ skip_enc:
cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, alg->caam.geniv,
- is_rfc3686, nonce, ctx1_iv_off, true);
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ ctrlpriv->era);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -176,7 +179,7 @@ skip_enc:
cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_givenc:
return 0;
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
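
On Era 6+ parts, aead_setkey() above skips gen_split_key() entirely: the raw authentication key is stored at the start of ctx->key, the encryption key right after the padded split-key area, and keylen_pad + enckeylen bytes are synced to the device; the DKP in the shared descriptor later derives the split key into that first region (hence the bidirectional mapping). A standalone sketch of the buffer layout arithmetic, with illustrative key sizes:

/*
 * Standalone sketch of the ctx->key layout used on Era >= 6 above
 * (raw auth key first, enc key at the split-key-padded offset).
 * Key sizes are illustrative; the 64-byte pad is split_key_pad_len()
 * for SHA-256 (2 * 32, already a multiple of 16).
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define SPLIT_KEY_PAD_SHA256	64

int main(void)
{
	unsigned char authkey[20];	/* illustrative HMAC key */
	unsigned char enckey[16];	/* illustrative AES-128 key */
	unsigned char ctx_key[SPLIT_KEY_PAD_SHA256 + sizeof(enckey)];

	memset(authkey, 0xaa, sizeof(authkey));
	memset(enckey, 0xee, sizeof(enckey));
	memset(ctx_key, 0, sizeof(ctx_key));

	/* raw auth key at offset 0; DKP later derives the split key here */
	memcpy(ctx_key, authkey, sizeof(authkey));
	/* enc key appended right after the (padded) split-key area */
	memcpy(ctx_key + SPLIT_KEY_PAD_SHA256, enckey, sizeof(enckey));

	printf("bytes synced to device: %zu\n",
	       (size_t)SPLIT_KEY_PAD_SHA256 + sizeof(enckey));
	return 0;
}
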
@@ -272,7 +298,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
int ret = 0;
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -295,9 +320,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
@@ -356,10 +380,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts ablkcipher encrypt, decrypt shared descriptors */
@@ -668,7 +690,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -905,7 +927,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -1058,7 +1080,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, GIVENCRYPT, 0, 0);
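
The three dev_err() fixes above switch the length modifier because the CAAM_QI_MAX_*_SG limits evidently evaluate to size_t-sized values; %zu is the only modifier that is correct for size_t on both 32-bit and 64-bit builds, whereas %lu warns wherever size_t is not unsigned long. A trivial standalone illustration:

/*
 * Why %zu: it matches size_t on every architecture, while %lu is only
 * correct where size_t happens to be unsigned long.
 */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t max_sg = 32;	/* stands in for CAAM_QI_MAX_AEAD_SG */
	int qm_sg_ents = 40;

	printf("Insufficient S/G entries: %d > %zu\n", qm_sg_ents, max_sg);
	return 0;
}
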
@@ -2123,7 +2145,8 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
struct caam_drv_private *priv;
@@ -2137,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ ctx->dir);
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
@@ -2149,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock);
@@ -2167,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2177,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -2186,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 616720a04e7a..0beb28196e20 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,6 +107,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
* read and write to seqout
*/
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx)
+ struct caam_hash_ctx *ctx, bool import_ctx,
+ int era)
{
u32 op = ctx->adata.algtype;
u32 *skip_key_load;
@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6)
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, &ctx->adata);
set_jump_tgt_here(desc, skip_key_load);
@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ ctx->adata.key_virt = ctx->key;
+
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
- CAAM_MAX_HASH_KEY_SIZE);
- if (ret)
- goto bad_free_key;
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.key_inline = true;
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad, 1);
-#endif
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ memcpy(ctx->key, key, keylen);
+ } else {
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
+ keylen, CAAM_MAX_HASH_KEY_SIZE);
+ if (ret)
+ goto bad_free_key;
+ }
kfree(hashed_key);
return ahash_set_sh_desc(ahash);
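
On Era 6+, ahash_setkey() above keeps the (possibly pre-hashed) raw key, marks it for inlining into the shared descriptors, and only has to confirm that the padded split key the DKP will produce still fits the context key buffer. A standalone sketch of that bound check; MAX_HASH_KEY is an assumed stand-in for CAAM_MAX_HASH_KEY_SIZE, whose value is not shown in this diff:

/*
 * Standalone sketch of the Era >= 6 ahash_setkey() bound check above.
 * MAX_HASH_KEY is an assumed placeholder, not the kernel constant.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_HASH_KEY 128	/* assumption for illustration only */

static bool dkp_split_key_fits(unsigned int keylen_pad)
{
	return keylen_pad <= MAX_HASH_KEY;
}

int main(void)
{
	/* hmac(sha512): padded split key is 128 bytes */
	printf("sha512 split key fits: %s\n",
	       dkp_split_key_fits(128) ? "yes" : "no");
	return 0;
}
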
@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 027e121c6f70..75d280cb2dc0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -611,6 +611,8 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
+ ctrlpriv->era = caam_get_era();
+
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
@@ -742,7 +744,7 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+ ctrlpriv->era);
dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present,
caam_dpaa2 ? "yes" : "no");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 8142de7ba050..f76ff160a02c 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -444,6 +444,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
@@ -1093,6 +1105,22 @@
/* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004
@@ -1452,6 +1480,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
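
The OP_PCL_DKP_* protinfo fields added to desc.h above encode, in the low half of the DKP OPERATION word, the key source (bits 15:14), the key destination (bits 13:12) and the initial key length (bits 11:0). A standalone sketch that assembles the two combinations used by append_proto_dkp(), reusing the macro values from the hunk (the key length is illustrative):

/*
 * Standalone sketch of the DKP protinfo layout: source in bits 15:14,
 * destination in bits 13:12, initial key length in bits 11:0.
 */
#include <stdint.h>
#include <stdio.h>

#define OP_PCL_DKP_SRC_SHIFT	14
#define OP_PCL_DKP_SRC_IMM	(0 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_SRC_PTR	(2 << OP_PCL_DKP_SRC_SHIFT)
#define OP_PCL_DKP_DST_SHIFT	12
#define OP_PCL_DKP_DST_IMM	(0 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_DST_PTR	(2 << OP_PCL_DKP_DST_SHIFT)
#define OP_PCL_DKP_KEY_MASK	(0xfff << 0)

int main(void)
{
	uint32_t keylen = 20;	/* e.g. a 20-byte HMAC-SHA1 key */

	uint32_t inline_protinfo = OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
				   (keylen & OP_PCL_DKP_KEY_MASK);
	uint32_t dma_protinfo    = OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
				   (keylen & OP_PCL_DKP_KEY_MASK);

	printf("inline key protinfo:  0x%04x\n", (unsigned)inline_protinfo); /* 0x0014 */
	printf("pointer key protinfo: 0x%04x\n", (unsigned)dma_protinfo);    /* 0xa014 */
	return 0;
}
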
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index ba1ca0806f0a..d4256fa4a1d6 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 * const desc, void *data, int len)
+static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 * const desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -271,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -312,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -452,7 +452,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
- void *key_virt;
+ const void *key_virt;
};
bool key_inline;
};
@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1;
}
+/**
+ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ *          keylen should be the length of the initial key, while keylen_pad
+ *          is the length of the derived (split) key.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ */
+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
+{
+ u32 protid;
+
+ /*
+ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
+ * to OP_PCLID_DKP_{MD5, SHA*}
+ */
+ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
+ (0x20 << OP_ALG_ALGSEL_SHIFT);
+
+ if (adata->key_inline) {
+ int words;
+
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+ adata->keylen);
+ append_data(desc, adata->key_virt, adata->keylen);
+
+ /* Reserve space in descriptor buffer for the derived key */
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
+ if (words)
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
+ } else {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
+ adata->keylen);
+ append_ptr(desc, adata->key_dma);
+ }
+}
+
#endif /* DESC_CONSTR_H */
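
For the inline case, append_proto_dkp() must also grow the descriptor header length, since the CAAM overwrites the immediate key in place with the larger derived split key. A standalone restatement of that word count, assuming the usual 4-byte CAAM command word for CAAM_CMD_SZ:

/*
 * Standalone sketch of the space reservation done by append_proto_dkp()
 * for the inline case, assuming a 4-byte command word. keylen is the raw
 * key appended immediately; keylen_pad is the padded split-key size the
 * CAAM writes back over it.
 */
#include <stdio.h>

#define CMD_SZ 4
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int keylen = 20;	/* e.g. raw 20-byte HMAC key */
	unsigned int keylen_pad = 64;	/* split_key_pad_len() for SHA-256 */

	unsigned int words = (ALIGN_UP(keylen_pad, CMD_SZ) -
			      ALIGN_UP(keylen, CMD_SZ)) / CMD_SZ;

	/* 11 extra words so the derived key fits inside the descriptor */
	printf("extra descriptor words: %u\n", words);
	return 0;
}
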
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 91f1107276e5..7696a774a362 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -84,6 +84,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
+ int era; /* CAAM Era (internal HW revision) */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8c79c3a153dc..312b5f042f31 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -11,36 +11,6 @@
#include "desc_constr.h"
#include "key_gen.h"
-/**
- * split_key_len - Compute MDHA split key length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key length
- */
-static inline u32 split_key_len(u32 hash)
-{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
- u32 idx;
-
- idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
-
- return (u32)(mdpadlen[idx] * 2);
-}
-
-/**
- * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key pad length
- */
-static inline u32 split_key_pad_len(u32 hash)
-{
- return ALIGN(split_key_len(hash), 16);
-}
-
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index 5db055c25bd2..818f78f6fc1a 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -6,6 +6,36 @@
*
*/
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
struct split_key_result {
struct completion completion;
int err;
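
Moving split_key_len()/split_key_pad_len() into key_gen.h lets the DKP setkey paths size ctx->key without ever generating the split key: the split key is twice the per-algorithm MDHA pad size, rounded up to 16 bytes. A standalone restatement, indexed by algorithm position rather than by the OP_ALG_ALGSEL_* bit values used in the kernel:

/*
 * Standalone restatement of the helpers above: split key = 2 * MDHA pad
 * size, padded length = that value rounded up to a 16-byte multiple.
 */
#include <stdio.h>

static const unsigned char mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
static const char *names[] = { "md5", "sha1", "sha224", "sha256",
			       "sha384", "sha512" };

static unsigned int split_key_len(unsigned int idx)
{
	return mdpadlen[idx] * 2;
}

static unsigned int split_key_pad_len(unsigned int idx)
{
	return (split_key_len(idx) + 15) / 16 * 16;
}

int main(void)
{
	for (unsigned int i = 0; i < 6; i++)
		printf("%-6s  split key %3u, padded %3u\n",
		       names[i], split_key_len(i), split_key_pad_len(i));
	return 0;
}
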
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 169e66231bcf..b0ba4331944b 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto request_cleanup;
}
result = (union cpt_res_s *)info->completion_addr;
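
The cpt fix above converts a bare return on a mid-function allocation failure into a jump to the function's cleanup label, so work done earlier in process_request() is undone on every error path. A generic, standalone sketch of that pattern; the names are hypothetical, not the driver's:

/*
 * Generic, standalone sketch of the error-path pattern the hunk above
 * switches to: once earlier setup succeeded, later failures must jump to
 * a common cleanup label instead of returning directly.
 */
#include <errno.h>
#include <stdlib.h>

struct req_info { void *gather_list; void *completion_addr; };

static int process_request_sketch(struct req_info *info)
{
	int ret = 0;

	info->gather_list = malloc(128);	/* earlier allocation */
	if (!info->gather_list)
		return -ENOMEM;			/* nothing to undo yet */

	info->completion_addr = calloc(1, 64);	/* later allocation */
	if (!info->completion_addr) {
		ret = -ENOMEM;
		goto request_cleanup;		/* must undo earlier work */
	}

	return 0;

request_cleanup:
	free(info->gather_list);
	info->gather_list = NULL;
	return ret;
}

int main(void)
{
	struct req_info info = { 0 };
	int ret = process_request_sketch(&info);

	free(info.completion_addr);
	free(info.gather_list);
	return ret ? 1 : 0;
}
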
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4addc238a6ef..deaefd532aaa 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -6,7 +6,6 @@
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"
-#include "nitrox_req.h"
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff02b713c6f6..ca1f0d780b61 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -21,7 +21,6 @@
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
-#include <linux/delay.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index b56b3f711d94..5ae9f8706f17 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -19,3 +19,13 @@ config CRYPTO_DEV_CHELSIO
To compile this driver as a module, choose M here: the module
will be called chcr.
+
+config CHELSIO_IPSEC_INLINE
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
+ depends on CRYPTO_DEV_CHELSIO
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+	  Enable support for IPsec Tx inline crypto offload.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index bebdf06687ad..eaecaf1ebcf3 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -2,3 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4eed7171e2ae..34a02d690548 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -73,6 +73,29 @@
#define IV AES_BLOCK_SIZE
+static unsigned int sgl_ent_len[] = {
+ 0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376
+};
+
+static unsigned int dsgl_ent_len[] = {
+ 0, 32, 32, 48, 48, 64, 64, 80, 80,
+ 112, 112, 128, 128, 144, 144, 160, 160,
+ 192, 192, 208, 208, 224, 224, 240, 240,
+ 272, 272, 288, 288, 304, 304, 320, 320
+};
+
+static u32 round_constant[11] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, 0x6C000000
+};
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+ unsigned char *input, int err);
+
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -108,18 +131,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
return (skb->len <= SGE_MAX_WR_LEN);
}
-/*
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- * Calculates the number of flits needed for a scatter/gather list that
- * can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
- n--;
- return (3 * n) / 2 + (n & 1) + 2;
-}
-
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
unsigned int entlen,
unsigned int skip)
@@ -160,7 +171,6 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
if (input == NULL)
goto out;
- reqctx = ahash_request_ctx(req);
digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
if (reqctx->is_sg_map)
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
@@ -183,30 +193,17 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
}
out:
req->base.complete(&req->base, err);
+}
- }
-
-static inline void chcr_handle_aead_resp(struct aead_request *req,
- unsigned char *input,
- int err)
+static inline int get_aead_subtype(struct crypto_aead *aead)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
-
-
- chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
- if (reqctx->b0_dma)
- dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
- reqctx->b0_len, DMA_BIDIRECTIONAL);
- if (reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(req, input, &err);
- reqctx->verify = VERIFY_HW;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
- req->base.complete(&req->base, err);
-}
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -231,6 +228,25 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+ if (reqctx->b0_dma)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+ reqctx->b0_len, DMA_BIDIRECTIONAL);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+ }
+ req->base.complete(&req->base, err);
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
@@ -558,7 +574,8 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
skip = 0;
}
}
- if (walk->nents == 0) {
+ WARN(!sg, "SG should not be null here\n");
+ if (sg && (walk->nents == 0)) {
small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
walk->sgl->len0 = cpu_to_be32(sgmin);
@@ -595,14 +612,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
}
}
-static inline int get_aead_subtype(struct crypto_aead *aead)
-{
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.aead);
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
-}
-
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -675,7 +684,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
if (srclen <= dstlen)
break;
less = min_t(unsigned int, sg_dma_len(dst) - offset -
- dstskip, CHCR_DST_SG_SIZE);
+ dstskip, CHCR_DST_SG_SIZE);
dstlen += less;
offset += less;
if (offset == sg_dma_len(dst)) {
@@ -686,7 +695,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
dstskip = 0;
}
src = sg_next(src);
- srcskip = 0;
+ srcskip = 0;
}
return min(srclen, dstlen);
}
@@ -1008,7 +1017,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
+static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+ u32 isfinal)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
@@ -1035,7 +1045,8 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
for (i = 0; i < (round % 8); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_decrypt_one(cipher, iv, iv);
+ if (!isfinal)
+ crypto_cipher_decrypt_one(cipher, iv, iv);
out:
return ret;
}
@@ -1056,7 +1067,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
AES_BLOCK_SIZE) + 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 0);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1087,7 +1098,7 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
ctr_add_iv(iv, req->info, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1101,7 +1112,6 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
@@ -1135,10 +1145,10 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
/* CTR mode counter overflow */
bytes = req->nbytes - reqctx->processed;
@@ -1239,15 +1249,15 @@ static int process_cipher(struct ablkcipher_request *req,
MIN_CIPHER_SG,
SPACE_LEFT(ablkctx->enckey_len),
0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
bytes = req->nbytes;
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
@@ -2014,11 +2024,8 @@ static int chcr_aead_common_init(struct aead_request *req,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int error = -EINVAL;
- unsigned int dst_size;
unsigned int authsize = crypto_aead_authsize(tfm);
- dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
/* validate key size */
if (aeadctx->enckey_len == 0)
goto err;
@@ -2083,7 +2090,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, temp;
+ unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
unsigned int kctx_len = 0, dnents;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
@@ -2097,24 +2104,19 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
return NULL;
reqctx->b0_dma = 0;
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
assoclen = 0;
}
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
- dnents += MIN_AUTH_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ req->assoclen);
+ dnents += MIN_AUTH_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
@@ -2162,16 +2164,23 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
temp & 0xF,
null ? 0 : assoclen + IV + 1,
temp, temp);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+ else
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
- CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ temp,
actx->auth_mode, aeadctx->hmac_ctrl,
IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- if (op_type == CHCR_ENCRYPT_OP)
+ if (op_type == CHCR_ENCRYPT_OP ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
memcpy(chcr_req->key_ctx.key, aeadctx->key,
aeadctx->enckey_len);
else
@@ -2181,7 +2190,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
- memcpy(reqctx->iv, req->iv, IV);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ } else {
+ memcpy(reqctx->iv, req->iv, IV);
+ }
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
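
For the new CTR-based authenc subtypes, the per-request IV handed to hardware is assembled above as nonce || IV || initial counter, with the counter set to 1 in big-endian and the usual rfc3686 sizes (4-byte nonce, 8-byte IV). A standalone sketch of that 16-byte counter block:

/*
 * Standalone sketch of the rfc3686 counter block built above:
 * nonce(4) || iv(8) || be32(1). Nonce and IV bytes are illustrative.
 */
#include <arpa/inet.h>	/* htonl(), for the big-endian initial counter */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NONCE_SIZE 4
#define IV_SIZE    8

int main(void)
{
	uint8_t nonce[NONCE_SIZE] = { 0xde, 0xad, 0xbe, 0xef };	/* from setkey */
	uint8_t iv[IV_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };		/* from request */
	uint8_t ctr_block[16];
	uint32_t one = htonl(1);

	memcpy(ctr_block, nonce, NONCE_SIZE);
	memcpy(ctr_block + NONCE_SIZE, iv, IV_SIZE);
	memcpy(ctr_block + NONCE_SIZE + IV_SIZE, &one, sizeof(one));

	for (int i = 0; i < 16; i++)
		printf("%02x%s", ctr_block[i], i == 15 ? "\n" : " ");
	return 0;
}
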
@@ -2202,9 +2220,9 @@ err:
return ERR_PTR(error);
}
-static int chcr_aead_dma_map(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
int error;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2246,9 +2264,9 @@ err:
return -ENOMEM;
}
-static void chcr_aead_dma_unmap(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2273,10 +2291,10 @@ static void chcr_aead_dma_unmap(struct device *dev,
}
}
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type)
+void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2308,11 +2326,11 @@ static inline void chcr_add_aead_src_ent(struct aead_request *req,
}
}
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid)
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2330,9 +2348,9 @@ static inline void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam)
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2355,10 +2373,10 @@ static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
}
}
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid)
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct dsgl_walk dsgl_walk;
@@ -2373,9 +2391,9 @@ static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param)
+void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
{
struct ulptx_walk ulp_walk;
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
@@ -2395,16 +2413,13 @@ static inline void chcr_add_hash_src_ent(struct ahash_request *req,
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
&reqctx->dma_addr);
ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
- 0);
-// reqctx->srcsg = ulp_walk.last_sg;
-// reqctx->src_ofst = ulp_walk.last_sg_len;
- ulptx_walk_end(&ulp_walk);
+ 0);
+ ulptx_walk_end(&ulp_walk);
}
}
-
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req)
+int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
int error = 0;
@@ -2414,13 +2429,13 @@ static inline int chcr_hash_dma_map(struct device *dev,
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
if (!error)
- return error;
+ return -ENOMEM;
req_ctx->is_sg_map = 1;
return 0;
}
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req)
+void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -2433,9 +2448,8 @@ static inline void chcr_hash_dma_unmap(struct device *dev,
}
-
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
+int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req)
{
int error;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2469,8 +2483,9 @@ err:
dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
return -ENOMEM;
}
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
+
+void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2666,8 +2681,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
sub_type = get_aead_subtype(tfm);
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen -= 8;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
@@ -2677,15 +2690,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error)
goto err;
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
- + (op_type ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_CCM_SG; // For IV and B0
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen
+ + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
@@ -2780,19 +2789,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
assoclen = req->assoclen - 8;
reqctx->b0_dma = 0;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
error = chcr_aead_common_init(req, op_type);
- if (error)
- return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst,
- req->cryptlen + (op_type ? -authsize : authsize),
+ if (error)
+ return ERR_PTR(error);
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize),
CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_GCM_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
@@ -2829,10 +2833,10 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
assoclen + IV + 1, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
temp, temp);
- chcr_req->sec_cpl.seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
@@ -3212,7 +3216,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher keys */
struct crypto_authenc_keys keys;
- unsigned int bs;
+ unsigned int bs, subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
int err = 0, i, key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3241,6 +3245,15 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
pr_err("chcr : Unsupported digest size\n");
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3258,9 +3271,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
*/
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
@@ -3333,6 +3349,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
struct crypto_authenc_keys keys;
int err;
	/* it contains both the auth and cipher keys */
+ unsigned int subtype;
int key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3350,6 +3367,15 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3357,13 +3383,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key\n");
+ pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
goto out;
}
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
key_ctx_len = sizeof(struct _key_ctx)
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
@@ -3375,6 +3404,40 @@ out:
aeadctx->enckey_len = 0;
return -EINVAL;
}
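Both setkey paths above treat the new rfc3686 subtypes the same way: the last four bytes of the authenc cipher key are the RFC 3686 nonce, which is copied aside and subtracted from enckeylen before the key context is sized. A minimal sketch of that split, with demo_* names that are purely illustrative:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_RFC3686_NONCE_SIZE	4	/* same value as CTR_RFC3686_NONCE_SIZE */

/* Split "AES key || 4-byte nonce", as chcr_authenc_setkey() and
 * chcr_aead_digest_null_setkey() do for the CTR_SHA/CTR_NULL subtypes.
 */
static int demo_split_rfc3686_key(u8 *nonce, const u8 *enckey,
				  unsigned int *enckeylen)
{
	if (*enckeylen < DEMO_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(nonce, enckey + *enckeylen - DEMO_RFC3686_NONCE_SIZE,
	       DEMO_RFC3686_NONCE_SIZE);
	*enckeylen -= DEMO_RFC3686_NONCE_SIZE;	/* AES key bytes that remain */
	return 0;
}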
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx;
+ struct sk_buff *skb;
+
+ if (!a_ctx(tfm)->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ u_ctx = ULD_CTX(a_ctx(tfm));
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ a_ctx(tfm)->tx_qidx)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
+ op_type);
+
+ if (IS_ERR(skb) || !skb)
+ return PTR_ERR(skb);
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
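chcr_aead_op() above returns -EINPROGRESS once the work request has been posted, or -EBUSY when the crypto queue is full and CRYPTO_TFM_REQ_MAY_BACKLOG is not set; the final status is delivered through the usual asynchronous completion path. A hypothetical caller, shown only to illustrate that convention with the generic wait helpers:

#include <linux/crypto.h>
#include <crypto/aead.h>

/* Submit an AEAD request and block until the async completion fires. */
static int demo_aead_encrypt_sync(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	/* crypto_wait_req() maps -EINPROGRESS/-EBUSY to the final result. */
	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}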
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -3383,8 +3446,10 @@ static int chcr_aead_encrypt(struct aead_request *req)
reqctx->verify = VERIFY_HW;
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3413,8 +3478,10 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3427,38 +3494,6 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
}
-static int chcr_aead_op(struct aead_request *req,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx;
- struct sk_buff *skb;
-
- if (!a_ctx(tfm)->dev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
- return -ENXIO;
- }
- u_ctx = ULD_CTX(a_ctx(tfm));
- if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- a_ctx(tfm)->tx_qidx)) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
- }
-
- /* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
- op_type);
-
- if (IS_ERR(skb) || !skb)
- return PTR_ERR(skb);
-
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- return -EINPROGRESS;
-}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -3742,7 +3777,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3763,7 +3798,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3785,7 +3820,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3805,7 +3840,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3826,7 +3861,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3847,7 +3882,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3867,6 +3902,133 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-digest_null-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+
};
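The six new templates register the rfc3686(ctr(aes)) authenc transforms under standard crypto API names, so callers reach them exactly as they would the existing CBC variants. A hedged allocation sketch (error handling trimmed, function name illustrative):

#include <crypto/aead.h>

/* Allocate one of the newly registered AEADs by its cra_name. */
static struct crypto_aead *demo_alloc_ctr_sha256_aead(void)
{
	return crypto_alloc_aead("authenc(hmac(sha256),rfc3686(ctr(aes)))",
				 0, 0);
}

The key handed to setkey is the normal authenc key blob; as handled above, the trailing four bytes of its cipher-key part act as the RFC 3686 nonce.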
/*
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 96c9335ee728..d1673a5d4bf5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -226,15 +226,6 @@
#define SPACE_LEFT(len) \
((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
- 96, 112, 120, 136, 144, 160, 168, 184,
- 192, 208, 216, 232, 240, 256, 264, 280,
- 288, 304, 312, 328, 336, 352, 360, 376};
-unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
- 112, 112, 128, 128, 144, 144, 160, 160,
- 192, 192, 208, 208, 224, 224, 240, 240,
- 272, 272, 288, 288, 304, 304, 320, 320};
-
struct algo_param {
unsigned int auth_mode;
unsigned int mk_size;
@@ -404,10 +395,4 @@ static inline u32 aes_ks_subword(const u32 w)
return *(u32 *)(&bytes[0]);
}
-static u32 round_constant[11] = {
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x1B000000, 0x36000000, 0x6C000000
-};
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index f5a2624081dc..04f277cade7c 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -48,6 +48,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ .tx_handler = chcr_uld_tx_handler,
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
struct uld_ctx *assign_chcr_device(void)
@@ -164,6 +167,10 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
goto out;
}
u_ctx->lldi = *lld;
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(lld);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
return u_ctx;
}
@@ -187,6 +194,13 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+{
+ return chcr_ipsec_xmit(skb, dev);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 94e7412f6164..3c29ee09b8b5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -39,6 +39,7 @@
#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
+#include "t4_msg.h"
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
@@ -89,12 +90,49 @@ struct uld_ctx {
struct chcr_dev *dev;
};
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
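As a worked example of the formula above, with a flit being an 8-byte unit of descriptor space (arithmetic mine, not part of the patch):

/* sgl_len(4): n-- -> 3; (3 * 3) / 2 -> 4; (3 & 1) -> 1; + 2 -> 7 flits. */
static inline unsigned int demo_sgl_len_example(void)
{
	return sgl_len(4);	/* 7 flits == 56 bytes */
}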
+
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 94a87e3ad9bc..7daf0a17a7d2 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -134,14 +134,16 @@
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000
#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000
#define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000
#define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
@@ -210,8 +212,6 @@ struct dsgl_walk {
struct phys_sge_pairs *to;
};
-
-
struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -227,21 +227,18 @@ struct __aead_ctx {
struct chcr_authenc_ctx authenc[0];
};
-
-
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 nonce[4];
u16 hmac_ctrl;
u16 mayverify;
struct __aead_ctx ctx[0];
};
-
-
struct hmac_ctx {
struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -307,44 +304,29 @@ typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
int size,
unsigned short op_type);
-static int chcr_aead_op(struct aead_request *req_base,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn);
-static inline int get_aead_subtype(struct crypto_aead *aead);
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
- unsigned char *input, int err);
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
-static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
- unsigned short op_type);
-static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
- *req, unsigned short op_type);
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid);
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type);
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam);
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req);
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req);
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid);
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen, unsigned short op_type,
+ unsigned short qid);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
+ unsigned int assoclen, unsigned short op_type);
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam);
+int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param);
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req);
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req);
+void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
+void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
new file mode 100644
index 000000000000..db1e241104ed
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -0,0 +1,654 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/esp.h>
+#include <net/xfrm.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+#define GCM_ESP_IV_SIZE 8
+
+static int chcr_xfrm_add_state(struct xfrm_state *x);
+static void chcr_xfrm_del_state(struct xfrm_state *x);
+static void chcr_xfrm_free_state(struct xfrm_state *x);
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+ .xdo_dev_state_add = chcr_xfrm_add_state,
+ .xdo_dev_state_delete = chcr_xfrm_del_state,
+ .xdo_dev_state_free = chcr_xfrm_free_state,
+ .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+};
+
+/* Register xfrm device offload ops on the Chelsio interfaces */
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+{
+ struct net_device *netdev = NULL;
+ int i;
+
+ for (i = 0; i < lld->nports; i++) {
+ netdev = lld->ports[i];
+ if (!netdev)
+ continue;
+ netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ rtnl_lock();
+ netdev_change_features(netdev);
+ rtnl_unlock();
+ }
+}
+
+static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ int hmac_ctrl;
+ int authsize = x->aead->alg_icv_len / 8;
+
+ sa_entry->authsize = authsize;
+
+ switch (authsize) {
+ case ICV_8:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ break;
+ case ICV_12:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ break;
+ case ICV_16:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return hmac_ctrl;
+}
+
+static inline int chcr_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct crypto_cipher *cipher;
+ int keylen = (x->aead->alg_key_len + 7) / 8;
+ unsigned char *key = x->aead->alg_key;
+ int ck_size, key_ctx_size = 0;
+ unsigned char ghash_h[AEAD_H_SIZE];
+ int ret = 0;
+
+ if (keylen > 3) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(sa_entry->salt, key + keylen, 4);
+ }
+
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(sa_entry->key, key, keylen);
+ sa_entry->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+
+ sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+
+ /* Calculate the H = CIPH(K, 0 repeated 16 times).
+ * It will go in key context
+ */
+ cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+ if (IS_ERR(cipher)) {
+ sa_entry->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret) {
+ sa_entry->enckey_len = 0;
+ goto out1;
+ }
+ memset(ghash_h, 0, AEAD_H_SIZE);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+ memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
+ 16), ghash_h, AEAD_H_SIZE);
+ sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
+
+/*
+ * chcr_xfrm_add_state
+ * returns 0 on success, negative error code if the state cannot be
+ * offloaded or resources cannot be allocated
+ */
+static int chcr_xfrm_add_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+ int res = 0;
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ pr_debug("CHCR: Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128 &&
+ x->aead->alg_icv_len != 96) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ chcr_ipsec_setkey(x, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ try_module_get(THIS_MODULE);
+out:
+ return res;
+}
+
+static void chcr_xfrm_del_state(struct xfrm_state *x)
+{
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static void chcr_xfrm_free_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kfree(sa_entry);
+ module_put(THIS_MODULE);
+}
+
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+
+ return true;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+{
+ int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
+}
+
+static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ unsigned int kctx_len)
+{
+ unsigned int flits;
+ int hdrlen = is_eth_imm(skb, kctx_len);
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+ /* Otherwise, we're going to have to construct a Scatter gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits += (sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
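A quick worked case of the comment above, with illustrative numbers: a small skb that fits the 256-byte immediate budget costs DIV_ROUND_UP(skb->len + hdrlen, 8) flits, while a larger skb with, say, two page fragments instead pays sgl_len(2 + 1) = 5 flits for the gather list on top of the fixed WR/CPL/key-context overhead summed at the end of the function:

/* Illustrative only: gather-list flits for a linear skb plus two fragments. */
static inline unsigned int demo_gather_flits(void)
{
	return sgl_len(2 + 1);	/* == 5 flits */
}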
+
+inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ struct cpl_tx_pkt_core *cpl;
+ u64 cntrl = 0;
+ u32 ctrl0, qidx;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cpl = (struct cpl_tx_pkt_core *)pos;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+ if (skb_vlan_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(ctrl0);
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ pos += sizeof(struct cpl_tx_pkt_core);
+ return pos;
+}
+
+inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ unsigned int len, qidx;
+ struct _key_ctx *key_ctx;
+ int left, eoq, key_len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
+ key_len = sa_entry->kctx_len;
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ left = eoq;
+ if (!eoq) {
+ pos = q->q.desc;
+ left = 64 * q->q.size;
+ }
+
+ /* Copy the Key context header */
+ key_ctx = (struct _key_ctx *)pos;
+ key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
+ memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
+ pos += sizeof(struct _key_ctx);
+ left -= sizeof(struct _key_ctx);
+
+ if (likely(len <= left)) {
+ memcpy(key_ctx->key, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ if (key_len <= left) {
+ memcpy(pos, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ memcpy(pos, sa_entry->key, left);
+ memcpy(q->q.desc, sa_entry->key + left,
+ key_len - left);
+ pos = (u8 *)q->q.desc + (key_len - left);
+ }
+ }
+ /* Copy CPL TX PKT XT */
+ pos = copy_cpltx_pktxt(skb, dev, pos);
+
+ return pos;
+}
+
+inline void *chcr_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ unsigned int immdatalen = 0;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
+ unsigned int flits;
+ u32 wr_mid;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ unsigned int kctx_len = sa_entry->kctx_len;
+ int qid = q->q.cntxt_id;
+
+ atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+ flits = calc_tx_sec_flits(skb, kctx_len);
+
+ if (is_eth_imm(skb, kctx_len))
+ immdatalen = skb->len;
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+ wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+ wr_mid |= FW_ULPTX_WR_DATA_F;
+ wr->wreq.flowid_len16 = htonl(wr_mid);
+
+ /* ULPTX */
+ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+ wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+
+ /* Sub-command */
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+ immdatalen);
+
+ /* CPL_SEC_PDU */
+ wr->req.sec_cpl.op_ivinsrtofst = htonl(
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1)));
+
+ wr->req.sec_cpl.pldlen = htonl(skb->len);
+
+ wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ (skb_transport_offset(skb) + 1),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr)),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1), 0);
+
+ wr->req.sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1,
+ sa_entry->authsize,
+ sa_entry->authsize);
+ wr->req.sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ sa_entry->hmac_ctrl,
+ ivsize >> 1);
+ wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 0, 0);
+
+ pos += sizeof(struct fw_ulptx_wr) +
+ sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata) +
+ sizeof(struct cpl_tx_sec_pdu);
+
+ pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
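As the DIV_ROUND_UP(n, 8) implies, one Tx descriptor covers 8 flits (64 bytes); a quick illustrative case:

/* Example: 19 flits -> DIV_ROUND_UP(19, 8) == 3 Tx descriptors. */
static inline unsigned int demo_flits_to_desc_example(void)
{
	return flits_to_desc(19);
}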
+
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/*
+ * chcr_ipsec_xmit called from ULD Tx handler
+ */
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ipsec_sa_entry *sa_entry;
+ u64 *pos, *end, *before, *sgl;
+ int qidx, left, credits;
+ unsigned int flits = 0, ndesc, kctx_len;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ struct port_info *pi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
+
+ if (!x->xso.offload_handle)
+ return NETDEV_TX_BUSY;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kctx_len = sa_entry->kctx_len;
+
+ if (skb->sp->len != 1) {
+out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
+ dev->name, qidx, credits, ndesc, txq_avail(&q->q),
+ flits);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (is_eth_imm(skb, kctx_len))
+ immediate = true;
+
+ if (!immediate &&
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ before = (u64 *)pos;
+ end = (u64 *)pos + flits;
+ /* Setup IPSec CPL */
+ pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
+ if (before > (u64 *)pos) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ if (pos == (u64 *)q->q.stat) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ pos = (void *)q->q.desc;
+ }
+
+ sgl = (void *)pos;
+ if (immediate) {
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
+ dev_consume_skb_any(skb);
+ } else {
+ int last_desc;
+
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
+ 0, addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ }
+ txq_advance(&q->q, ndesc);
+
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 451620b475a0..86f5f459762e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* exynos-rng.c - Random Number Generator driver for the Exynos
*
@@ -6,15 +7,6 @@
* Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
* Copyright (C) 2012 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -22,12 +14,18 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
#define EXYNOS_RNG_CONTROL 0x0
#define EXYNOS_RNG_STATUS 0x10
+
+#define EXYNOS_RNG_SEED_CONF 0x14
+#define EXYNOS_RNG_GEN_PRNG BIT(1)
+
#define EXYNOS_RNG_SEED_BASE 0x140
#define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
#define EXYNOS_RNG_OUT_BASE 0x160
@@ -43,13 +41,21 @@
#define EXYNOS_RNG_SEED_REGS 5
#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)
+enum exynos_prng_type {
+ EXYNOS_PRNG_UNKNOWN = 0,
+ EXYNOS_PRNG_EXYNOS4,
+ EXYNOS_PRNG_EXYNOS5,
+};
+
/*
- * Driver re-seeds itself with generated random numbers to increase
- * the randomness.
+ * Driver re-seeds itself with generated random numbers to hinder
+ * backtracking of the original seed.
*
* Time for next re-seed in ms.
*/
-#define EXYNOS_RNG_RESEED_TIME 100
+#define EXYNOS_RNG_RESEED_TIME 1000
+#define EXYNOS_RNG_RESEED_BYTES 65536
+
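With the new constants, a reseed becomes due once either 1000 ms have elapsed or 64 KiB have been generated since the last seeding, whichever happens first; a condensed sketch of that predicate, mirroring exynos_rng_reseed() further down:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Reseed when either the time budget or the byte budget is exhausted. */
static bool demo_reseed_due(unsigned long last_seeding_jiffies,
			    unsigned long bytes_since_seeding)
{
	unsigned long next = last_seeding_jiffies +
			     msecs_to_jiffies(EXYNOS_RNG_RESEED_TIME);

	return time_after_eq(jiffies, next) ||
	       bytes_since_seeding >= EXYNOS_RNG_RESEED_BYTES;
}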
/*
* In polling mode, do not wait infinitely for the engine to finish the work.
*/
@@ -63,13 +69,17 @@ struct exynos_rng_ctx {
/* Device associated memory */
struct exynos_rng_dev {
struct device *dev;
+ enum exynos_prng_type type;
void __iomem *mem;
struct clk *clk;
+ struct mutex lock;
/* Generated numbers stored for seeding during resume */
u8 seed_save[EXYNOS_RNG_SEED_SIZE];
unsigned int seed_save_len;
/* Time of last seeding in jiffies */
unsigned long last_seeding;
+ /* Bytes generated since last seeding */
+ unsigned long bytes_seeding;
};
static struct exynos_rng_dev *exynos_rng_dev;
@@ -114,39 +124,12 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
}
rng->last_seeding = jiffies;
+ rng->bytes_seeding = 0;
return 0;
}
/*
- * Read from output registers and put the data under 'dst' array,
- * up to dlen bytes.
- *
- * Returns number of bytes actually stored in 'dst' (dlen
- * or EXYNOS_RNG_SEED_SIZE).
- */
-static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
- u8 *dst, unsigned int dlen)
-{
- unsigned int cnt = 0;
- int i, j;
- u32 val;
-
- for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
- val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
-
- for (i = 0; i < 4; i++) {
- dst[cnt] = val & 0xff;
- val >>= 8;
- if (++cnt >= dlen)
- return cnt;
- }
- }
-
- return cnt;
-}
-
-/*
* Start the engine and poll for finish. Then read from output registers
* filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
* random data (EXYNOS_RNG_SEED_SIZE).
@@ -160,8 +143,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
{
int retry = EXYNOS_RNG_WAIT_RETRIES;
- exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
- EXYNOS_RNG_CONTROL);
+ if (rng->type == EXYNOS_PRNG_EXYNOS4) {
+ exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+ EXYNOS_RNG_CONTROL);
+ } else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
+ exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
+ EXYNOS_RNG_SEED_CONF);
+ }
while (!(exynos_rng_readl(rng,
EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
@@ -173,7 +161,9 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
/* Clear status bit */
exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
EXYNOS_RNG_STATUS);
- *read = exynos_rng_copy_random(rng, dst, dlen);
+ *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
+ memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
+ rng->bytes_seeding += *read;
return 0;
}
@@ -187,13 +177,18 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
unsigned int read = 0;
u8 seed[EXYNOS_RNG_SEED_SIZE];
- if (time_before(now, next_seeding))
+ if (time_before(now, next_seeding) &&
+ rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES)
return;
if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
return;
exynos_rng_set_seed(rng, seed, read);
+
+	/* Briefly drop the lock so other callers can make progress. */
+ mutex_unlock(&rng->lock);
+ mutex_lock(&rng->lock);
}
static int exynos_rng_generate(struct crypto_rng *tfm,
@@ -209,6 +204,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
do {
ret = exynos_rng_get_random(rng, dst, dlen, &read);
if (ret)
@@ -219,6 +215,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
exynos_rng_reseed(rng);
} while (dlen > 0);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -236,7 +233,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -259,7 +258,7 @@ static struct rng_alg exynos_rng_alg = {
.base = {
.cra_name = "stdrng",
.cra_driver_name = "exynos_rng",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_ctxsize = sizeof(struct exynos_rng_ctx),
.cra_module = THIS_MODULE,
.cra_init = exynos_rng_kcapi_init,
@@ -279,6 +278,10 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
+ rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+
+ mutex_init(&rng->lock);
+
rng->dev = &pdev->dev;
rng->clk = devm_clk_get(&pdev->dev, "secss");
if (IS_ERR(rng->clk)) {
@@ -329,9 +332,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
/* Get new random numbers and store them for seeding on resume. */
exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
&(rng->seed_save_len));
+
+ mutex_unlock(&rng->lock);
+
dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
rng->seed_save_len);
@@ -354,8 +362,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
+ mutex_unlock(&rng->lock);
+
clk_disable_unprepare(rng->clk);
return ret;
@@ -367,6 +379,10 @@ static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
static const struct of_device_id exynos_rng_dt_match[] = {
{
.compatible = "samsung,exynos4-rng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS4,
+ }, {
+ .compatible = "samsung,exynos5250-prng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS5,
},
{ },
};
@@ -386,4 +402,4 @@ module_platform_driver(exynos_rng_driver);
MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e09d4055b19e..a5a36fe7bf2c 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2579,6 +2579,7 @@ err_out_unmap_bars:
for (i = 0; i < 3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
+ kfree(dev);
err_out_free_regions:
pci_release_regions(pdev);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4bcef78a08aa..225e74a7f724 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -108,10 +108,10 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
- priv->base + ctrl);
+ EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -119,12 +119,12 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Release engine from reset */
- val = readl(priv->base + ctrl);
+ val = readl(EIP197_PE(priv) + ctrl);
val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
- writel(val, priv->base + ctrl);
+ writel(val, EIP197_PE(priv) + ctrl);
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
@@ -145,14 +145,14 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
}
/* Clear the scratchpad memory */
- val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
+ memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
@@ -173,7 +173,7 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, cd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -182,26 +182,25 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
priv->config.cd_size,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.cd_offset),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
- writel(val,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(5, 0),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
}
return 0;
@@ -212,7 +211,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, rd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -221,33 +220,33 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
priv->config.rd_size,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
writel(val,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(7, 0),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
/* enable ring interrupt */
- val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
val |= EIP197_RDR_IRQ(i);
- writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
}
return 0;
@@ -259,39 +258,40 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
int i, ret;
	/* Determine endianness and configure byte swap */
- version = readl(priv->base + EIP197_HIA_VERSION);
- val = readl(priv->base + EIP197_HIA_MST_CTRL);
+ version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+ val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
val |= EIP197_MST_CTRL_BYTE_SWAP;
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
- writel(val, priv->base + EIP197_HIA_MST_CTRL);
-
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
- priv->base + EIP197_MST_CTRL);
+ EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
/* Interrupts reset */
/* Disable all global interrupts */
- writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);
+ writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
/* Clear any pending interrupt */
- writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
/* Data Fetch Engine configuration */
/* Reset all DFE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* DMA transfer size to use */
val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
@@ -299,29 +299,32 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, priv->base + EIP197_HIA_DFE_CFG);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
/* Leave the DFE threads reset state */
- writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
	/* Configure the processing engine thresholds */
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- priv->base + EIP197_PE_IN_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- priv->base + EIP197_PE_IN_TBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* Data Store Engine configuration */
/* Reset all DSE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Wait for all DSE threads to complete */
- while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
GENMASK(15, 12)) != GENMASK(15, 12))
;
@@ -330,15 +333,19 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, priv->base + EIP197_HIA_DSE_CFG);
+ /* FIXME: instability issues can occur for EIP97, but disabling this
+ * impacts performance.
+ */
+ if (priv->version == EIP197)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
/* Leave the DSE threads reset state */
- writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- priv->base + EIP197_PE_OUT_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
/* Processing Engine configuration */
@@ -348,73 +355,75 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
val |= EIP197_ALG_SHA2;
- writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Clear interrupts for this ring */
writel(GENMASK(31, 0),
- priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+ EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Result Descriptor Ring prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Enable command descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Enable result descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Clear any HIA interrupt */
- writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- eip197_trc_cache_init(priv);
+ if (priv->version == EIP197) {
+ eip197_trc_cache_init(priv);
- ret = eip197_load_firmwares(priv);
- if (ret)
- return ret;
+ ret = eip197_load_firmwares(priv);
+ if (ret)
+ return ret;
+ }
safexcel_hw_setup_cdesc_rings(priv);
safexcel_hw_setup_rdesc_rings(priv);
@@ -422,6 +431,23 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
return 0;
}
+/* Called with ring's lock taken */
+static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+ int ring, int reqs)
+{
+ int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+
+ if (!coal)
+ return 0;
+
+ /* Configure when we want an interrupt */
+ writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+ EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+
+ return coal;
+}
+
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
@@ -429,34 +455,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
- priv->ring[ring].need_dequeue = false;
+ /* If a request wasn't properly dequeued because of a lack of resources,
+ * process it first.
+ */
+ req = priv->ring[ring].req;
+ backlog = priv->ring[ring].backlog;
+ if (req)
+ goto handle_req;
- do {
+ while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!req)
+ if (!req) {
+ priv->ring[ring].req = NULL;
+ priv->ring[ring].backlog = NULL;
goto finalize;
+ }
+handle_req:
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request) {
- spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, req);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
-
- priv->ring[ring].need_dequeue = true;
- goto finalize;
- }
+ if (!request)
+ goto request_failed;
ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
- req->complete(req, ret);
- priv->ring[ring].need_dequeue = true;
- goto finalize;
+ goto request_failed;
}
if (backlog)
@@ -468,30 +496,39 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
cdesc += commands;
rdesc += results;
- } while (nreq++ < EIP197_MAX_BATCH_SZ);
+ nreq++;
+ }
+
+request_failed:
+ /* Not enough resources to handle all the requests. Bail out and save
+ * the request and the backlog for the next dequeue call (per-ring).
+ */
+ priv->ring[ring].req = req;
+ priv->ring[ring].backlog = backlog;
finalize:
- if (nreq == EIP197_MAX_BATCH_SZ)
- priv->ring[ring].need_dequeue = true;
- else if (!nreq)
+ if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].lock);
+ spin_lock_bh(&priv->ring[ring].egress_lock);
- /* Configure when we want an interrupt */
- writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
- EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+ if (!priv->ring[ring].busy) {
+ nreq -= safexcel_try_push_requests(priv, ring, nreq);
+ if (nreq)
+ priv->ring[ring].busy = true;
+ }
+
+ priv->ring[ring].requests_left += nreq;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
writel((cdesc * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
-
- spin_unlock_bh(&priv->ring[ring].lock);
+ EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -540,7 +577,6 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request)
@@ -587,14 +623,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
- int ret, i, nreq, ndesc = 0;
+ int ret, i, nreq, ndesc, tot_descs, done;
bool should_complete;
- nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
- nreq >>= 24;
- nreq &= GENMASK(6, 0);
+handle_results:
+ tot_descs = 0;
+
+ nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+ nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
if (!nreq)
- return;
+ goto requests_left;
for (i = 0; i < nreq; i++) {
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -609,13 +648,9 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
if (ndesc < 0) {
kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
- return;
+ goto acknowledge;
}
- writel(EIP197_xDR_PROC_xD_PKT(1) |
- EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
-
if (should_complete) {
local_bh_disable();
sreq->req->complete(sreq->req, ret);
@@ -623,19 +658,41 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
}
kfree(sreq);
+ tot_descs += ndesc;
}
+
+acknowledge:
+ if (i) {
+ writel(EIP197_xDR_PROC_xD_PKT(i) |
+ EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ }
+
+ /* If the number of requests overflowed the counter, try to process more
+ * requests.
+ */
+ if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+ goto handle_results;
+
+requests_left:
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+
+ done = safexcel_try_push_requests(priv, ring,
+ priv->ring[ring].requests_left);
+
+ priv->ring[ring].requests_left -= done;
+ if (!done && !priv->ring[ring].requests_left)
+ priv->ring[ring].busy = false;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
}
-static void safexcel_handle_result_work(struct work_struct *work)
+static void safexcel_dequeue_work(struct work_struct *work)
{
struct safexcel_work_data *data =
container_of(work, struct safexcel_work_data, work);
- struct safexcel_crypto_priv *priv = data->priv;
-
- safexcel_handle_result_descriptor(priv, data->ring);
- if (priv->ring[data->ring].need_dequeue)
- safexcel_dequeue(data->priv, data->ring);
+ safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
@@ -647,16 +704,16 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
struct safexcel_ring_irq_data *irq_data = data;
struct safexcel_crypto_priv *priv = irq_data->priv;
- int ring = irq_data->ring;
+ int ring = irq_data->ring, rc = IRQ_NONE;
u32 status, stat;
- status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+ status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
if (!status)
- return IRQ_NONE;
+ return rc;
/* RDR interrupts */
if (status & EIP197_RDR_IRQ(ring)) {
- stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
if (unlikely(stat & EIP197_xDR_ERR)) {
/*
@@ -666,22 +723,37 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
*/
dev_err(priv->dev, "RDR: fatal error.");
} else if (likely(stat & EIP197_xDR_THRESH)) {
- queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+ rc = IRQ_WAKE_THREAD;
}
/* ACK the interrupts */
writel(stat & 0xff,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
}
/* ACK the interrupts */
- writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));
+ writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+ return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+ struct safexcel_ring_irq_data *irq_data = data;
+ struct safexcel_crypto_priv *priv = irq_data->priv;
+ int ring = irq_data->ring;
+
+ safexcel_handle_result_descriptor(priv, ring);
+
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
irq_handler_t handler,
+ irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
int ret, irq = platform_get_irq_byname(pdev, name);
@@ -691,8 +763,9 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
return irq;
}
- ret = devm_request_irq(&pdev->dev, irq, handler, 0,
- dev_name(&pdev->dev), ring_irq_priv);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+ threaded_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), ring_irq_priv);
if (ret) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
return ret;
@@ -755,11 +828,11 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
u32 val, mask;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
@@ -769,6 +842,35 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+ struct safexcel_register_offsets *offsets = &priv->offsets;
+
+ if (priv->version == EIP197) {
+ offsets->hia_aic = EIP197_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP197_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP197_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP197_PE_BASE;
+ } else {
+ offsets->hia_aic = EIP97_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP97_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP97_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP97_PE_BASE;
+ }
+}
+
static int safexcel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -781,6 +883,9 @@ static int safexcel_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
+ priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+ safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
@@ -839,6 +944,7 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+ safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
ret = irq;
@@ -847,7 +953,7 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
- INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+ INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
@@ -856,6 +962,9 @@ static int safexcel_probe(struct platform_device *pdev)
goto err_clk;
}
+ priv->ring[i].requests_left = 0;
+ priv->ring[i].busy = false;
+
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
@@ -903,7 +1012,14 @@ static int safexcel_remove(struct platform_device *pdev)
}
static const struct of_device_id safexcel_of_match_table[] = {
- { .compatible = "inside-secure,safexcel-eip197" },
+ {
+ .compatible = "inside-secure,safexcel-eip97",
+ .data = (void *)EIP97,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197",
+ .data = (void *)EIP197,
+ },
{},
};
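
A note on the interrupt rework above: it is the standard threaded-IRQ split. The hard handler only checks that the ring raised the interrupt, acknowledges it, and returns IRQ_WAKE_THREAD; the threaded handler then walks the result descriptors in process context and kicks the dequeue workqueue. A minimal sketch of the same pattern, using made-up demo_* names rather than the driver's real registers:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device state, for illustration only. */
struct demo_dev {
	struct device *dev;
	void __iomem *regs;
};

#define DEMO_IRQ_STAT	0x00	/* made-up register offsets */
#define DEMO_IRQ_ACK	0x04

static irqreturn_t demo_hardirq(int irq, void *data)
{
	struct demo_dev *dd = data;
	u32 stat = readl(dd->regs + DEMO_IRQ_STAT);

	if (!stat)
		return IRQ_NONE;		/* not our interrupt */

	writel(stat, dd->regs + DEMO_IRQ_ACK);	/* ack in hard-IRQ context */
	return IRQ_WAKE_THREAD;			/* defer the heavy lifting */
}

static irqreturn_t demo_irq_thread(int irq, void *data)
{
	struct demo_dev *dd = data;

	/* process context: may sleep, take mutexes, allocate with GFP_KERNEL */
	dev_dbg(dd->dev, "handling completed descriptors\n");
	return IRQ_HANDLED;
}

static int demo_request_irq(struct demo_dev *dd, int irq)
{
	return devm_request_threaded_irq(dd->dev, irq, demo_hardirq,
					 demo_irq_thread, IRQF_ONESHOT,
					 dev_name(dd->dev), dd);
}
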
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 304c5838c11a..4e219c21608b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -19,64 +19,103 @@
#define EIP197_HIA_VERSION_BE 0x35ca
/* Static configuration */
-#define EIP197_DEFAULT_RING_SIZE 64
+#define EIP197_DEFAULT_RING_SIZE 400
#define EIP197_MAX_TOKENS 5
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_COUNT 1
-#define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE
+#define EIP197_MAX_BATCH_SZ 64
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv) ((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv) ((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv) ((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv) ((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv) ((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE 0x90000
+#define EIP197_HIA_AIC_G_BASE 0x90000
+#define EIP197_HIA_AIC_R_BASE 0x90800
+#define EIP197_HIA_AIC_xDR_BASE 0x80000
+#define EIP197_HIA_DFE_BASE 0x8c000
+#define EIP197_HIA_DFE_THR_BASE 0x8c040
+#define EIP197_HIA_DSE_BASE 0x8d000
+#define EIP197_HIA_DSE_THR_BASE 0x8d040
+#define EIP197_HIA_GEN_CFG_BASE 0xf0000
+#define EIP197_PE_BASE 0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE 0x0
+#define EIP97_HIA_AIC_G_BASE 0x0
+#define EIP97_HIA_AIC_R_BASE 0x0
+#define EIP97_HIA_AIC_xDR_BASE 0x0
+#define EIP97_HIA_DFE_BASE 0xf000
+#define EIP97_HIA_DFE_THR_BASE 0xf200
+#define EIP97_HIA_DSE_BASE 0xf400
+#define EIP97_HIA_DSE_THR_BASE 0xf600
+#define EIP97_HIA_GEN_CFG_BASE 0x10000
+#define EIP97_PE_BASE 0x10000
+
/* CDR/RDR register offsets */
-#define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000)
-#define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r))
-#define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800)
-#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0
-#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4
-#define EIP197_HIA_xDR_RING_SIZE 0x18
-#define EIP197_HIA_xDR_DESC_SIZE 0x1c
-#define EIP197_HIA_xDR_CFG 0x20
-#define EIP197_HIA_xDR_DMA_CFG 0x24
-#define EIP197_HIA_xDR_THRESH 0x28
-#define EIP197_HIA_xDR_PREP_COUNT 0x2c
-#define EIP197_HIA_xDR_PROC_COUNT 0x30
-#define EIP197_HIA_xDR_PREP_PNTR 0x34
-#define EIP197_HIA_xDR_PROC_PNTR 0x38
-#define EIP197_HIA_xDR_STAT 0x3c
+#define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x0004
+#define EIP197_HIA_xDR_RING_SIZE 0x0018
+#define EIP197_HIA_xDR_DESC_SIZE 0x001c
+#define EIP197_HIA_xDR_CFG 0x0020
+#define EIP197_HIA_xDR_DMA_CFG 0x0024
+#define EIP197_HIA_xDR_THRESH 0x0028
+#define EIP197_HIA_xDR_PREP_COUNT 0x002c
+#define EIP197_HIA_xDR_PROC_COUNT 0x0030
+#define EIP197_HIA_xDR_PREP_PNTR 0x0034
+#define EIP197_HIA_xDR_PROC_PNTR 0x0038
+#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x8c000
-#define EIP197_HIA_DFE_THR_CTRL 0x8c040
-#define EIP197_HIA_DFE_THR_STAT 0x8c044
-#define EIP197_HIA_DSE_CFG 0x8d000
-#define EIP197_HIA_DSE_THR_CTRL 0x8d040
-#define EIP197_HIA_DSE_THR_STAT 0x8d044
-#define EIP197_HIA_RA_PE_CTRL 0x90010
-#define EIP197_HIA_RA_PE_STAT 0x90014
+#define EIP197_HIA_DFE_CFG 0x0000
+#define EIP197_HIA_DFE_THR_CTRL 0x0000
+#define EIP197_HIA_DFE_THR_STAT 0x0004
+#define EIP197_HIA_DSE_CFG 0x0000
+#define EIP197_HIA_DSE_THR_CTRL 0x0000
+#define EIP197_HIA_DSE_THR_STAT 0x0004
+#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
-#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808
-#define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810
-#define EIP197_HIA_AIC_G_ACK 0x9f810
-#define EIP197_HIA_MST_CTRL 0x9fff4
-#define EIP197_HIA_OPTIONS 0x9fff8
-#define EIP197_HIA_VERSION 0x9fffc
-#define EIP197_PE_IN_DBUF_THRES 0xa0000
-#define EIP197_PE_IN_TBUF_THRES 0xa0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0xa0800
-#define EIP197_PE_ICE_PUE_CTRL 0xa0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04
-#define EIP197_PE_ICE_FPP_CTRL 0xa0d80
-#define EIP197_PE_ICE_RAM_CTRL 0xa0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0xa1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c
-#define EIP197_PE_OUT_DBUF_THRES 0xa1c00
-#define EIP197_PE_OUT_TBUF_THRES 0xa1d00
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
+#define EIP197_HIA_AIC_G_ACK 0xf810
+#define EIP197_HIA_MST_CTRL 0xfff4
+#define EIP197_HIA_OPTIONS 0xfff8
+#define EIP197_HIA_VERSION 0xfffc
+#define EIP197_PE_IN_DBUF_THRES 0x0000
+#define EIP197_PE_IN_TBUF_THRES 0x0100
+#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
+#define EIP197_PE_ICE_PUE_CTRL 0x0c80
+#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
+#define EIP197_PE_ICE_FPP_CTRL 0x0d80
+#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
+#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
+#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
+#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
+#define EIP197_PE_OUT_DBUF_THRES 0x1c00
+#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_MST_CTRL 0xfff4
+
+/* EIP197-specific registers, no indirection */
#define EIP197_CLASSIFICATION_RAMS 0xe0000
#define EIP197_TRC_CTRL 0xf0800
#define EIP197_TRC_LASTRES 0xf0804
@@ -90,7 +129,6 @@
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
#define EIP197_CS_RAM_CTRL 0xf7ff0
-#define EIP197_MST_CTRL 0xffff4
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
@@ -117,6 +155,8 @@
#define EIP197_xDR_PREP_CLR_COUNT BIT(31)
/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
+#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
@@ -463,12 +503,33 @@ struct safexcel_work_data {
int ring;
};
+enum safexcel_eip_version {
+ EIP97,
+ EIP197,
+};
+
+struct safexcel_register_offsets {
+ u32 hia_aic;
+ u32 hia_aic_g;
+ u32 hia_aic_r;
+ u32 hia_aic_xdr;
+ u32 hia_dfe;
+ u32 hia_dfe_thr;
+ u32 hia_dse;
+ u32 hia_dse_thr;
+ u32 hia_gen_cfg;
+ u32 pe;
+};
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
struct clk *clk;
struct safexcel_config config;
+ enum safexcel_eip_version version;
+ struct safexcel_register_offsets offsets;
+
/* context DMA pool */
struct dma_pool *context_pool;
@@ -489,7 +550,20 @@ struct safexcel_crypto_priv {
/* queue */
struct crypto_queue queue;
spinlock_t queue_lock;
- bool need_dequeue;
+
+ /* Number of requests in the engine that need the threshold
+ * interrupt to be set up.
+ */
+ int requests_left;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for current requests when bailing out of the dequeueing
+ * function because not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
} ring[EIP197_MAX_RINGS];
};
@@ -539,7 +613,6 @@ void safexcel_free_context(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req,
int result_sz);
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request);
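
The core of the EIP97/EIP197 split above is the register indirection: the driver keeps a single ioremap()'d base plus a per-variant table of block offsets, and every accessor macro resolves to base + block offset + register offset. A stripped-down sketch of the same scheme (names, offsets and the version enum are invented, not the driver's):

#include <linux/io.h>
#include <linux/types.h>

enum demo_version { DEMO_LITE, DEMO_FULL };

struct demo_offsets {
	u32 ring;	/* descriptor ring block */
	u32 pe;		/* processing engine block */
};

struct demo_priv {
	void __iomem *base;
	enum demo_version version;
	struct demo_offsets offsets;
};

#define DEMO_RING(priv)	((priv)->base + (priv)->offsets.ring)
#define DEMO_PE(priv)	((priv)->base + (priv)->offsets.pe)

static void demo_init_register_offsets(struct demo_priv *priv)
{
	if (priv->version == DEMO_FULL) {
		priv->offsets.ring = 0x80000;	/* large-SoC layout */
		priv->offsets.pe   = 0xa0000;
	} else {
		priv->offsets.ring = 0x0;	/* compact layout */
		priv->offsets.pe   = 0x10000;
	}
}

/* a register is then reached as, e.g., readl(DEMO_PE(priv) + 0x1004) */
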
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index fcc0a606d748..63a8768ed2ae 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -27,7 +27,6 @@ struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
- enum safexcel_cipher_direction direction;
u32 mode;
__le32 key[8];
@@ -35,6 +34,7 @@ struct safexcel_cipher_ctx {
};
struct safexcel_cipher_req {
+ enum safexcel_cipher_direction direction;
bool needs_inv;
};
@@ -69,6 +69,7 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -78,10 +79,12 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
}
- for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+ if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
@@ -95,12 +98,15 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
}
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+ struct crypto_async_request *async,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->priv;
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
int ctrl_size;
- if (ctx->direction == SAFEXCEL_ENCRYPT)
+ if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
else
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
@@ -243,7 +249,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
n_cdesc++;
if (n_cdesc == 1) {
- safexcel_context_control(ctx, cdesc);
+ safexcel_context_control(ctx, async, cdesc);
safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
}
@@ -353,8 +359,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
@@ -390,7 +396,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(async, &ctx->base, priv,
+ ret = safexcel_invalidate_cache(async, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -406,9 +412,13 @@ static int safexcel_send(struct crypto_async_request *async,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
+ BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+
if (sreq->needs_inv)
ret = safexcel_cipher_send_inv(async, ring, request,
commands, results);
@@ -443,8 +453,8 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -467,11 +477,11 @@ static int safexcel_aes(struct skcipher_request *req,
int ret, ring;
sreq->needs_inv = false;
- ctx->direction = dir;
+ sreq->direction = dir;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv) {
+ if (priv->version == EIP197 && ctx->base.needs_inv) {
sreq->needs_inv = true;
ctx->base.needs_inv = false;
}
@@ -490,8 +500,8 @@ static int safexcel_aes(struct skcipher_request *req,
ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -539,9 +549,14 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
- ret = safexcel_cipher_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_cipher_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
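
One structural point in the cipher hunks above: the encrypt/decrypt direction moves from the tfm-wide safexcel_cipher_ctx into the per-request safexcel_cipher_req, because a single tfm may have encryption and decryption requests in flight concurrently, so per-request state cannot live in the shared context. Schematically (illustrative names, not the driver's structs):

#include <linux/types.h>

struct demo_cipher_ctx {	/* one per tfm: shared by all its requests */
	u32 mode;
	u8 key[32];
	unsigned int keylen;
};

struct demo_cipher_req {	/* one per request: safe under concurrency */
	bool encrypt;		/* direction belongs here, not in the ctx */
	bool needs_inv;
};
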
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 0c5a5820b06e..122a2a58e98f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -14,7 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include "safexcel.h"
struct safexcel_ahash_ctx {
@@ -34,6 +33,8 @@ struct safexcel_ahash_req {
bool hmac;
bool needs_inv;
+ int nents;
+
u8 state_sz; /* expected state size, only set once */
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
@@ -152,8 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
memcpy(areq->result, sreq->state,
crypto_ahash_digestsize(ahash));
- dma_unmap_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+ if (sreq->nents) {
+ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ sreq->nents = 0;
+ }
safexcel_free_context(priv, async, sreq->state_sz);
@@ -178,7 +181,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
queued = len = req->len - req->processed;
if (queued < crypto_ahash_blocksize(ahash))
@@ -186,17 +189,31 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
else
cache_len = queued - areq->nbytes;
- /*
- * If this is not the last request and the queued data does not fit
- * into full blocks, cache it for the next send() call.
- */
- extra = queued & (crypto_ahash_blocksize(ahash) - 1);
- if (!req->last_req && extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra, areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ if (!req->last_req) {
+ /* If this is not the last request and the queued data does not
+ * fit into full blocks, cache it for the next send() call.
+ */
+ extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+ if (!extra)
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
+ extra = queued - crypto_ahash_blocksize(ahash);
+
+ if (extra) {
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
+
+ queued -= extra;
+ len -= extra;
+
+ if (!queued) {
+ *commands = 0;
+ *results = 0;
+ return 0;
+ }
+ }
}
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -234,15 +251,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
- DMA_TO_DEVICE);
- if (!nents) {
+ req->nents = dma_map_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
+ if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
- for_each_sg(areq->src, sg, nents, i) {
+ for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
@@ -382,8 +399,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
@@ -398,6 +415,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int err;
+ BUG_ON(priv->version == EIP97 && req->needs_inv);
+
if (req->needs_inv) {
req->needs_inv = false;
err = safexcel_handle_inv_result(priv, ring, async,
@@ -418,7 +437,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
+ ret = safexcel_invalidate_cache(async, ctx->priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -471,8 +490,8 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -485,13 +504,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
return 0;
}
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one block's worth of data in the pipe.
+ */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
int queued, cache_len;
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
cache_len = req->len - areq->nbytes - req->processed;
+ /* queued: everything accepted by the driver which will be handled by
+ * the next send() calls.
+ * tot sz handled by update() - tot sz handled by send()
+ */
queued = req->len - req->processed;
/*
@@ -505,7 +534,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
return areq->nbytes;
}
- /* We could'nt cache all the data */
+ /* We couldn't cache all the data */
return -E2BIG;
}
@@ -518,10 +547,17 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
req->needs_inv = false;
- if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
- ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
-
if (ctx->base.ctxr) {
+ if (priv->version == EIP197 &&
+ !ctx->base.needs_inv && req->processed &&
+ ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ /* We're still setting needs_inv here, even though it is
+ * cleared right away, because the needs_inv flag can be
+ * set in other functions and we want to keep the same
+ * logic.
+ */
+ ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
if (ctx->base.needs_inv) {
ctx->base.needs_inv = false;
req->needs_inv = true;
@@ -541,8 +577,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -625,7 +661,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->processed = req->processed;
memcpy(export->state, req->state, req->state_sz);
- memset(export->cache, 0, crypto_ahash_blocksize(ahash));
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
return 0;
@@ -707,9 +742,14 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- ret = safexcel_ahash_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_ahash_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_sha1 = {
@@ -848,7 +888,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
req->last_req = true;
ret = crypto_ahash_update(areq);
- if (ret && ret != -EINPROGRESS)
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
return ret;
wait_for_completion_interruptible(&result.completion);
@@ -913,6 +953,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret, i;
@@ -920,11 +961,13 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
- ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+ ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
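
A quick worked example of the caching rule added in the safexcel_ahash_send_req() hunk above, assuming a 64-byte block size: 65 queued bytes on a non-final request give extra = 65 & 63 = 1, so one byte is cached and 64 are sent; 128 queued bytes give extra = 0 from the mask, so the fallback caches queued - blocksize = 64 bytes; and if everything ends up cached (queued drops to zero) the function returns early with zero commands and results. A small standalone sketch of just that decision (plain C, not driver code):

#include <stdio.h>

/* mirrors the "how much to cache" decision, for illustration only */
static unsigned int bytes_to_cache(unsigned int queued, unsigned int blocksize,
				   int last_req)
{
	unsigned int extra;

	if (last_req)
		return 0;			/* final request: send everything */

	extra = queued & (blocksize - 1);	/* trailing partial block */
	if (!extra)
		extra = queued - blocksize;	/* block-aligned: hold data back */

	return extra;
}

int main(void)
{
	printf("%u\n", bytes_to_cache(65, 64, 0));	/* 1  */
	printf("%u\n", bytes_to_cache(128, 64, 0));	/* 64 */
	printf("%u\n", bytes_to_cache(128, 64, 1));	/* 0  */
	return 0;
}
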
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 8705b28eb02c..717a26607bdb 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,12 +260,11 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_alloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_zalloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
- memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
return 0;
}
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 3a0c40081ffb..aca2373fa1de 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -15,6 +15,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -410,8 +411,11 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
- engine->sram_dma = phys_to_dma(cesa->dev,
- (phys_addr_t)res->start);
+ engine->sram_dma = dma_map_resource(cesa->dev, res->start,
+ cesa->sram_size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(cesa->dev, engine->sram_dma))
+ return -ENOMEM;
return 0;
}
@@ -421,11 +425,12 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- if (!engine->pool)
- return;
-
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
- cesa->sram_size);
+ if (engine->pool)
+ gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ cesa->sram_size);
+ else
+ dma_unmap_resource(cesa->dev, engine->sram_dma,
+ cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
static int mv_cesa_probe(struct platform_device *pdev)
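
The cesa change above swaps the phys_to_dma() shortcut for a real streaming mapping of the MMIO-backed SRAM, which then also has to be unmapped on the put path. A self-contained sketch of that map/unmap pairing (hypothetical helpers, mirroring mv_cesa_get_sram()/mv_cesa_put_sram()):

#include <linux/dma-mapping.h>

/* map a device-local SRAM window so descriptors can reference it by DMA address */
static int demo_map_sram(struct device *dev, phys_addr_t start, size_t size,
			 dma_addr_t *handle)
{
	*handle = dma_map_resource(dev, start, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}

static void demo_unmap_sram(struct device *dev, dma_addr_t handle, size_t size)
{
	dma_unmap_resource(dev, handle, size, DMA_BIDIRECTIONAL, 0);
}
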
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index f2246a5abcf6..1e87637c412d 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -743,8 +743,8 @@ static int nx842_open_percpu_txwins(void)
}
if (!per_cpu(cpu_txwin, i)) {
- /* shoudn't happen, Each chip will have NX engine */
- pr_err("NX engine is not availavle for CPU %d\n", i);
+ /* shouldn't happen; each chip will have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
return -EINVAL;
}
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 5a6dc53b2b9d..4ef52c9d72fc 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1618,7 +1618,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table);
static int spacc_probe(struct platform_device *pdev)
{
- int i, err, ret = -EINVAL;
+ int i, err, ret;
struct resource *mem, *irq;
struct device_node *np = pdev->dev.of_node;
struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
@@ -1679,22 +1679,18 @@ static int spacc_probe(struct platform_device *pdev)
engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
return PTR_ERR(engine->clk);
}
if (clk_prepare_enable(engine->clk)) {
dev_info(&pdev->dev, "unable to prepare/enable clk\n");
- clk_put(engine->clk);
- return -EIO;
+ ret = -EIO;
+ goto err_clk_put;
}
- err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (err) {
- clk_disable_unprepare(engine->clk);
- clk_put(engine->clk);
- return err;
- }
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
/*
@@ -1725,6 +1721,7 @@ static int spacc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, engine);
+ ret = -EINVAL;
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
@@ -1759,6 +1756,16 @@ static int spacc_probe(struct platform_device *pdev)
engine->aeads[i].alg.base.cra_name);
}
+ if (!ret)
+ return 0;
+
+ del_timer_sync(&engine->packet_timeout);
+ device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+err_clk_disable:
+ clk_disable_unprepare(engine->clk);
+err_clk_put:
+ clk_put(engine->clk);
+
return ret;
}
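
The spacc_probe() rework above is the usual "unwind in reverse order" error handling: each successfully acquired resource gets a label, and later failures jump to the first label that releases everything obtained so far. The skeleton of that pattern, reduced to a clock plus one more step (illustrative, not the driver's full probe):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int irq, ret;

	clk = clk_get(&pdev->dev, "ref");	/* step 1: acquire */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);		/* step 2: enable */
	if (ret)
		goto err_clk_put;

	irq = platform_get_irq(pdev, 0);	/* step 3: something that can fail */
	if (irq < 0) {
		ret = irq;
		goto err_clk_disable;
	}

	return 0;

err_clk_disable:
	clk_disable_unprepare(clk);		/* undo step 2 */
err_clk_put:
	clk_put(clk);				/* undo step 1 */
	return ret;
}
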
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 8c4fd255a601..ff149e176f64 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -117,19 +117,19 @@ void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
#define CSR_RETRY_TIMES 500
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
- unsigned char ae, unsigned int csr,
- unsigned int *value)
+ unsigned char ae, unsigned int csr)
{
unsigned int iterations = CSR_RETRY_TIMES;
+ int value;
do {
- *value = GET_AE_CSR(handle, ae, csr);
+ value = GET_AE_CSR(handle, ae, csr);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
- return 0;
+ return value;
} while (iterations--);
pr_err("QAT: Read CSR timeout\n");
- return -EFAULT;
+ return 0;
}
static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
@@ -154,9 +154,9 @@ static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+ *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
@@ -169,13 +169,13 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
int times = MAX_RETRY_TIMES;
int elapsed_cycles = 0;
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
while ((int)cycles > elapsed_cycles && times--) {
if (chk_inactive)
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
elapsed_cycles = cur_cnt - base_cnt;
@@ -207,7 +207,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
}
/* Sets the acceleration engine context mode to either four or eight */
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
@@ -221,7 +221,7 @@ int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
new_csr = (mode) ?
@@ -240,7 +240,7 @@ int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
switch (lm_type) {
case ICP_LMEM0:
@@ -328,7 +328,7 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
@@ -340,16 +340,18 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
-static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- unsigned int ae_csr, unsigned int *csr_val)
+ unsigned int ae_csr)
{
- unsigned int cur_ctx;
+ unsigned int cur_ctx, csr_val;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+ return csr_val;
}
static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
@@ -358,7 +360,7 @@ static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -374,7 +376,7 @@ static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -392,13 +394,11 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
int times = MAX_RETRY_TIMES;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
do {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
} while (times-- && (cur_cnt == base_cnt));
@@ -416,8 +416,8 @@ int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
{
unsigned int enable = 0, active = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
+ enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
(active & (1 << ACS_ABO_BITPOS)))
return 1;
@@ -540,7 +540,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK &
(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
@@ -583,7 +583,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int ustore_addr;
unsigned int i;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -604,7 +604,7 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK;
ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
ctx |= (ctx_mask << CE_ENABLE_BITPOS);
@@ -636,10 +636,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
int ret = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr_val &= IGNORE_W1C_MASK;
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
@@ -648,7 +648,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
qat_hal_wr_indr_csr(handle, ae, ctx_mask,
@@ -760,7 +760,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
unsigned int csr_val = 0;
- qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
csr_val |= 0x1;
qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
}
@@ -826,16 +826,16 @@ static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+ misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
misc_control & 0xfffffffb);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
for (i = 0; i < words_num; i++) {
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
uaddr++;
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+ uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+ uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
uword[i] = uwrd_hi;
uword[i] = (uword[i] << 0x20) | uwrd_lo;
}
@@ -849,7 +849,7 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
{
unsigned int i, ustore_addr;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -890,26 +890,27 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
/* save current context */
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
- &ind_lm_addr_byte0);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
- &ind_lm_addr_byte1);
+ ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+ ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+ ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX);
+ ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX);
if (inst_num <= MAX_EXEC_INST)
qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+ savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
- qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
- qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
- &ind_cnt_sig);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
- qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+ savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ FUTURE_COUNT_SIGNAL_INDIRECT);
+ ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_SIG_EVENTS_INDIRECT);
+ act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
/* execute micro codes */
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
@@ -927,8 +928,8 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
if (endpc) {
unsigned int ctx_status;
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
- &ctx_status);
+ ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_STS_INDIRECT);
*endpc = ctx_status & handle->hal_handle->upc_mask;
}
/* restore the saved context */
@@ -938,7 +939,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
handle->hal_handle->upc_mask & savpc);
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
@@ -986,16 +987,16 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
if (ctx != (savctx & ACS_ACNO))
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
ctx & ACS_ACNO);
qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr = UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
insts = qat_hal_set_uword_ecc(insts);
@@ -1011,7 +1012,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
* the instruction should have been executed
* prior to clearing the ECS in putUwords
*/
- qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+ *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
if (ctx != (savctx & ACS_ACNO))
@@ -1188,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned short mask;
unsigned short dr_offset = 0x10;
- status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1238,7 +1239,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
const unsigned short gprnum = 0, dly = num_inst * 0x5;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1282,7 +1283,7 @@ static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
unsigned int ctx_enables;
int stat = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
@@ -1299,7 +1300,7 @@ static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
{
unsigned int ctx_enables;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (ctx_enables & CE_INUSE_CONTEXTS) {
/* 4-ctx mode */
*relreg = absreg_num & 0x1F;
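The qat_hal hunks above all apply the same mechanical conversion: the CSR read helpers no longer fill an out parameter but return the value they read. A minimal before/after sketch of the pattern, with the return type of the new helper assumed from the call sites rather than copied from the header:

/* before: value comes back through a pointer, the status is mostly ignored */
unsigned int ctx_enables;
qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);

/* after: the helper returns the CSR value directly */
unsigned int ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);

The indirect-CSR variant follows the same shape, with the context number as an extra argument (see the LM_ADDR_* reads at the top of this series of hunks).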
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 142c6020cec7..188f44b7eb27 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1,17 +1,13 @@
-/*
- * Cryptographic API.
- *
- * Support for Samsung S5PV210 and Exynos HW acceleration.
- *
- * Copyright (C) 2011 NetUP Inc. All rights reserved.
- * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Hash part based on omap-sham.c driver.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Cryptographic API.
+//
+// Support for Samsung S5PV210 and Exynos HW acceleration.
+//
+// Copyright (C) 2011 NetUP Inc. All rights reserved.
+// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
+//
+// Hash part based on omap-sham.c driver.
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -1461,7 +1457,7 @@ static void s5p_hash_tasklet_cb(unsigned long data)
&dd->hash_flags)) {
/* hash or semi-hash ready */
clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
- goto finish;
+ goto finish;
}
}
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 602332e02729..63aa78c0b12b 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,4 +1,4 @@
-config CRC_DEV_STM32
+config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
@@ -6,7 +6,7 @@ config CRC_DEV_STM32
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
-config HASH_DEV_STM32
+config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32
depends on HAS_DMA
@@ -18,3 +18,12 @@ config HASH_DEV_STM32
help
This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
+
+config CRYPTO_DEV_STM32_CRYP
+ tristate "Support for STM32 cryp accelerators"
+ depends on ARCH_STM32
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73cd56cad0cc..53d1bb94b221 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
-obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
new file mode 100644
index 000000000000..4a06a7a665ee
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+
+#define DRIVER_NAME "stm32-cryp"
+
+/* Bit [0] encrypt / decrypt */
+#define FLG_ENCRYPT BIT(0)
+/* Bit [8..1] algo & operation mode */
+#define FLG_AES BIT(1)
+#define FLG_DES BIT(2)
+#define FLG_TDES BIT(3)
+#define FLG_ECB BIT(4)
+#define FLG_CBC BIT(5)
+#define FLG_CTR BIT(6)
+/* Mode mask = bits [15..0] */
+#define FLG_MODE_MASK GENMASK(15, 0)
+
+/* Registers */
+#define CRYP_CR 0x00000000
+#define CRYP_SR 0x00000004
+#define CRYP_DIN 0x00000008
+#define CRYP_DOUT 0x0000000C
+#define CRYP_DMACR 0x00000010
+#define CRYP_IMSCR 0x00000014
+#define CRYP_RISR 0x00000018
+#define CRYP_MISR 0x0000001C
+#define CRYP_K0LR 0x00000020
+#define CRYP_K0RR 0x00000024
+#define CRYP_K1LR 0x00000028
+#define CRYP_K1RR 0x0000002C
+#define CRYP_K2LR 0x00000030
+#define CRYP_K2RR 0x00000034
+#define CRYP_K3LR 0x00000038
+#define CRYP_K3RR 0x0000003C
+#define CRYP_IV0LR 0x00000040
+#define CRYP_IV0RR 0x00000044
+#define CRYP_IV1LR 0x00000048
+#define CRYP_IV1RR 0x0000004C
+
+/* Registers values */
+#define CR_DEC_NOT_ENC 0x00000004
+#define CR_TDES_ECB 0x00000000
+#define CR_TDES_CBC 0x00000008
+#define CR_DES_ECB 0x00000010
+#define CR_DES_CBC 0x00000018
+#define CR_AES_ECB 0x00000020
+#define CR_AES_CBC 0x00000028
+#define CR_AES_CTR 0x00000030
+#define CR_AES_KP 0x00000038
+#define CR_AES_UNKNOWN 0xFFFFFFFF
+#define CR_ALGO_MASK 0x00080038
+#define CR_DATA32 0x00000000
+#define CR_DATA16 0x00000040
+#define CR_DATA8 0x00000080
+#define CR_DATA1 0x000000C0
+#define CR_KEY128 0x00000000
+#define CR_KEY192 0x00000100
+#define CR_KEY256 0x00000200
+#define CR_FFLUSH 0x00004000
+#define CR_CRYPEN 0x00008000
+
+#define SR_BUSY 0x00000010
+#define SR_OFNE 0x00000004
+
+#define IMSCR_IN BIT(0)
+#define IMSCR_OUT BIT(1)
+
+#define MISR_IN BIT(0)
+#define MISR_OUT BIT(1)
+
+/* Misc */
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
+#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+
+struct stm32_cryp_ctx {
+ struct stm32_cryp *cryp;
+ int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct stm32_cryp_reqctx {
+ unsigned long mode;
+};
+
+struct stm32_cryp {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long flags;
+ u32 irq_status;
+ struct stm32_cryp_ctx *ctx;
+
+ struct crypto_engine *engine;
+
+ struct mutex lock; /* protects req */
+ struct ablkcipher_request *req;
+
+ size_t hw_blocksize;
+
+ size_t total_in;
+ size_t total_in_save;
+ size_t total_out;
+ size_t total_out_save;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+ struct scatterlist *out_sg_save;
+
+ struct scatterlist in_sgl;
+ struct scatterlist out_sgl;
+ bool sgs_copied;
+
+ int in_sg_len;
+ int out_sg_len;
+
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
+
+ u32 last_ctr[4];
+};
+
+struct stm32_cryp_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct stm32_cryp_list cryp_list = {
+ .dev_list = LIST_HEAD_INIT(cryp_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock),
+};
+
+static inline bool is_aes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_AES;
+}
+
+static inline bool is_des(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_DES;
+}
+
+static inline bool is_tdes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_TDES;
+}
+
+static inline bool is_ecb(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ECB;
+}
+
+static inline bool is_cbc(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CBC;
+}
+
+static inline bool is_ctr(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CTR;
+}
+
+static inline bool is_encrypt(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ENCRYPT;
+}
+
+static inline bool is_decrypt(struct stm32_cryp *cryp)
+{
+ return !is_encrypt(cryp);
+}
+
+static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst)
+{
+ return readl_relaxed(cryp->regs + ofst);
+}
+
+static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val)
+{
+ writel_relaxed(val, cryp->regs + ofst);
+}
+
+static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ !(status & SR_BUSY), 10, 100000);
+}
+
+static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
+{
+ struct stm32_cryp *tmp, *cryp = NULL;
+
+ spin_lock_bh(&cryp_list.lock);
+ if (!ctx->cryp) {
+ list_for_each_entry(tmp, &cryp_list.dev_list, list) {
+ cryp = tmp;
+ break;
+ }
+ ctx->cryp = cryp;
+ } else {
+ cryp = ctx->cryp;
+ }
+
+ spin_unlock_bh(&cryp_list.lock);
+
+ return cryp;
+}
+
+static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
+ size_t align)
+{
+ int len = 0;
+
+ if (!total)
+ return 0;
+
+ if (!IS_ALIGNED(total, align))
+ return -EINVAL;
+
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(sg->length, align))
+ return -EINVAL;
+
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (len != total)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
+{
+ int ret;
+
+ ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
+ cryp->hw_blocksize);
+ if (ret)
+ return ret;
+
+ ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
+ cryp->hw_blocksize);
+
+ return ret;
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
+static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
+{
+ void *buf_in, *buf_out;
+ int pages, total_in, total_out;
+
+ if (!stm32_cryp_check_io_aligned(cryp)) {
+ cryp->sgs_copied = 0;
+ return 0;
+ }
+
+ total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
+ pages = total_in ? get_order(total_in) : 1;
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
+ pages = total_out ? get_order(total_out) : 1;
+ buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ if (!buf_in || !buf_out) {
+ dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
+ cryp->sgs_copied = 0;
+ return -EFAULT;
+ }
+
+ sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
+
+ sg_init_one(&cryp->in_sgl, buf_in, total_in);
+ cryp->in_sg = &cryp->in_sgl;
+ cryp->in_sg_len = 1;
+
+ sg_init_one(&cryp->out_sgl, buf_out, total_out);
+ cryp->out_sg_save = cryp->out_sg;
+ cryp->out_sg = &cryp->out_sgl;
+ cryp->out_sg_len = 1;
+
+ cryp->sgs_copied = 1;
+
+ return 0;
+}
+
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+{
+ if (!iv)
+ return;
+
+ stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+
+ if (is_aes(cryp)) {
+ stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+ }
+}
+
+static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
+{
+ unsigned int i;
+ int r_id;
+
+ if (is_des(c)) {
+ stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
+ stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+ } else {
+ r_id = CRYP_K3RR;
+ for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
+ stm32_cryp_write(c, r_id,
+ cpu_to_be32(c->ctx->key[i - 1]));
+ }
+}
+
+static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
+{
+ if (is_aes(cryp) && is_ecb(cryp))
+ return CR_AES_ECB;
+
+ if (is_aes(cryp) && is_cbc(cryp))
+ return CR_AES_CBC;
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ return CR_AES_CTR;
+
+ if (is_des(cryp) && is_ecb(cryp))
+ return CR_DES_ECB;
+
+ if (is_des(cryp) && is_cbc(cryp))
+ return CR_DES_CBC;
+
+ if (is_tdes(cryp) && is_ecb(cryp))
+ return CR_TDES_ECB;
+
+ if (is_tdes(cryp) && is_cbc(cryp))
+ return CR_TDES_CBC;
+
+ dev_err(cryp->dev, "Unknown mode\n");
+ return CR_AES_UNKNOWN;
+}
+
+static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+{
+ int ret;
+ u32 cfg, hw_mode;
+
+ /* Disable interrupt */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+ /* Set key */
+ stm32_cryp_hw_write_key(cryp);
+
+ /* Set configuration */
+ cfg = CR_DATA8 | CR_FFLUSH;
+
+ switch (cryp->ctx->keylen) {
+ case AES_KEYSIZE_128:
+ cfg |= CR_KEY128;
+ break;
+
+ case AES_KEYSIZE_192:
+ cfg |= CR_KEY192;
+ break;
+
+ default:
+ case AES_KEYSIZE_256:
+ cfg |= CR_KEY256;
+ break;
+ }
+
+ hw_mode = stm32_cryp_get_hw_mode(cryp);
+ if (hw_mode == CR_AES_UNKNOWN)
+ return -EINVAL;
+
+ /* AES ECB/CBC decrypt: run key preparation first */
+ if (is_decrypt(cryp) &&
+ ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_busy(cryp);
+ if (ret) {
+ dev_err(cryp->dev, "Timeout (key preparation)\n");
+ return ret;
+ }
+ }
+
+ cfg |= hw_mode;
+
+ if (is_decrypt(cryp))
+ cfg |= CR_DEC_NOT_ENC;
+
+ /* Apply config and flush (valid when CRYPEN = 0) */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ switch (hw_mode) {
+ case CR_DES_CBC:
+ case CR_TDES_CBC:
+ case CR_AES_CBC:
+ case CR_AES_CTR:
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Enable now */
+ cfg |= CR_CRYPEN;
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ return 0;
+}
+
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
+{
+ int err = 0;
+
+ if (cryp->sgs_copied) {
+ void *buf_in, *buf_out;
+ int pages, len;
+
+ buf_in = sg_virt(&cryp->in_sgl);
+ buf_out = sg_virt(&cryp->out_sgl);
+
+ sg_copy_buf(buf_out, cryp->out_sg_save, 0,
+ cryp->total_out_save, 1);
+
+ len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_in, pages);
+
+ len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_out, pages);
+ }
+
+ crypto_finalize_cipher_request(cryp->engine, cryp->req, err);
+ cryp->req = NULL;
+
+ memset(cryp->ctx->key, 0, cryp->ctx->keylen);
+
+ mutex_unlock(&cryp->lock);
+}
+
+static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
+{
+ /* Enable interrupt and let the IRQ handler do everything */
+ stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT);
+
+ return 0;
+}
+
+static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+
+ return 0;
+}
+
+static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+ if (!cryp)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return crypto_transfer_cipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != (3 * DES_KEY_SIZE))
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
+}
+
+static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
+}
+
+static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
+}
+
+static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
+}
+
+static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
+}
+
+static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
+}
+
+static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
+}
+
+static int stm32_cryp_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx;
+ struct stm32_cryp *cryp;
+ struct stm32_cryp_reqctx *rctx;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ mutex_lock(&cryp->lock);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->mode &= FLG_MODE_MASK;
+
+ ctx->cryp = cryp;
+
+ cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
+ cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
+ cryp->ctx = ctx;
+
+ cryp->req = req;
+ cryp->total_in = req->nbytes;
+ cryp->total_out = cryp->total_in;
+
+ cryp->total_in_save = cryp->total_in;
+ cryp->total_out_save = cryp->total_out;
+
+ cryp->in_sg = req->src;
+ cryp->out_sg = req->dst;
+ cryp->out_sg_save = cryp->out_sg;
+
+ cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
+ if (cryp->in_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get in_sg_len\n");
+ ret = cryp->in_sg_len;
+ goto out;
+ }
+
+ cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
+ if (cryp->out_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get out_sg_len\n");
+ ret = cryp->out_sg_len;
+ goto out;
+ }
+
+ ret = stm32_cryp_copy_sgs(cryp);
+ if (ret)
+ goto out;
+
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+
+ ret = stm32_cryp_hw_init(cryp);
+out:
+ if (ret)
+ mutex_unlock(&cryp->lock);
+
+ return ret;
+}
+
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return stm32_cryp_prepare_req(engine, req);
+}
+
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ return stm32_cryp_cpu_start(cryp);
+}
+
+static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->out_walk, n);
+
+ if (unlikely(cryp->out_sg->length == _walked_out)) {
+ cryp->out_sg = sg_next(cryp->out_sg);
+ if (cryp->out_sg) {
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+ return (sg_virt(cryp->out_sg) + _walked_out);
+ }
+ }
+
+ return (u32 *)((u8 *)dst + n);
+}
+
+static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->in_walk, n);
+
+ if (unlikely(cryp->in_sg->length == _walked_in)) {
+ cryp->in_sg = sg_next(cryp->in_sg);
+ if (cryp->in_sg) {
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ return (sg_virt(cryp->in_sg) + _walked_in);
+ }
+ }
+
+ return (u32 *)((u8 *)src + n);
+}
+
+static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
+{
+ u32 cr;
+
+ if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
+ cryp->last_ctr[3] = 0;
+ cryp->last_ctr[2]++;
+ if (!cryp->last_ctr[2]) {
+ cryp->last_ctr[1]++;
+ if (!cryp->last_ctr[1])
+ cryp->last_ctr[0]++;
+ }
+
+ cr = stm32_cryp_read(cryp, CRYP_CR);
+ stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
+
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+
+ stm32_cryp_write(cryp, CRYP_CR, cr);
+ }
+
+ cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
+ cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
+ cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
+ cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+}
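To make the carry handling above concrete: suppose the saved counter words are {0, 0, 0, 0xFFFFFFFF} when the next block is about to be fed in. A trace of what the function then does (the usual rationale for this pattern is that the peripheral only increments the low counter word, but that is not spelled out in this hunk):

/* last_ctr[3] == 0xFFFFFFFF, so the branch is taken:
 *   {0, 0, 0, 0xFFFFFFFF} -> {0, 0, 1, 0}
 * CRYPEN is cleared, the new counter is written through
 * CRYP_IV0LR..CRYP_IV1RR, then the previous CRYP_CR value (with
 * CRYPEN set) is restored.  Finally last_ctr[] is refreshed from
 * the IV registers, ready for the next check.
 */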
+
+static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 d32, *dst;
+ u8 *d8;
+
+ dst = sg_virt(cryp->out_sg) + _walked_out;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_out >= sizeof(u32))) {
+ /* Read a full u32 */
+ *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
+ cryp->total_out -= sizeof(u32);
+ } else if (!cryp->total_out) {
+ /* Empty fifo out (data from input padding) */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ } else {
+ /* Read less than an u32 */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ d8 = (u8 *)&d32;
+
+ for (j = 0; j < cryp->total_out; j++) {
+ *((u8 *)dst) = *(d8++);
+ dst = stm32_cryp_next_out(cryp, dst, 1);
+ }
+ cryp->total_out = 0;
+ }
+ }
+
+ return !cryp->total_out || !cryp->total_in;
+}
+
+static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 *src;
+ u8 d8[4];
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_in >= sizeof(u32))) {
+ /* Write a full u32 */
+ stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+ src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+ cryp->total_in -= sizeof(u32);
+ } else if (!cryp->total_in) {
+ /* Write padding data */
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ } else {
+ /* Write less than an u32 */
+ memset(d8, 0, sizeof(u32));
+ for (j = 0; j < cryp->total_in; j++) {
+ d8[j] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ }
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ cryp->total_in = 0;
+ }
+ }
+}
+
+static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
+{
+ if (unlikely(!cryp->total_in)) {
+ dev_warn(cryp->dev, "No more data to process\n");
+ return;
+ }
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ stm32_cryp_check_ctr_counter(cryp);
+
+ stm32_cryp_irq_write_block(cryp);
+}
+
+static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ if (cryp->irq_status & MISR_OUT)
+ /* Output FIFO IRQ: read data */
+ if (unlikely(stm32_cryp_irq_read_data(cryp))) {
+ /* All bytes processed, finish */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp);
+ return IRQ_HANDLED;
+ }
+
+ if (cryp->irq_status & MISR_IN) {
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_cryp_irq(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "stm32-ecb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "stm32-cbc-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "stm32-ctr-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "stm32-ecb-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "stm32-cbc-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "stm32-ecb-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "stm32-cbc-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ }
+},
+};
+
+static const struct of_device_id stm32_dt_ids[] = {
+ { .compatible = "st,stm32f756-cryp", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static int stm32_cryp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_cryp *cryp;
+ struct resource *res;
+ struct reset_control *rst;
+ int irq, ret;
+
+ cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL);
+ if (!cryp)
+ return -ENOMEM;
+
+ cryp->dev = dev;
+
+ mutex_init(&cryp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cryp->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cryp->regs))
+ return PTR_ERR(cryp->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
+ stm32_cryp_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), cryp);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ cryp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cryp->clk)) {
+ dev_err(dev, "Could not get clock\n");
+ return PTR_ERR(cryp->clk);
+ }
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ rst = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(rst)) {
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ platform_set_drvdata(pdev, cryp);
+
+ spin_lock(&cryp_list.lock);
+ list_add(&cryp->list, &cryp_list.dev_list);
+ spin_unlock(&cryp_list.lock);
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(dev, 1);
+ if (!cryp->engine) {
+ dev_err(dev, "Could not init crypto engine\n");
+ ret = -ENOMEM;
+ goto err_engine1;
+ }
+
+ cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
+ cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
+
+ ret = crypto_engine_start(cryp->engine);
+ if (ret) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto err_engine2;
+ }
+
+ ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ if (ret) {
+ dev_err(dev, "Could not register algs\n");
+ goto err_algs;
+ }
+
+ dev_info(dev, "Initialized\n");
+
+ return 0;
+
+err_algs:
+err_engine2:
+ crypto_engine_exit(cryp->engine);
+err_engine1:
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return ret;
+}
+
+static int stm32_cryp_remove(struct platform_device *pdev)
+{
+ struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+
+ if (!cryp)
+ return -ENODEV;
+
+ crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+
+ crypto_engine_exit(cryp->engine);
+
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_cryp_driver = {
+ .probe = stm32_cryp_probe,
+ .remove = stm32_cryp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = stm32_dt_ids,
+ },
+};
+
+module_platform_driver(stm32_cryp_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
+MODULE_LICENSE("GPL");
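The algorithms registered above are reached through the kernel's generic crypto API rather than called directly; the core picks the highest-priority "cbc(aes)" (and so on) implementation, which may or may not be this driver on a given system. A minimal, hypothetical in-kernel consumer sketch -- demo_cbc_encrypt() and its error handling are illustrative and not part of this patch:

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt len bytes of buf in place with AES-256-CBC.  buf must be a
 * linear, DMA-able buffer (e.g. kmalloc'd); iv is AES_BLOCK_SIZE bytes.
 */
static int demo_cbc_encrypt(u8 *buf, unsigned int len, const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* async engines (such as this one) complete through the wait helper */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}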
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 090582baecfe..8f09b8430893 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -208,6 +208,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
@@ -229,6 +230,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32c",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d8424ed16c33..351f4bf37ca9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -58,6 +58,13 @@
extern struct list_head adapter_list;
extern struct mutex uld_mutex;
+/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+ (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -563,6 +570,7 @@ enum { /* adapter flags */
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
};
struct rx_sw_desc;
@@ -967,6 +975,11 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ struct ulptx_sgl *sgl;
+};
+
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
@@ -1699,4 +1712,14 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
void free_txq(struct adapter *adap, struct sge_txq *q);
+void cxgb4_reclaim_completed_tx(struct adapter *adap,
+ struct sge_txq *q, bool unmap);
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr);
+void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos);
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr);
+void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
#endif /* __CXGB4_H__ */
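For a sense of scale, the ETHTXQ_STOP_THRES macro moved into this header evaluates as follows on a typical build with 4 KiB pages, where MAX_SKB_FRAGS works out to 17 -- an illustration of the formula only, not a claim about every configuration:

/* (3 * 17) / 2 + (17 & 1) = 25 + 1 = 26
 * DIV_ROUND_UP(26, 8)     = 4
 * ETHTXQ_STOP_THRES       = 1 + 4 = 5 descriptors
 */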
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 917663b35603..cf471831ee71 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3096,6 +3096,8 @@ static int chcr_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
+ seq_printf(seq, "IPSec PDU: %10u\n",
+ atomic_read(&adap->chcr_stats.ipsec_cnt));
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6f900ffe25cc..05a4abfd5ec1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4096,7 +4096,7 @@ static int adap_init0(struct adapter *adap)
} else {
adap->vres.ncrypto_fc = val[0];
}
- adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
+ adap->params.crypto = ntohs(caps_cmd.cryptocaps);
adap->num_uld += 1;
}
#undef FW_PARAM_PFVF
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 71a315bc1409..6b5fea4532f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -637,6 +637,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->crypto = adap->params.crypto;
lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 08e709ab6dd4..1d37672902da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -297,6 +297,7 @@ struct chcr_stats_debug {
atomic_t complete;
atomic_t error;
atomic_t fallback;
+ atomic_t ipsec_cnt;
};
#define OCQ_WIN_OFFSET(pdev, vres) \
@@ -322,6 +323,7 @@ struct cxgb4_lld_info {
unsigned char wr_cred; /* WR 16-byte credits */
unsigned char adapter_type; /* type of adapter */
unsigned char fw_api_ver; /* FW API version */
+ unsigned char crypto; /* crypto support */
unsigned int fw_vers; /* FW version */
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned int cclk_ps; /* Core clock period in psec */
@@ -370,6 +372,7 @@ struct cxgb4_uld_info {
struct t4_lro_mgr *lro_mgr,
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
+ int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
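The new crypto capability byte in cxgb4_lld_info and the tx_handler hook in cxgb4_uld_info are what let an upper-layer driver claim inline-IPsec traffic. A hypothetical probe-time check (only the cxgb4 symbols are real; the helper name is invented):

/* True when firmware advertised inline IPsec, i.e. when the ULD
 * should install its tx_handler.
 */
static bool demo_wants_ipsec_inline(const struct cxgb4_lld_info *lld)
{
	return lld->crypto & ULP_CRYPTO_IPSEC_INLINE;
}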
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 922f2f937789..6c7b0ac0b48b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -41,6 +41,7 @@
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
+#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
@@ -53,6 +54,7 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
+#include "cxgb4_uld.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -110,14 +112,6 @@
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
- * Suspend an Ethernet Tx queue with fewer available descriptors than this.
- * This is the same as calc_tx_descs() for a TSO packet with
- * nr_frags == MAX_SKB_FRAGS.
- */
-#define ETHTXQ_STOP_THRES \
- (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
-
-/*
* Suspension threshold for non-Ethernet Tx queues. We require enough room
* for a full sized WR.
*/
@@ -134,11 +128,6 @@
*/
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
struct rx_sw_desc { /* SW state per Rx descriptor */
struct page *page;
dma_addr_t dma_addr;
@@ -248,8 +237,8 @@ static inline bool fl_starving(const struct adapter *adapter,
return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
-static int map_skb(struct device *dev, const struct sk_buff *skb,
- dma_addr_t *addr)
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
{
const skb_frag_t *fp, *end;
const struct skb_shared_info *si;
@@ -277,6 +266,7 @@ unwind:
out_err:
return -ENOMEM;
}
+EXPORT_SYMBOL(cxgb4_map_skb);
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
@@ -411,7 +401,7 @@ static inline int reclaimable(const struct sge_txq *q)
}
/**
- * reclaim_completed_tx - reclaims completed Tx descriptors
+ * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
* @adap: the adapter
* @q: the Tx queue to reclaim completed descriptors from
* @unmap: whether the buffers should be unmapped for DMA
@@ -420,7 +410,7 @@ static inline int reclaimable(const struct sge_txq *q)
* and frees the associated buffers if possible. Called with the Tx
* queue locked.
*/
-static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
bool unmap)
{
int avail = reclaimable(q);
@@ -437,6 +427,7 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
q->in_use -= avail;
}
}
+EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
static inline int get_buf_size(struct adapter *adapter,
const struct rx_sw_desc *d)
@@ -833,7 +824,7 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
}
/**
- * write_sgl - populate a scatter/gather list for a packet
+ * cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
* @sgl: starting location for writing the SGL
@@ -849,9 +840,9 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
* right after the end of the SGL but does not account for any potential
* wrap around, i.e., @end > @sgl.
*/
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
- struct ulptx_sgl *sgl, u64 *end, unsigned int start,
- const dma_addr_t *addr)
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
{
unsigned int i, len;
struct ulptx_sge_pair *to;
@@ -903,6 +894,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0;
}
+EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
@@ -921,14 +913,14 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
}
/**
- * ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
* @q: the Tx queue
* @n: number of new descriptors to give to HW
*
 * Ring the doorbell for a Tx queue.
*/
-static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
/* Make sure that all writes to the TX Descriptors are committed
* before we tell the hardware about them.
@@ -995,9 +987,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
wmb();
}
}
+EXPORT_SYMBOL(cxgb4_ring_tx_db);
/**
- * inline_tx_skb - inline a packet's data into Tx descriptors
+ * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
* @skb: the packet
* @q: the Tx queue where the packet will be inlined
* @pos: starting position in the Tx queue where to inline the packet
@@ -1007,8 +1000,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* Most of the complexity of this operation is dealing with wrap arounds
* in the middle of the packet we want to inline.
*/
-static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
- void *pos)
+void cxgb4_inline_tx_skb(const struct sk_buff *skb,
+ const struct sge_txq *q, void *pos)
{
u64 *p;
int left = (void *)q->stat - pos;
@@ -1030,6 +1023,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
if ((uintptr_t)p & 8)
*p = 0;
}
+EXPORT_SYMBOL(cxgb4_inline_tx_skb);
static void *inline_tx_skb_header(const struct sk_buff *skb,
const struct sge_txq *q, void *pos,
@@ -1199,6 +1193,12 @@ out_free: dev_kfree_skb_any(skb);
pi = netdev_priv(dev);
adap = pi->adapter;
+ ssi = skb_shinfo(skb);
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (xfrm_offload(skb) && !ssi->gso_size)
+ return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#endif /* CHELSIO_IPSEC_INLINE */
+
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
spin_lock(&adap->ptp_lock);
@@ -1215,7 +1215,7 @@ out_free: dev_kfree_skb_any(skb);
}
skb_tx_timestamp(skb);
- reclaim_completed_tx(adap, &q->q, true);
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
@@ -1245,7 +1245,7 @@ out_free: dev_kfree_skb_any(skb);
immediate = true;
if (!immediate &&
- unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
@@ -1264,7 +1264,6 @@ out_free: dev_kfree_skb_any(skb);
end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
- ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
@@ -1341,13 +1340,13 @@ out_free: dev_kfree_skb_any(skb);
cpl->ctrl1 = cpu_to_be64(cntrl);
if (immediate) {
- inline_tx_skb(skb, &q->q, cpl + 1);
+ cxgb4_inline_tx_skb(skb, &q->q, cpl + 1);
dev_consume_skb_any(skb);
} else {
int last_desc;
- write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
- addr);
+ cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1),
+ end, 0, addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
@@ -1359,7 +1358,7 @@ out_free: dev_kfree_skb_any(skb);
txq_advance(&q->q, ndesc);
- ring_tx_db(adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
@@ -1369,9 +1368,9 @@ out_free: dev_kfree_skb_any(skb);
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
*
- * This is a variant of reclaim_completed_tx() that is used for Tx queues
- * that send only immediate data (presently just the control queues) and
- * thus do not have any sk_buffs to release.
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
@@ -1446,13 +1445,13 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
}
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
ctrlq_check_stop(q, wr);
- ring_tx_db(q->adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
spin_unlock(&q->sendq.lock);
kfree_skb(skb);
@@ -1487,7 +1486,7 @@ static void restart_ctrlq(unsigned long data)
txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
@@ -1500,14 +1499,15 @@ static void restart_ctrlq(unsigned long data)
}
}
if (written > 16) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
spin_lock(&q->sendq.lock);
}
q->full = 0;
-ringdb: if (written)
- ring_tx_db(q->adap, &q->q, written);
+ringdb:
+ if (written)
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
spin_unlock(&q->sendq.lock);
}
@@ -1650,7 +1650,7 @@ static void service_ofldq(struct sge_uld_txq *q)
*/
spin_unlock(&q->sendq.lock);
- reclaim_completed_tx(q->adap, &q->q, false);
+ cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
flits = skb->priority; /* previously saved */
ndesc = flits_to_desc(flits);
@@ -1661,9 +1661,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (u64 *)&q->q.desc[q->q.pidx];
if (is_ofld_imm(skb))
- inline_tx_skb(skb, &q->q, pos);
- else if (map_skb(q->adap->pdev_dev, skb,
- (dma_addr_t *)skb->head)) {
+ cxgb4_inline_tx_skb(skb, &q->q, pos);
+ else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
+ (dma_addr_t *)skb->head)) {
txq_stop_maperr(q);
spin_lock(&q->sendq.lock);
break;
@@ -1694,9 +1694,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (void *)txq->desc;
}
- write_sgl(skb, &q->q, (void *)pos,
- end, hdr_len,
- (dma_addr_t *)skb->head);
+ cxgb4_write_sgl(skb, &q->q, (void *)pos,
+ end, hdr_len,
+ (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
skb->dev = q->adap->port[0];
skb->destructor = deferred_unmap_destructor;
@@ -1710,7 +1710,7 @@ static void service_ofldq(struct sge_uld_txq *q)
txq_advance(&q->q, ndesc);
written += ndesc;
if (unlikely(written > 32)) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
@@ -1725,7 +1725,7 @@ static void service_ofldq(struct sge_uld_txq *q)
kfree_skb(skb);
}
if (likely(written))
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
/*Indicate that no thread is processing the Pending Send Queue
* currently.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 57eb4ad3485d..be3658301832 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -513,6 +513,13 @@ struct fw_ulptx_wr {
u64 cookie;
};
+#define FW_ULPTX_WR_DATA_S 28
+#define FW_ULPTX_WR_DATA_M 0x1
+#define FW_ULPTX_WR_DATA_V(x) ((x) << FW_ULPTX_WR_DATA_S)
+#define FW_ULPTX_WR_DATA_G(x) \
+ (((x) >> FW_ULPTX_WR_DATA_S) & FW_ULPTX_WR_DATA_M)
+#define FW_ULPTX_WR_DATA_F FW_ULPTX_WR_DATA_V(1U)
+
struct fw_tp_wr {
__be32 op_to_immdlen;
__be32 flowid_len16;
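The FW_ULPTX_WR_DATA_* defines follow the S/M/V/G/F shift-mask convention used throughout t4fw_api.h: _V() places a value at bit 28, _G() extracts it, and _F is the pre-shifted single-bit flag. A small hypothetical pair of helpers showing the two directions (the helper names are illustrative):

static inline __be32 demo_set_ulptx_wr_data(__be32 op)
{
	return cpu_to_be32(be32_to_cpu(op) | FW_ULPTX_WR_DATA_F);
}

static inline bool demo_get_ulptx_wr_data(__be32 op)
{
	return FW_ULPTX_WR_DATA_G(be32_to_cpu(op));
}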
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
index 2e5d311d2438..db81ed527452 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
@@ -120,6 +120,7 @@ static struct shash_alg alg = {
.cra_name = "adler32",
.cra_driver_name = "adler32-zlib",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,