author     Linus Torvalds <torvalds@linux-foundation.org>  2022-10-10 13:04:25 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-10-10 13:04:25 -0700
commit     3604a7f568d3f67be8c13736201411ee83b210a1 (patch)
tree       6eeed1b02493c7dc481318317215dbd2d72567f9
parent     d4013bc4d49f6da8178a340348369bb9920225c9 (diff)
parent     b411b1a0c8bddd470fc8c3457629ac25a168cba0 (diff)
Merge tag 'v6.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Feed untrusted RNGs into /dev/random
   - Allow HWRNG sleeping to be more interruptible
   - Create lib/utils module
   - Setting private keys no longer required for akcipher
   - Remove tcrypt mode=1000
   - Reorganised Kconfig entries

  Algorithms:
   - Load x86/sha512 based on CPU features
   - Add AES-NI/AVX/x86_64/GFNI assembler implementation of aria cipher

  Drivers:
   - Add HACE crypto driver aspeed"

* tag 'v6.1-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (124 commits)
  crypto: aspeed - Remove redundant dev_err call
  crypto: scatterwalk - Remove unused inline function scatterwalk_aligned()
  crypto: aead - Remove unused inline functions from aead
  crypto: bcm - Simplify obtain the name for cipher
  crypto: marvell/octeontx - use sysfs_emit() to instead of scnprintf()
  hwrng: core - start hwrng kthread also for untrusted sources
  crypto: zip - remove the unneeded result variable
  crypto: qat - add limit to linked list parsing
  crypto: octeontx2 - Remove the unneeded result variable
  crypto: ccp - Remove the unneeded result variable
  crypto: aspeed - Fix check for platform_get_irq() errors
  crypto: virtio - fix memory-leak
  crypto: cavium - prevent integer overflow loading firmware
  crypto: marvell/octeontx - prevent integer overflows
  crypto: aspeed - fix build error when only CRYPTO_DEV_ASPEED is enabled
  crypto: hisilicon/qm - fix the qos value initialization
  crypto: sun4i-ss - use DEFINE_SHOW_ATTRIBUTE to simplify sun4i_ss_debugfs
  crypto: tcrypt - add async speed test for aria cipher
  crypto: aria-avx - add AES-NI/AVX/x86_64/GFNI assembler implementation of aria cipher
  crypto: aria - prepare generic module for optimized implementations
  ...
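One API bullet above notes that setting private keys is no longer required for akcipher, so a transform used only for the public-key side need not carry a private key at all. Whatever the internals, a public-key-only caller looks roughly like the fragment below. This is a hedged sketch, not code from this series: the "rsa" name and the demo_* wrapper are illustrative, and only long-standing akcipher API calls are used.

#include <crypto/akcipher.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Hedged sketch: a public-key-only akcipher user. The transform name
 * and the function itself are illustrative, not from this series. */
static int demo_akcipher_pub_only(const void *pub, unsigned int publen)
{
        struct crypto_akcipher *tfm;
        int err;

        tfm = crypto_alloc_akcipher("rsa", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Public key alone; no crypto_akcipher_set_priv_key() call. */
        err = crypto_akcipher_set_pub_key(tfm, pub, publen);
        if (!err)
                pr_info("akcipher output size: %u\n",
                        crypto_akcipher_maxsize(tfm));

        crypto_free_akcipher(tfm);
        return err;
}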
-rw-r--r--  Documentation/devicetree/bindings/crypto/aspeed,ast2500-hace.yaml  53
-rw-r--r--  Documentation/virt/kvm/x86/amd-memory-encryption.rst  5
-rw-r--r--  MAINTAINERS  7
-rw-r--r--  arch/arm/Kconfig  4
-rw-r--r--  arch/arm/boot/dts/aspeed-g5.dtsi  8
-rw-r--r--  arch/arm/boot/dts/aspeed-g6.dtsi  8
-rw-r--r--  arch/arm/configs/exynos_defconfig  1
-rw-r--r--  arch/arm/configs/milbeaut_m10v_defconfig  1
-rw-r--r--  arch/arm/configs/multi_v7_defconfig  1
-rw-r--r--  arch/arm/configs/omap2plus_defconfig  1
-rw-r--r--  arch/arm/configs/pxa_defconfig  1
-rw-r--r--  arch/arm/crypto/Kconfig  238
-rw-r--r--  arch/arm64/Kconfig  3
-rw-r--r--  arch/arm64/configs/defconfig  1
-rw-r--r--  arch/arm64/crypto/Kconfig  279
-rw-r--r--  arch/mips/crypto/Kconfig  74
-rw-r--r--  arch/powerpc/crypto/Kconfig  97
-rw-r--r--  arch/s390/crypto/Kconfig  135
-rw-r--r--  arch/sparc/crypto/Kconfig  90
-rw-r--r--  arch/x86/crypto/Kconfig  484
-rw-r--r--  arch/x86/crypto/Makefile  3
-rw-r--r--  arch/x86/crypto/aria-aesni-avx-asm_64.S  1303
-rw-r--r--  arch/x86/crypto/aria-avx.h  16
-rw-r--r--  arch/x86/crypto/aria_aesni_avx_glue.c  213
-rw-r--r--  arch/x86/crypto/sha512_ssse3_glue.c  10
-rw-r--r--  crypto/Kconfig  2159
-rw-r--r--  crypto/Makefile  2
-rw-r--r--  crypto/akcipher.c  8
-rw-r--r--  crypto/algapi.c  71
-rw-r--r--  crypto/api.c  4
-rw-r--r--  crypto/aria_generic.c (renamed from crypto/aria.c)  39
-rw-r--r--  crypto/async_tx/raid6test.c  4
-rw-r--r--  crypto/curve25519-generic.c  4
-rw-r--r--  crypto/dh.c  4
-rw-r--r--  crypto/drbg.c  12
-rw-r--r--  crypto/ecdh.c  4
-rw-r--r--  crypto/ecdsa.c  4
-rw-r--r--  crypto/essiv.c  2
-rw-r--r--  crypto/rsa.c  4
-rw-r--r--  crypto/sm2.c  4
-rw-r--r--  crypto/tcrypt.c  53
-rw-r--r--  crypto/testmgr.c  38
-rw-r--r--  drivers/char/hw_random/arm_smccc_trng.c  4
-rw-r--r--  drivers/char/hw_random/core.c  55
-rw-r--r--  drivers/char/hw_random/imx-rngc.c  51
-rw-r--r--  drivers/crypto/Kconfig  3
-rw-r--r--  drivers/crypto/Makefile  1
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c  16
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c  6
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c  6
-rw-r--r--  drivers/crypto/aspeed/Kconfig  48
-rw-r--r--  drivers/crypto/aspeed/Makefile  7
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace-crypto.c  1133
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace-hash.c  1391
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace.c  284
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace.h  298
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c  6
-rw-r--r--  drivers/crypto/bcm/cipher.c  4
-rw-r--r--  drivers/crypto/bcm/cipher.h  2
-rw-r--r--  drivers/crypto/cavium/cpt/cpt_hw_types.h  2
-rw-r--r--  drivers/crypto/cavium/cpt/cptpf_main.c  6
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.c  30
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c  5
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c  6
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  78
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c  2
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre.h  8
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c  250
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c  216
-rw-r--r--  drivers/crypto/hisilicon/qm.c  906
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h  34
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c  456
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c  160
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h  3
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c  134
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c  266
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c  60
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c  67
-rw-r--r--  drivers/crypto/keembay/Kconfig  4
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h  2
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c  24
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_main.c  8
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c  20
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c  4
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c  5
-rw-r--r--  drivers/crypto/n2_core.c  2
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c  5
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c  6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c  10
-rw-r--r--  drivers/crypto/qat/qat_common/adf_gen4_hw_data.h  2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_debug.c  2
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_uclo.h  3
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c  18
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c  24
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c  56
-rw-r--r--  drivers/crypto/qce/aead.c  4
-rw-r--r--  drivers/crypto/qce/sha.c  8
-rw-r--r--  drivers/crypto/qce/skcipher.c  8
-rw-r--r--  drivers/crypto/qcom-rng.c  7
-rw-r--r--  drivers/crypto/sahara.c  22
-rw-r--r--  drivers/net/Kconfig  2
-rw-r--r--  drivers/net/wireless/ath/ath9k/rng.c  3
-rw-r--r--  include/crypto/aria.h  17
-rw-r--r--  include/crypto/internal/aead.h  25
-rw-r--r--  include/crypto/scatterwalk.h  6
-rw-r--r--  include/dt-bindings/clock/aspeed-clock.h  1
-rw-r--r--  include/dt-bindings/clock/ast2600-clock.h  1
-rw-r--r--  include/linux/hisi_acc_qm.h  63
-rw-r--r--  include/linux/hw_random.h  3
-rw-r--r--  include/uapi/misc/uacce/hisi_qm.h  17
-rw-r--r--  lib/Kconfig  3
-rw-r--r--  lib/Makefile  1
-rw-r--r--  lib/crypto/Kconfig  7
-rw-r--r--  lib/crypto/Makefile  3
-rw-r--r--  lib/crypto/memneq.c (renamed from lib/memneq.c)  7
-rw-r--r--  lib/crypto/utils.c  88
116 files changed, 9015 insertions, 2932 deletions
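The shortlog entry "hwrng: core - start hwrng kthread also for untrusted sources" pairs with the "Feed untrusted RNGs into /dev/random" API bullet: a hw_random driver that credits no entropy (quality 0) is now still pulled into the input pool, just without entropy credit. A hedged sketch of such a zero-quality driver follows; demo_rng and its read routine are invented for illustration, while struct hwrng, hwrng_register() and hwrng_unregister() are the existing hw_random API.

#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/string.h>

/* Hypothetical RNG source that credits no entropy. */
static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
        /* A real driver would read hardware here; zero-fill is only a
         * stand-in so the sketch compiles. Return the byte count. */
        memset(buf, 0, max);
        return max;
}

static struct hwrng demo_rng = {
        .name    = "demo-rng",
        .read    = demo_rng_read,
        .quality = 0,   /* untrusted: no entropy credited */
};

static int __init demo_rng_init(void)
{
        /* With this series, the hwrng kthread pulls from this source
         * into the input pool even though quality is 0. */
        return hwrng_register(&demo_rng);
}

static void __exit demo_rng_exit(void)
{
        hwrng_unregister(&demo_rng);
}

module_init(demo_rng_init);
module_exit(demo_rng_exit);
MODULE_LICENSE("GPL");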
diff --git a/Documentation/devicetree/bindings/crypto/aspeed,ast2500-hace.yaml b/Documentation/devicetree/bindings/crypto/aspeed,ast2500-hace.yaml
new file mode 100644
index 000000000000..a772d232de09
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/aspeed,ast2500-hace.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/aspeed,ast2500-hace.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ASPEED HACE hash and crypto Hardware Accelerator Engines
+
+maintainers:
+ - Neal Liu <neal_liu@aspeedtech.com>
+
+description: |
+ The Hash and Crypto Engine (HACE) is designed to accelerate the throughput
+ of hash data digest, encryption, and decryption. Basically, HACE can be
+ divided into two independent engines - Hash Engine and Crypto Engine.
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2500-hace
+ - aspeed,ast2600-hace
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/ast2600-clock.h>
+ hace: crypto@1e6d0000 {
+ compatible = "aspeed,ast2600-hace";
+ reg = <0x1e6d0000 0x200>;
+ interrupts = <4>;
+ clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
+ resets = <&syscon ASPEED_RESET_HACE>;
+ };
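Nothing consumes the HACE hardware through a driver-specific interface: the driver added under drivers/crypto/aspeed/ registers ordinary hash and cipher algorithms with the crypto API, so kernel users request them by name as usual. A hedged sketch of such a consumer follows; the demo_* wrapper is illustrative, and whether the returned transform is actually backed by HACE depends on the driver being probed and on algorithm priority.

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Hedged sketch: request a sha256 ahash by name; if a hardware
 * implementation such as the aspeed HACE one has higher priority,
 * the crypto core selects it transparently. */
static int demo_show_sha256_provider(void)
{
        struct crypto_ahash *tfm;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        pr_info("sha256 backed by %s, digest size %u\n",
                crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)),
                crypto_ahash_digestsize(tfm));

        crypto_free_ahash(tfm);
        return 0;
}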
diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 2d307811978c..935aaeb97fe6 100644
--- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
@@ -89,9 +89,8 @@ context. In a typical workflow, this command should be the first command issued.
The firmware can be initialized either by using its own non-volatile storage or
the OS can manage the NV storage for the firmware using the module parameter
-``init_ex_path``. The file specified by ``init_ex_path`` must exist. To create
-a new NV storage file allocate the file with 32KB bytes of 0xFF as required by
-the SEV spec.
+``init_ex_path``. If the file specified by ``init_ex_path`` does not exist or
+is invalid, the OS will create or overwrite the file with output from PSP.
Returns: 0 on success, -negative on error
diff --git a/MAINTAINERS b/MAINTAINERS
index bf933661fb36..3bd6bb8d34f2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3237,6 +3237,13 @@ S: Maintained
F: Documentation/devicetree/bindings/usb/aspeed,ast2600-udc.yaml
F: drivers/usb/gadget/udc/aspeed_udc.c
+ASPEED CRYPTO DRIVER
+M: Neal Liu <neal_liu@aspeedtech.com>
+L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/crypto/aspeed,ast2500-hace.yaml
+F: drivers/crypto/aspeed/
+
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@gmail.com>
L: acpi4asus-user@lists.sourceforge.net
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 22dc1d6936bc..68923a69b1d4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1850,8 +1850,4 @@ config ARCH_HIBERNATION_POSSIBLE
endmenu
-if CRYPTO
-source "arch/arm/crypto/Kconfig"
-endif
-
source "arch/arm/Kconfig.assembler"
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index c89092c3905b..04f98d1dbb97 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -262,6 +262,14 @@
quality = <100>;
};
+ hace: crypto@1e6e3000 {
+ compatible = "aspeed,ast2500-hace";
+ reg = <0x1e6e3000 0x100>;
+ interrupts = <4>;
+ clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
+ resets = <&syscon ASPEED_RESET_HACE>;
+ };
+
gfx: display@1e6e6000 {
compatible = "aspeed,ast2500-gfx", "syscon";
reg = <0x1e6e6000 0x1000>;
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index 1387a763a6a5..ebbcfe445d9c 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -323,6 +323,14 @@
#size-cells = <1>;
ranges;
+ hace: crypto@1e6d0000 {
+ compatible = "aspeed,ast2600-hace";
+ reg = <0x1e6d0000 0x200>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_YCLK>;
+ resets = <&syscon ASPEED_RESET_HACE>;
+ };
+
syscon: syscon@1e6e2000 {
compatible = "aspeed,ast2600-scu", "syscon", "simple-mfd";
reg = <0x1e6e2000 0x1000>;
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index deb24a4bd011..31e8e0c0ee1b 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -32,7 +32,6 @@ CONFIG_KERNEL_MODE_NEON=y
CONFIG_PM_DEBUG=y
CONFIG_PM_ADVANCED_DEBUG=y
CONFIG_ENERGY_MODEL=y
-CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig
index 6f6b5d0918f7..cdb505c74654 100644
--- a/arch/arm/configs/milbeaut_m10v_defconfig
+++ b/arch/arm/configs/milbeaut_m10v_defconfig
@@ -44,7 +44,6 @@ CONFIG_ARM_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
-CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
CONFIG_CRYPTO_SHA2_ARM_CE=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 54a6dc0aa5a4..b61b2e3d116b 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -132,7 +132,6 @@ CONFIG_ARM_EXYNOS_CPUIDLE=y
CONFIG_ARM_TEGRA_CPUIDLE=y
CONFIG_ARM_QCOM_SPM_CPUIDLE=y
CONFIG_KERNEL_MODE_NEON=y
-CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
CONFIG_CRYPTO_SHA2_ARM_CE=m
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 965853c1c530..2a66850d3288 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -53,7 +53,6 @@ CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_PM_DEBUG=y
-CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index ca6d0049362b..2845fae4f3cc 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -34,7 +34,6 @@ CONFIG_CPUFREQ_DT=m
CONFIG_ARM_PXA2xx_CPUFREQ=m
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
-CONFIG_ARM_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM=m
CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 149a5bd6b88c..3858c4d4cb98 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -1,92 +1,156 @@
# SPDX-License-Identifier: GPL-2.0
-menuconfig ARM_CRYPTO
- bool "ARM Accelerated Cryptographic Algorithms"
- depends on ARM
+menu "Accelerated Cryptographic Algorithms for CPU (arm)"
+
+config CRYPTO_CURVE25519_NEON
+ tristate "Public key crypto: Curve25519 (NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_LIB_CURVE25519_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ help
+ Curve25519 algorithm
+
+ Architecture: arm with
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_GHASH_ARM_CE
+ tristate "Hash functions: GHASH (PMULL/NEON/ARMv8 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_CRYPTD
+ select CRYPTO_GF128MUL
help
- Say Y here to choose from a selection of cryptographic algorithms
- implemented using ARM specific CPU features or instructions.
+ GCM GHASH function (NIST SP800-38D)
-if ARM_CRYPTO
+ Architecture: arm using
+ - PMULL (Polynomial Multiply Long) instructions
+ - NEON (Advanced SIMD) extensions
+ - ARMv8 Crypto Extensions
+
+ Use an implementation of GHASH (used by the GCM AEAD chaining mode)
+ that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
+ that is part of the ARMv8 Crypto Extensions, or a slower variant that
+ uses the vmull.p8 instruction that is part of the basic NEON ISA.
+
+config CRYPTO_NHPOLY1305_NEON
+ tristate "Hash functions: NHPoly1305 (NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_NHPOLY1305
+ help
+ NHPoly1305 hash function (Adiantum)
+
+ Architecture: arm using:
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_POLY1305_ARM
+ tristate "Hash functions: Poly1305 (NEON)"
+ select CRYPTO_HASH
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ help
+ Poly1305 authenticator algorithm (RFC7539)
+
+ Architecture: arm optionally using
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_BLAKE2S_ARM
+ bool "Hash functions: BLAKE2s"
+ select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ help
+ BLAKE2s cryptographic hash function (RFC 7693)
+
+ Architecture: arm
+
+ This is faster than the generic implementations of BLAKE2s and
+ BLAKE2b, but slower than the NEON implementation of BLAKE2b.
+ There is no NEON implementation of BLAKE2s, since NEON doesn't
+ really help with it.
+
+config CRYPTO_BLAKE2B_NEON
+ tristate "Hash functions: BLAKE2b (NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_BLAKE2B
+ help
+ BLAKE2b cryptographic hash function (RFC 7693)
+
+ Architecture: arm using
+ - NEON (Advanced SIMD) extensions
+
+ BLAKE2b digest algorithm optimized with ARM NEON instructions.
+ On ARM processors that have NEON support but not the ARMv8
+ Crypto Extensions, typically this BLAKE2b implementation is
+ much faster than the SHA-2 family and slightly faster than
+ SHA-1.
config CRYPTO_SHA1_ARM
- tristate "SHA1 digest algorithm (ARM-asm)"
+ tristate "Hash functions: SHA-1"
select CRYPTO_SHA1
select CRYPTO_HASH
help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using optimized ARM assembler.
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: arm
config CRYPTO_SHA1_ARM_NEON
- tristate "SHA1 digest algorithm (ARM NEON)"
+ tristate "Hash functions: SHA-1 (NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_SHA1_ARM
select CRYPTO_SHA1
select CRYPTO_HASH
help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using optimized ARM NEON assembly, when NEON instructions are
- available.
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: arm using
+ - NEON (Advanced SIMD) extensions
config CRYPTO_SHA1_ARM_CE
- tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
+ tristate "Hash functions: SHA-1 (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_SHA1_ARM
select CRYPTO_HASH
help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using special ARMv8 Crypto Extensions.
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: arm using ARMv8 Crypto Extensions
config CRYPTO_SHA2_ARM_CE
- tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
+ tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_SHA256_ARM
select CRYPTO_HASH
help
- SHA-256 secure hash standard (DFIPS 180-2) implemented
- using special ARMv8 Crypto Extensions.
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: arm using
+ - ARMv8 Crypto Extensions
config CRYPTO_SHA256_ARM
- tristate "SHA-224/256 digest algorithm (ARM-asm and NEON)"
+ tristate "Hash functions: SHA-224 and SHA-256 (NEON)"
select CRYPTO_HASH
depends on !CPU_V7M
help
- SHA-256 secure hash standard (DFIPS 180-2) implemented
- using optimized ARM assembler and NEON, when available.
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: arm using
+ - NEON (Advanced SIMD) extensions
config CRYPTO_SHA512_ARM
- tristate "SHA-384/512 digest algorithm (ARM-asm and NEON)"
+ tristate "Hash functions: SHA-384 and SHA-512 (NEON)"
select CRYPTO_HASH
depends on !CPU_V7M
help
- SHA-512 secure hash standard (DFIPS 180-2) implemented
- using optimized ARM assembler and NEON, when available.
-
-config CRYPTO_BLAKE2S_ARM
- bool "BLAKE2s digest algorithm (ARM)"
- select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
- help
- BLAKE2s digest algorithm optimized with ARM scalar instructions. This
- is faster than the generic implementations of BLAKE2s and BLAKE2b, but
- slower than the NEON implementation of BLAKE2b. (There is no NEON
- implementation of BLAKE2s, since NEON doesn't really help with it.)
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
-config CRYPTO_BLAKE2B_NEON
- tristate "BLAKE2b digest algorithm (ARM NEON)"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLAKE2B
- help
- BLAKE2b digest algorithm optimized with ARM NEON instructions.
- On ARM processors that have NEON support but not the ARMv8
- Crypto Extensions, typically this BLAKE2b implementation is
- much faster than SHA-2 and slightly faster than SHA-1.
+ Architecture: arm using
+ - NEON (Advanced SIMD) extensions
config CRYPTO_AES_ARM
- tristate "Scalar AES cipher for ARM"
+ tristate "Ciphers: AES"
select CRYPTO_ALGAPI
select CRYPTO_AES
help
- Use optimized AES assembler routines for ARM platforms.
+ Block ciphers: AES cipher algorithms (FIPS-197)
+
+ Architecture: arm
On ARM processors without the Crypto Extensions, this is the
fastest AES implementation for single blocks. For multiple
@@ -98,7 +162,7 @@ config CRYPTO_AES_ARM
such attacks very difficult.
config CRYPTO_AES_ARM_BS
- tristate "Bit sliced AES using NEON instructions"
+ tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
@@ -106,8 +170,13 @@ config CRYPTO_AES_ARM_BS
select CRYPTO_CBC
select CRYPTO_SIMD
help
- Use a faster and more secure NEON based implementation of AES in CBC,
- CTR and XTS modes
+ Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+ with block cipher modes:
+ - ECB (Electronic Codebook) mode (NIST SP800-38A)
+ - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+ - CTR (Counter) mode (NIST SP800-38A)
+ - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+ and IEEE 1619)
Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
and for XTS mode encryption, CBC and XTS mode decryption speedup is
@@ -116,58 +185,59 @@ config CRYPTO_AES_ARM_BS
believed to be invulnerable to cache timing attacks.
config CRYPTO_AES_ARM_CE
- tristate "Accelerated AES using ARMv8 Crypto Extensions"
+ tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
select CRYPTO_SIMD
help
- Use an implementation of AES in CBC, CTR and XTS modes that uses
- ARMv8 Crypto Extensions
+ Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+ with block cipher modes:
+ - ECB (Electronic Codebook) mode (NIST SP800-38A)
+ - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+ - CTR (Counter) mode (NIST SP800-38A)
+ - CTS (Cipher Text Stealing) mode (NIST SP800-38A)
+ - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+ and IEEE 1619)
+
+ Architecture: arm using:
+ - ARMv8 Crypto Extensions
-config CRYPTO_GHASH_ARM_CE
- tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON
- select CRYPTO_HASH
- select CRYPTO_CRYPTD
- select CRYPTO_GF128MUL
+config CRYPTO_CHACHA20_NEON
+ tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (NEON)"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
help
- Use an implementation of GHASH (used by the GCM AEAD chaining mode)
- that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
- that is part of the ARMv8 Crypto Extensions, or a slower variant that
- uses the vmull.p8 instruction that is part of the basic NEON ISA.
+ Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ stream cipher algorithms
-config CRYPTO_CRCT10DIF_ARM_CE
- tristate "CRCT10DIF digest algorithm using PMULL instructions"
- depends on KERNEL_MODE_NEON
- depends on CRC_T10DIF
- select CRYPTO_HASH
+ Architecture: arm using:
+ - NEON (Advanced SIMD) extensions
config CRYPTO_CRC32_ARM_CE
- tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
+ tristate "CRC32C and CRC32"
depends on KERNEL_MODE_NEON
depends on CRC32
select CRYPTO_HASH
+ help
+ CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
+ and CRC32 CRC algorithm (IEEE 802.3)
-config CRYPTO_CHACHA20_NEON
- tristate "NEON and scalar accelerated ChaCha stream cipher algorithms"
- select CRYPTO_SKCIPHER
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ Architecture: arm using:
+ - CRC and/or PMULL instructions
-config CRYPTO_POLY1305_ARM
- tristate "Accelerated scalar and SIMD Poly1305 hash implementations"
- select CRYPTO_HASH
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ Drivers: crc32-arm-ce and crc32c-arm-ce
-config CRYPTO_NHPOLY1305_NEON
- tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)"
+config CRYPTO_CRCT10DIF_ARM_CE
+ tristate "CRCT10DIF"
depends on KERNEL_MODE_NEON
- select CRYPTO_NHPOLY1305
+ depends on CRC_T10DIF
+ select CRYPTO_HASH
+ help
+ CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-config CRYPTO_CURVE25519_NEON
- tristate "NEON accelerated Curve25519 scalar multiplication library"
- depends on KERNEL_MODE_NEON
- select CRYPTO_LIB_CURVE25519_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ Architecture: arm using:
+ - PMULL (Polynomial Multiply Long) instructions
+
+endmenu
-endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fb8463c028b2..dbec73313bf7 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2251,6 +2251,3 @@ source "drivers/acpi/Kconfig"
source "arch/arm64/kvm/Kconfig"
-if CRYPTO
-source "arch/arm64/crypto/Kconfig"
-endif # CRYPTO
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index d31545cc145b..0b6af3348e79 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -112,7 +112,6 @@ CONFIG_ACPI_APEI_MEMORY_FAILURE=y
CONFIG_ACPI_APEI_EINJ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
-CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_SHA512_ARM64_CE=m
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 60db5bb2ddda..8bd80508a710 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -1,141 +1,282 @@
# SPDX-License-Identifier: GPL-2.0
-menuconfig ARM64_CRYPTO
- bool "ARM64 Accelerated Cryptographic Algorithms"
- depends on ARM64
+menu "Accelerated Cryptographic Algorithms for CPU (arm64)"
+
+config CRYPTO_GHASH_ARM64_CE
+ tristate "Hash functions: GHASH (ARMv8 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_GF128MUL
+ select CRYPTO_LIB_AES
+ select CRYPTO_AEAD
help
- Say Y here to choose from a selection of cryptographic algorithms
- implemented using ARM64 specific CPU features or instructions.
+ GCM GHASH function (NIST SP800-38D)
-if ARM64_CRYPTO
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
-config CRYPTO_SHA256_ARM64
- tristate "SHA-224/SHA-256 digest algorithm for arm64"
- select CRYPTO_HASH
+config CRYPTO_NHPOLY1305_NEON
+ tristate "Hash functions: NHPoly1305 (NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_NHPOLY1305
+ help
+ NHPoly1305 hash function (Adiantum)
-config CRYPTO_SHA512_ARM64
- tristate "SHA-384/SHA-512 digest algorithm for arm64"
+ Architecture: arm64 using:
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_POLY1305_NEON
+ tristate "Hash functions: Poly1305 (NEON)"
+ depends on KERNEL_MODE_NEON
select CRYPTO_HASH
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ help
+ Poly1305 authenticator algorithm (RFC7539)
+
+ Architecture: arm64 using:
+ - NEON (Advanced SIMD) extensions
config CRYPTO_SHA1_ARM64_CE
- tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
+ tristate "Hash functions: SHA-1 (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_SHA1
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
+
+config CRYPTO_SHA256_ARM64
+ tristate "Hash functions: SHA-224 and SHA-256"
+ select CRYPTO_HASH
+ help
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: arm64
config CRYPTO_SHA2_ARM64_CE
- tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
+ tristate "Hash functions: SHA-224 and SHA-256 (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_SHA256_ARM64
+ help
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
+
+config CRYPTO_SHA512_ARM64
+ tristate "Hash functions: SHA-384 and SHA-512"
+ select CRYPTO_HASH
+ help
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+ Architecture: arm64
config CRYPTO_SHA512_ARM64_CE
- tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)"
+ tristate "Hash functions: SHA-384 and SHA-512 (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_SHA512_ARM64
+ help
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
config CRYPTO_SHA3_ARM64
- tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)"
+ tristate "Hash functions: SHA-3 (ARMv8.2 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_SHA3
+ help
+ SHA-3 secure hash algorithms (FIPS 202)
+
+ Architecture: arm64 using:
+ - ARMv8.2 Crypto Extensions
config CRYPTO_SM3_ARM64_CE
- tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)"
+ tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_SM3
+ help
+ SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
-config CRYPTO_SM4_ARM64_CE
- tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)"
- depends on KERNEL_MODE_NEON
- select CRYPTO_ALGAPI
- select CRYPTO_SM4
-
-config CRYPTO_SM4_ARM64_CE_BLK
- tristate "SM4 in ECB/CBC/CFB/CTR modes using ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON
- select CRYPTO_SKCIPHER
- select CRYPTO_SM4
-
-config CRYPTO_SM4_ARM64_NEON_BLK
- tristate "SM4 in ECB/CBC/CFB/CTR modes using NEON instructions"
- depends on KERNEL_MODE_NEON
- select CRYPTO_SKCIPHER
- select CRYPTO_SM4
-
-config CRYPTO_GHASH_ARM64_CE
- tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON
- select CRYPTO_HASH
- select CRYPTO_GF128MUL
- select CRYPTO_LIB_AES
- select CRYPTO_AEAD
+ Architecture: arm64 using:
+ - ARMv8.2 Crypto Extensions
config CRYPTO_POLYVAL_ARM64_CE
- tristate "POLYVAL using ARMv8 Crypto Extensions (for HCTR2)"
+ tristate "Hash functions: POLYVAL (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_POLYVAL
+ help
+ POLYVAL hash function for HCTR2
-config CRYPTO_CRCT10DIF_ARM64_CE
- tristate "CRCT10DIF digest algorithm using PMULL instructions"
- depends on KERNEL_MODE_NEON && CRC_T10DIF
- select CRYPTO_HASH
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
config CRYPTO_AES_ARM64
- tristate "AES core cipher using scalar instructions"
+ tristate "Ciphers: AES, modes: ECB, CBC, CTR, CTS, XCTR, XTS"
select CRYPTO_AES
+ help
+ Block ciphers: AES cipher algorithms (FIPS-197)
+ Length-preserving ciphers: AES with ECB, CBC, CTR, CTS,
+ XCTR, and XTS modes
+ AEAD cipher: AES with CBC, ESSIV, and SHA-256
+ for fscrypt and dm-crypt
+
+ Architecture: arm64
config CRYPTO_AES_ARM64_CE
- tristate "AES core cipher using ARMv8 Crypto Extensions"
+ tristate "Ciphers: AES (ARMv8 Crypto Extensions)"
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_LIB_AES
+ help
+ Block ciphers: AES cipher algorithms (FIPS-197)
-config CRYPTO_AES_ARM64_CE_CCM
- tristate "AES in CCM mode using ARMv8 Crypto Extensions"
- depends on ARM64 && KERNEL_MODE_NEON
- select CRYPTO_ALGAPI
- select CRYPTO_AES_ARM64_CE
- select CRYPTO_AEAD
- select CRYPTO_LIB_AES
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
config CRYPTO_AES_ARM64_CE_BLK
- tristate "AES in ECB/CBC/CTR/XTS/XCTR modes using ARMv8 Crypto Extensions"
+ tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_AES_ARM64_CE
+ help
+ Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+ with block cipher modes:
+ - ECB (Electronic Codebook) mode (NIST SP800-38A)
+ - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+ - CTR (Counter) mode (NIST SP800-38A)
+ - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+ and IEEE 1619)
+
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
config CRYPTO_AES_ARM64_NEON_BLK
- tristate "AES in ECB/CBC/CTR/XTS/XCTR modes using NEON instructions"
+ tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
+ help
+ Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+ with block cipher modes:
+ - ECB (Electronic Codebook) mode (NIST SP800-38A)
+ - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+ - CTR (Counter) mode (NIST SP800-38A)
+ - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+ and IEEE 1619)
+
+ Architecture: arm64 using:
+ - NEON (Advanced SIMD) extensions
config CRYPTO_CHACHA20_NEON
- tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions"
+ tristate "Ciphers: ChaCha (NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ help
+ Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ stream cipher algorithms
-config CRYPTO_POLY1305_NEON
- tristate "Poly1305 hash function using scalar or NEON instructions"
+ Architecture: arm64 using:
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_AES_ARM64_BS
+ tristate "Ciphers: AES, modes: ECB/CBC/CTR/XCTR/XTS modes (bit-sliced NEON)"
depends on KERNEL_MODE_NEON
- select CRYPTO_HASH
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ select CRYPTO_SKCIPHER
+ select CRYPTO_AES_ARM64_NEON_BLK
+ select CRYPTO_LIB_AES
+ help
+ Length-preserving ciphers: AES cipher algorithms (FIPS-197)
+ with block cipher modes:
+ - ECB (Electronic Codebook) mode (NIST SP800-38A)
+ - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+ - CTR (Counter) mode (NIST SP800-38A)
+ - XCTR mode for HCTR2
+ - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+ and IEEE 1619)
-config CRYPTO_NHPOLY1305_NEON
- tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)"
+ Architecture: arm64 using:
+ - bit-sliced algorithm
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_SM4_ARM64_CE
+ tristate "Ciphers: SM4 (ARMv8.2 Crypto Extensions)"
depends on KERNEL_MODE_NEON
- select CRYPTO_NHPOLY1305
+ select CRYPTO_ALGAPI
+ select CRYPTO_SM4
+ help
+ Block ciphers: SM4 cipher algorithms (OSCCA GB/T 32907-2016)
-config CRYPTO_AES_ARM64_BS
- tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
+ Architecture: arm64 using:
+ - ARMv8.2 Crypto Extensions
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_SM4_ARM64_CE_BLK
+ tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
- select CRYPTO_AES_ARM64_NEON_BLK
+ select CRYPTO_SM4
+ help
+ Length-preserving ciphers: SM4 cipher algorithms (OSCCA GB/T 32907-2016)
+ with block cipher modes:
+ - ECB (Electronic Codebook) mode (NIST SP800-38A)
+ - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+ - CFB (Cipher Feedback) mode (NIST SP800-38A)
+ - CTR (Counter) mode (NIST SP800-38A)
+
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_SM4_ARM64_NEON_BLK
+ tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR (NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SM4
+ help
+ Length-preserving ciphers: SM4 cipher algorithms (OSCCA GB/T 32907-2016)
+ with block cipher modes:
+ - ECB (Electronic Codebook) mode (NIST SP800-38A)
+ - CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+ - CFB (Cipher Feedback) mode (NIST SP800-38A)
+ - CTR (Counter) mode (NIST SP800-38A)
+
+ Architecture: arm64 using:
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_AES_ARM64_CE_CCM
+ tristate "AEAD cipher: AES in CCM mode (ARMv8 Crypto Extensions)"
+ depends on ARM64 && KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES_ARM64_CE
+ select CRYPTO_AEAD
select CRYPTO_LIB_AES
+ help
+ AEAD cipher: AES cipher algorithms (FIPS-197) with
+ CCM (Counter with Cipher Block Chaining-Message Authentication Code)
+ authenticated encryption mode (NIST SP800-38C)
+
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_CRCT10DIF_ARM64_CE
+ tristate "CRCT10DIF (PMULL)"
+ depends on KERNEL_MODE_NEON && CRC_T10DIF
+ select CRYPTO_HASH
+ help
+ CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
+
+ Architecture: arm64 using
+ - PMULL (Polynomial Multiply Long) instructions
+
+endmenu
-endif
diff --git a/arch/mips/crypto/Kconfig b/arch/mips/crypto/Kconfig
new file mode 100644
index 000000000000..9003a5c1e879
--- /dev/null
+++ b/arch/mips/crypto/Kconfig
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Accelerated Cryptographic Algorithms for CPU (mips)"
+
+config CRYPTO_CRC32_MIPS
+ tristate "CRC32c and CRC32"
+ depends on MIPS_CRC_SUPPORT
+ select CRYPTO_HASH
+ help
+ CRC32c and CRC32 CRC algorithms
+
+ Architecture: mips
+
+config CRYPTO_POLY1305_MIPS
+ tristate "Hash functions: Poly1305"
+ depends on MIPS
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ help
+ Poly1305 authenticator algorithm (RFC7539)
+
+ Architecture: mips
+
+config CRYPTO_MD5_OCTEON
+ tristate "Digests: MD5 (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
+ select CRYPTO_MD5
+ select CRYPTO_HASH
+ help
+ MD5 message digest algorithm (RFC1321)
+
+ Architecture: mips OCTEON using crypto instructions, when available
+
+config CRYPTO_SHA1_OCTEON
+ tristate "Hash functions: SHA-1 (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
+ select CRYPTO_SHA1
+ select CRYPTO_HASH
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: mips OCTEON
+
+config CRYPTO_SHA256_OCTEON
+ tristate "Hash functions: SHA-224 and SHA-256 (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ help
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: mips OCTEON using crypto instructions, when available
+
+config CRYPTO_SHA512_OCTEON
+ tristate "Hash functions: SHA-384 and SHA-512 (OCTEON)"
+ depends on CPU_CAVIUM_OCTEON
+ select CRYPTO_SHA512
+ select CRYPTO_HASH
+ help
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+ Architecture: mips OCTEON using crypto instructions, when available
+
+config CRYPTO_CHACHA_MIPS
+ tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (MIPS32r2)"
+ depends on CPU_MIPS32_R2
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ help
+ Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ stream cipher algorithms
+
+ Architecture: MIPS32r2
+
+endmenu
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
new file mode 100644
index 000000000000..c1b964447401
--- /dev/null
+++ b/arch/powerpc/crypto/Kconfig
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"
+
+config CRYPTO_CRC32C_VPMSUM
+ tristate "CRC32c"
+ depends on PPC64 && ALTIVEC
+ select CRYPTO_HASH
+ select CRC32
+ help
+ CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
+
+ Architecture: powerpc64 using
+ - AltiVec extensions
+
+ Enable on POWER8 and newer processors for improved performance.
+
+config CRYPTO_CRCT10DIF_VPMSUM
+ tristate "CRC32T10DIF"
+ depends on PPC64 && ALTIVEC && CRC_T10DIF
+ select CRYPTO_HASH
+ help
+ CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
+
+ Architecture: powerpc64 using
+ - AltiVec extensions
+
+ Enable on POWER8 and newer processors for improved performance.
+
+config CRYPTO_VPMSUM_TESTER
+ tristate "CRC32c and CRC32T10DIF hardware acceleration tester"
+ depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
+ help
+ Stress test for CRC32c and CRCT10DIF algorithms implemented with
+ powerpc64 AltiVec extensions (POWER8 vpmsum instructions).
+ Unless you are testing these algorithms, you don't need this.
+
+config CRYPTO_MD5_PPC
+ tristate "Digests: MD5"
+ depends on PPC
+ select CRYPTO_HASH
+ help
+ MD5 message digest algorithm (RFC1321)
+
+ Architecture: powerpc
+
+config CRYPTO_SHA1_PPC
+ tristate "Hash functions: SHA-1"
+ depends on PPC
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: powerpc
+
+config CRYPTO_SHA1_PPC_SPE
+ tristate "Hash functions: SHA-1 (SPE)"
+ depends on PPC && SPE
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: powerpc using
+ - SPE (Signal Processing Engine) extensions
+
+config CRYPTO_SHA256_PPC_SPE
+ tristate "Hash functions: SHA-224 and SHA-256 (SPE)"
+ depends on PPC && SPE
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ help
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: powerpc using
+ - SPE (Signal Processing Engine) extensions
+
+config CRYPTO_AES_PPC_SPE
+ tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)"
+ depends on PPC && SPE
+ select CRYPTO_SKCIPHER
+ help
+ Block ciphers: AES cipher algorithms (FIPS-197)
+ Length-preserving ciphers: AES with ECB, CBC, CTR, and XTS modes
+
+ Architecture: powerpc using:
+ - SPE (Signal Processing Engine) extensions
+
+ SPE is available for:
+ - Processor Type: Freescale 8500
+ - CPU selection: e500 (8540)
+
+ This module should only be used for low power (router) devices
+ without hardware AES acceleration (e.g. caam crypto). It reduces the
+ size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
+ timing attacks. Nevertheless it might not be as secure as other
+ architecture specific assembler implementations that work on 1KB
+ tables or 256 bytes S-boxes.
+
+endmenu
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
new file mode 100644
index 000000000000..06ee706b0d78
--- /dev/null
+++ b/arch/s390/crypto/Kconfig
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Accelerated Cryptographic Algorithms for CPU (s390)"
+
+config CRYPTO_CRC32_S390
+ tristate "CRC32c and CRC32"
+ depends on S390
+ select CRYPTO_HASH
+ select CRC32
+ help
+ CRC32c and CRC32 CRC algorithms
+
+ Architecture: s390
+
+ It is available with IBM z13 or later.
+
+config CRYPTO_SHA512_S390
+ tristate "Hash functions: SHA-384 and SHA-512"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+ Architecture: s390
+
+ It is available as of z10.
+
+config CRYPTO_SHA1_S390
+ tristate "Hash functions: SHA-1"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: s390
+
+ It is available as of z990.
+
+config CRYPTO_SHA256_S390
+ tristate "Hash functions: SHA-224 and SHA-256"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: s390
+
+ It is available as of z9.
+
+config CRYPTO_SHA3_256_S390
+ tristate "Hash functions: SHA3-224 and SHA3-256"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ SHA3-224 and SHA3-256 secure hash algorithms (FIPS 202)
+
+ Architecture: s390
+
+ It is available as of z14.
+
+config CRYPTO_SHA3_512_S390
+ tristate "Hash functions: SHA3-384 and SHA3-512"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ SHA3-384 and SHA3-512 secure hash algorithms (FIPS 202)
+
+ Architecture: s390
+
+ It is available as of z14.
+
+config CRYPTO_GHASH_S390
+ tristate "Hash functions: GHASH"
+ depends on S390
+ select CRYPTO_HASH
+ help
+ GCM GHASH hash function (NIST SP800-38D)
+
+ Architecture: s390
+
+ It is available as of z196.
+
+config CRYPTO_AES_S390
+ tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS, GCM"
+ depends on S390
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ help
+ Block cipher: AES cipher algorithms (FIPS 197)
+ AEAD cipher: AES with GCM
+ Length-preserving ciphers: AES with ECB, CBC, XTS, and CTR modes
+
+ Architecture: s390
+
+ As of z9 the ECB and CBC modes are hardware accelerated
+ for 128 bit keys.
+
+ As of z10 the ECB and CBC modes are hardware accelerated
+ for all AES key sizes.
+
+ As of z196 the CTR mode is hardware accelerated for all AES
+ key sizes and XTS mode is hardware accelerated for 256 and
+ 512 bit keys.
+
+config CRYPTO_DES_S390
+ tristate "Ciphers: DES and Triple DES EDE, modes: ECB, CBC, CTR"
+ depends on S390
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_DES
+ help
+ Block ciphers: DES (FIPS 46-2) cipher algorithm
+ Block ciphers: Triple DES EDE (FIPS 46-3) cipher algorithm
+ Length-preserving ciphers: DES with ECB, CBC, and CTR modes
+ Length-preserving ciphers: Triple DES EDE with ECB, CBC, and CTR modes
+
+ Architecture: s390
+
+ As of z990 the ECB and CBC mode are hardware accelerated.
+ As of z196 the CTR mode is hardware accelerated.
+
+config CRYPTO_CHACHA_S390
+ tristate "Ciphers: ChaCha20"
+ depends on S390
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ help
+ Length-preserving cipher: ChaCha20 stream cipher (RFC 7539)
+
+ Architecture: s390
+
+ It is available as of z13.
+
+endmenu
diff --git a/arch/sparc/crypto/Kconfig b/arch/sparc/crypto/Kconfig
new file mode 100644
index 000000000000..cfe5102b1c68
--- /dev/null
+++ b/arch/sparc/crypto/Kconfig
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Accelerated Cryptographic Algorithms for CPU (sparc64)"
+
+config CRYPTO_DES_SPARC64
+ tristate "Ciphers: DES and Triple DES EDE, modes: ECB/CBC"
+ depends on SPARC64
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_DES
+ select CRYPTO_SKCIPHER
+ help
+ Block cipher: DES (FIPS 46-2) cipher algorithm
+ Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm
+ Length-preserving ciphers: DES with ECB and CBC modes
+ Length-preserving ciphers: Triple DES EDE with ECB and CBC modes
+
+ Architecture: sparc64
+
+config CRYPTO_CRC32C_SPARC64
+ tristate "CRC32c"
+ depends on SPARC64
+ select CRYPTO_HASH
+ select CRC32
+ help
+ CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
+
+ Architecture: sparc64
+
+config CRYPTO_MD5_SPARC64
+ tristate "Digests: MD5"
+ depends on SPARC64
+ select CRYPTO_MD5
+ select CRYPTO_HASH
+ help
+ MD5 message digest algorithm (RFC1321)
+
+ Architecture: sparc64 using crypto instructions, when available
+
+config CRYPTO_SHA1_SPARC64
+ tristate "Hash functions: SHA-1"
+ depends on SPARC64
+ select CRYPTO_SHA1
+ select CRYPTO_HASH
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: sparc64
+
+config CRYPTO_SHA256_SPARC64
+ tristate "Hash functions: SHA-224 and SHA-256"
+ depends on SPARC64
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ help
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: sparc64 using crypto instructions, when available
+
+config CRYPTO_SHA512_SPARC64
+ tristate "Hash functions: SHA-384 and SHA-512"
+ depends on SPARC64
+ select CRYPTO_SHA512
+ select CRYPTO_HASH
+ help
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+ Architecture: sparc64 using crypto instructions, when available
+
+config CRYPTO_AES_SPARC64
+ tristate "Ciphers: AES, modes: ECB, CBC, CTR"
+ depends on SPARC64
+ select CRYPTO_SKCIPHER
+ help
+ Block ciphers: AES cipher algorithms (FIPS-197)
+ Length-preserving ciphers: AES with ECB, CBC, and CTR modes
+
+ Architecture: sparc64 using crypto instructions
+
+config CRYPTO_CAMELLIA_SPARC64
+ tristate "Ciphers: Camellia, modes: ECB, CBC"
+ depends on SPARC64
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ help
+ Block ciphers: Camellia cipher algorithms
+ Length-preserving ciphers: Camellia with ECB and CBC modes
+
+ Architecture: sparc64
+
+endmenu
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
new file mode 100644
index 000000000000..71c4c473d34b
--- /dev/null
+++ b/arch/x86/crypto/Kconfig
@@ -0,0 +1,484 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Accelerated Cryptographic Algorithms for CPU (x86)"
+
+config CRYPTO_CURVE25519_X86
+ tristate "Public key crypto: Curve25519 (ADX)"
+ depends on X86 && 64BIT
+ select CRYPTO_LIB_CURVE25519_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ help
+ Curve25519 algorithm
+
+ Architecture: x86_64 using:
+ - ADX (large integer arithmetic)
+
+config CRYPTO_AES_NI_INTEL
+ tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTR, XTS, GCM (AES-NI)"
+ depends on X86
+ select CRYPTO_AEAD
+ select CRYPTO_LIB_AES
+ select CRYPTO_ALGAPI
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SIMD
+ help
+ Block cipher: AES cipher algorithms
+ AEAD cipher: AES with GCM
+ Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XCTR, XTS
+
+ Architecture: x86 (32-bit and 64-bit) using:
+ - AES-NI (AES new instructions)
+
+config CRYPTO_BLOWFISH_X86_64
+ tristate "Ciphers: Blowfish, modes: ECB, CBC"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_BLOWFISH_COMMON
+ imply CRYPTO_CTR
+ help
+ Block cipher: Blowfish cipher algorithm
+ Length-preserving ciphers: Blowfish with ECB and CBC modes
+
+ Architecture: x86_64
+
+config CRYPTO_CAMELLIA_X86_64
+ tristate "Ciphers: Camellia with modes: ECB, CBC"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ imply CRYPTO_CTR
+ help
+ Block cipher: Camellia cipher algorithms
+ Length-preserving ciphers: Camellia with ECB and CBC modes
+
+ Architecture: x86_64
+
+config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
+ tristate "Ciphers: Camellia with modes: ECB, CBC (AES-NI/AVX)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_CAMELLIA_X86_64
+ select CRYPTO_SIMD
+ imply CRYPTO_XTS
+ help
+ Length-preserving ciphers: Camellia with ECB and CBC modes
+
+ Architecture: x86_64 using:
+ - AES-NI (AES New Instructions)
+ - AVX (Advanced Vector Extensions)
+
+config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
+ tristate "Ciphers: Camellia with modes: ECB, CBC (AES-NI/AVX2)"
+ depends on X86 && 64BIT
+ select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
+ help
+ Length-preserving ciphers: Camellia with ECB and CBC modes
+
+ Architecture: x86_64 using:
+ - AES-NI (AES New Instructions)
+ - AVX2 (Advanced Vector Extensions 2)
+
+config CRYPTO_CAST5_AVX_X86_64
+ tristate "Ciphers: CAST5 with modes: ECB, CBC (AVX)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_CAST5
+ select CRYPTO_CAST_COMMON
+ select CRYPTO_SIMD
+ imply CRYPTO_CTR
+ help
+ Length-preserving ciphers: CAST5 (CAST-128) cipher algorithm
+ (RFC2144) with ECB and CBC modes
+
+ Architecture: x86_64 using:
+ - AVX (Advanced Vector Extensions)
+
+ Processes 16 blocks in parallel.
+
+config CRYPTO_CAST6_AVX_X86_64
+ tristate "Ciphers: CAST6 with modes: ECB, CBC (AVX)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_CAST6
+ select CRYPTO_CAST_COMMON
+ select CRYPTO_SIMD
+ imply CRYPTO_XTS
+ imply CRYPTO_CTR
+ help
+ Length-preserving ciphers: CAST6 (CAST-256) cipher algorithm
+ (RFC2612) with ECB and CBC modes
+
+ Architecture: x86_64 using:
+ - AVX (Advanced Vector Extensions)
+
+ Processes eight blocks in parallel.
+
+config CRYPTO_DES3_EDE_X86_64
+ tristate "Ciphers: Triple DES EDE with modes: ECB, CBC"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_DES
+ imply CRYPTO_CTR
+ help
+ Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm
+ Length-preserving ciphers: Triple DES EDE with ECB and CBC modes
+
+ Architecture: x86_64
+
+ Processes one or three blocks in parallel.
+
+config CRYPTO_SERPENT_SSE2_X86_64
+ tristate "Ciphers: Serpent with modes: ECB, CBC (SSE2)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SERPENT
+ select CRYPTO_SIMD
+ imply CRYPTO_CTR
+ help
+ Length-preserving ciphers: Serpent cipher algorithm
+ with ECB and CBC modes
+
+ Architecture: x86_64 using:
+ - SSE2 (Streaming SIMD Extensions 2)
+
+ Processes eight blocks in parallel.
+
+config CRYPTO_SERPENT_SSE2_586
+ tristate "Ciphers: Serpent with modes: ECB, CBC (32-bit with SSE2)"
+ depends on X86 && !64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SERPENT
+ select CRYPTO_SIMD
+ imply CRYPTO_CTR
+ help
+ Length-preserving ciphers: Serpent cipher algorithm
+ with ECB and CBC modes
+
+ Architecture: x86 (32-bit) using:
+ - SSE2 (Streaming SIMD Extensions 2)
+
+ Processes four blocks in parallel.
+
+config CRYPTO_SERPENT_AVX_X86_64
+ tristate "Ciphers: Serpent with modes: ECB, CBC (AVX)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SERPENT
+ select CRYPTO_SIMD
+ imply CRYPTO_XTS
+ imply CRYPTO_CTR
+ help
+ Length-preserving ciphers: Serpent cipher algorithm
+ with ECB and CBC modes
+
+ Architecture: x86_64 using:
+ - AVX (Advanced Vector Extensions)
+
+ Processes eight blocks in parallel.
+
+config CRYPTO_SERPENT_AVX2_X86_64
+ tristate "Ciphers: Serpent with modes: ECB, CBC (AVX2)"
+ depends on X86 && 64BIT
+ select CRYPTO_SERPENT_AVX_X86_64
+ help
+ Length-preserving ciphers: Serpent cipher algorithm
+ with ECB and CBC modes
+
+ Architecture: x86_64 using:
+ - AVX2 (Advanced Vector Extensions 2)
+
+ Processes 16 blocks in parallel.
+
+config CRYPTO_SM4_AESNI_AVX_X86_64
+ tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SIMD
+ select CRYPTO_ALGAPI
+ select CRYPTO_SM4
+ help
+ Length-preserving ciphers: SM4 cipher algorithms
+ (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes
+
+ Architecture: x86_64 using:
+ - AES-NI (AES New Instructions)
+ - AVX (Advanced Vector Extensions)
+
+ Through two affine transforms,
+ we can use the AES S-Box to simulate the SM4 S-Box to achieve the
+ effect of instruction acceleration.
+
+ If unsure, say N.
+
+config CRYPTO_SM4_AESNI_AVX2_X86_64
+ tristate "Ciphers: SM4 with modes: ECB, CBC, CFB, CTR (AES-NI/AVX2)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SIMD
+ select CRYPTO_ALGAPI
+ select CRYPTO_SM4
+ select CRYPTO_SM4_AESNI_AVX_X86_64
+ help
+ Length-preserving ciphers: SM4 cipher algorithms
+ (OSCCA GB/T 32907-2016) with ECB, CBC, CFB, and CTR modes
+
+ Architecture: x86_64 using:
+ - AES-NI (AES New Instructions)
+ - AVX2 (Advanced Vector Extensions 2)
+
+ Through two affine transforms,
+ we can use the AES S-Box to simulate the SM4 S-Box to achieve the
+ effect of instruction acceleration.
+
+ If unsure, say N.
+
+config CRYPTO_TWOFISH_586
+ tristate "Ciphers: Twofish (32-bit)"
+ depends on (X86 || UML_X86) && !64BIT
+ select CRYPTO_ALGAPI
+ select CRYPTO_TWOFISH_COMMON
+ imply CRYPTO_CTR
+ help
+ Block cipher: Twofish cipher algorithm
+
+ Architecture: x86 (32-bit)
+
+config CRYPTO_TWOFISH_X86_64
+ tristate "Ciphers: Twofish"
+ depends on (X86 || UML_X86) && 64BIT
+ select CRYPTO_ALGAPI
+ select CRYPTO_TWOFISH_COMMON
+ imply CRYPTO_CTR
+ help
+ Block cipher: Twofish cipher algorithm
+
+ Architecture: x86_64
+
+config CRYPTO_TWOFISH_X86_64_3WAY
+ tristate "Ciphers: Twofish with modes: ECB, CBC (3-way parallel)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_TWOFISH_COMMON
+ select CRYPTO_TWOFISH_X86_64
+ help
+ Length-preserving cipher: Twofish cipher algorithm
+ with ECB and CBC modes
+
+ Architecture: x86_64
+
+ Processes three blocks in parallel, better utilizing resources of
+ out-of-order CPUs.
+
+config CRYPTO_TWOFISH_AVX_X86_64
+ tristate "Ciphers: Twofish with modes: ECB, CBC (AVX)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SIMD
+ select CRYPTO_TWOFISH_COMMON
+ select CRYPTO_TWOFISH_X86_64
+ select CRYPTO_TWOFISH_X86_64_3WAY
+ imply CRYPTO_XTS
+ help
+ Length-preserving cipher: Twofish cipher algorithm
+ with ECB and CBC modes
+
+ Architecture: x86_64 using:
+ - AVX (Advanced Vector Extensions)
+
+ Processes eight blocks in parallel.
+
+config CRYPTO_ARIA_AESNI_AVX_X86_64
+ tristate "Ciphers: ARIA with modes: ECB, CTR (AES-NI/AVX/GFNI)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_SIMD
+ select CRYPTO_ALGAPI
+ select CRYPTO_ARIA
+ help
+ Length-preserving cipher: ARIA cipher algorithms
+ (RFC 5794) with ECB and CTR modes
+
+ Architecture: x86_64 using:
+ - AES-NI (AES New Instructions)
+ - AVX (Advanced Vector Extensions)
+ - GFNI (Galois Field New Instructions)
+
+ Processes 16 blocks in parallel.
+
+config CRYPTO_CHACHA20_X86_64
+ tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (SSSE3/AVX2/AVX-512VL)"
+ depends on X86 && 64BIT
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ help
+ Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ stream cipher algorithms
+
+ Architecture: x86_64 using:
+ - SSSE3 (Supplemental SSE3)
+ - AVX2 (Advanced Vector Extensions 2)
+ - AVX-512VL (Advanced Vector Extensions-512VL)
+
+config CRYPTO_AEGIS128_AESNI_SSE2
+ tristate "AEAD ciphers: AEGIS-128 (AES-NI/SSE2)"
+ depends on X86 && 64BIT
+ select CRYPTO_AEAD
+ select CRYPTO_SIMD
+ help
+ AEGIS-128 AEAD algorithm
+
+ Architecture: x86_64 using:
+ - AES-NI (AES New Instructions)
+ - SSE2 (Streaming SIMD Extensions 2)
+
+config CRYPTO_NHPOLY1305_SSE2
+ tristate "Hash functions: NHPoly1305 (SSE2)"
+ depends on X86 && 64BIT
+ select CRYPTO_NHPOLY1305
+ help
+ NHPoly1305 hash function for Adiantum
+
+ Architecture: x86_64 using:
+ - SSE2 (Streaming SIMD Extensions 2)
+
+config CRYPTO_NHPOLY1305_AVX2
+ tristate "Hash functions: NHPoly1305 (AVX2)"
+ depends on X86 && 64BIT
+ select CRYPTO_NHPOLY1305
+ help
+ NHPoly1305 hash function for Adiantum
+
+ Architecture: x86_64 using:
+ - AVX2 (Advanced Vector Extensions 2)
+
+config CRYPTO_BLAKE2S_X86
+ bool "Hash functions: BLAKE2s (SSSE3/AVX-512)"
+ depends on X86 && 64BIT
+ select CRYPTO_LIB_BLAKE2S_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ help
+ BLAKE2s cryptographic hash function (RFC 7693)
+
+ Architecture: x86_64 using:
+ - SSSE3 (Supplemental SSE3)
+ - AVX-512 (Advanced Vector Extensions-512)
+
+config CRYPTO_POLYVAL_CLMUL_NI
+ tristate "Hash functions: POLYVAL (CLMUL-NI)"
+ depends on X86 && 64BIT
+ select CRYPTO_POLYVAL
+ help
+ POLYVAL hash function for HCTR2
+
+ Architecture: x86_64 using:
+ - CLMUL-NI (carry-less multiplication new instructions)
+
+config CRYPTO_POLY1305_X86_64
+ tristate "Hash functions: Poly1305 (SSE2/AVX2)"
+ depends on X86 && 64BIT
+ select CRYPTO_LIB_POLY1305_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ help
+ Poly1305 authenticator algorithm (RFC7539)
+
+ Architecture: x86_64 using:
+ - SSE2 (Streaming SIMD Extensions 2)
+ - AVX2 (Advanced Vector Extensions 2)
+
+config CRYPTO_SHA1_SSSE3
+ tristate "Hash functions: SHA-1 (SSSE3/AVX/AVX2/SHA-NI)"
+ depends on X86 && 64BIT
+ select CRYPTO_SHA1
+ select CRYPTO_HASH
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: x86_64 using:
+ - SSSE3 (Supplemental SSE3)
+ - AVX (Advanced Vector Extensions)
+ - AVX2 (Advanced Vector Extensions 2)
+ - SHA-NI (SHA Extensions New Instructions)
+
+config CRYPTO_SHA256_SSSE3
+ tristate "Hash functions: SHA-224 and SHA-256 (SSSE3/AVX/AVX2/SHA-NI)"
+ depends on X86 && 64BIT
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ help
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: x86_64 using:
+ - SSSE3 (Supplemental SSE3)
+ - AVX (Advanced Vector Extensions)
+ - AVX2 (Advanced Vector Extensions 2)
+ - SHA-NI (SHA Extensions New Instructions)
+
+config CRYPTO_SHA512_SSSE3
+ tristate "Hash functions: SHA-384 and SHA-512 (SSSE3/AVX/AVX2)"
+ depends on X86 && 64BIT
+ select CRYPTO_SHA512
+ select CRYPTO_HASH
+ help
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180)
+
+ Architecture: x86_64 using:
+ - SSSE3 (Supplemental SSE3)
+ - AVX (Advanced Vector Extensions)
+ - AVX2 (Advanced Vector Extensions 2)
+
+config CRYPTO_SM3_AVX_X86_64
+ tristate "Hash functions: SM3 (AVX)"
+ depends on X86 && 64BIT
+ select CRYPTO_HASH
+ select CRYPTO_SM3
+ help
+ SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3
+
+ Architecture: x86_64 using:
+ - AVX (Advanced Vector Extensions)
+
+ If unsure, say N.
+
+config CRYPTO_GHASH_CLMUL_NI_INTEL
+ tristate "Hash functions: GHASH (CLMUL-NI)"
+ depends on X86 && 64BIT
+ select CRYPTO_CRYPTD
+ help
+ GCM GHASH hash function (NIST SP800-38D)
+
+ Architecture: x86_64 using:
+ - CLMUL-NI (carry-less multiplication new instructions)
+
+config CRYPTO_CRC32C_INTEL
+ tristate "CRC32c (SSE4.2/PCLMULQDQ)"
+ depends on X86
+ select CRYPTO_HASH
+ help
+ CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
+
+ Architecture: x86 (32-bit and 64-bit) using:
+ - SSE4.2 (Streaming SIMD Extensions 4.2) CRC32 instruction
+ - PCLMULQDQ (carry-less multiplication)
+
+config CRYPTO_CRC32_PCLMUL
+ tristate "CRC32 (PCLMULQDQ)"
+ depends on X86
+ select CRYPTO_HASH
+ select CRC32
+ help
+ CRC32 CRC algorithm (IEEE 802.3)
+
+ Architecture: x86 (32-bit and 64-bit) using:
+ - PCLMULQDQ (carry-less multiplication)
+
+config CRYPTO_CRCT10DIF_PCLMUL
+ tristate "CRCT10DIF (PCLMULQDQ)"
+ depends on X86 && 64BIT && CRC_T10DIF
+ select CRYPTO_HASH
+ help
+ CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
+
+ Architecture: x86_64 using:
+ - PCLMULQDQ (carry-less multiplication)
+
+endmenu
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 04d07ab744b2..3b1d701a4f6c 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -100,6 +100,9 @@ sm4-aesni-avx-x86_64-y := sm4-aesni-avx-asm_64.o sm4_aesni_avx_glue.o
obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64) += sm4-aesni-avx2-x86_64.o
sm4-aesni-avx2-x86_64-y := sm4-aesni-avx2-asm_64.o sm4_aesni_avx2_glue.o
+obj-$(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64) += aria-aesni-avx-x86_64.o
+aria-aesni-avx-x86_64-y := aria-aesni-avx-asm_64.o aria_aesni_avx_glue.o
+
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $< > $@
$(obj)/%.S: $(src)/%.pl FORCE
diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S
new file mode 100644
index 000000000000..c75fd7d015ed
--- /dev/null
+++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S
@@ -0,0 +1,1303 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ARIA Cipher 16-way parallel algorithm (AVX)
+ *
+ * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/frame.h>
+
+/* struct aria_ctx: */
+#define enc_key 0
+#define dec_key 272
+#define rounds 544
+
+/* register macros */
+#define CTX %rdi
+
+
+#define BV8(a0, a1, a2, a3, a4, a5, a6, a7) \
+ ( (((a0) & 1) << 0) | \
+ (((a1) & 1) << 1) | \
+ (((a2) & 1) << 2) | \
+ (((a3) & 1) << 3) | \
+ (((a4) & 1) << 4) | \
+ (((a5) & 1) << 5) | \
+ (((a6) & 1) << 6) | \
+ (((a7) & 1) << 7) )
+
+#define BM8X8(l0, l1, l2, l3, l4, l5, l6, l7) \
+ ( ((l7) << (0 * 8)) | \
+ ((l6) << (1 * 8)) | \
+ ((l5) << (2 * 8)) | \
+ ((l4) << (3 * 8)) | \
+ ((l3) << (4 * 8)) | \
+ ((l2) << (5 * 8)) | \
+ ((l1) << (6 * 8)) | \
+ ((l0) << (7 * 8)) )
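+
+/*
+ * For example, BV8(1, 0, 0, 0, 1, 1, 1, 1) packs a0 into bit 0 through
+ * a7 into bit 7, giving 0xf1; BM8X8() then packs eight such row bytes
+ * into one 64-bit bit-matrix constant, with l0 in the most significant
+ * byte.
+ */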
+
+#define inc_le128(x, minus_one, tmp) \
+ vpcmpeqq minus_one, x, tmp; \
+ vpsubq minus_one, x, x; \
+ vpslldq $8, tmp, tmp; \
+ vpsubq tmp, x, x;
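+
+/*
+ * inc_le128() increments a little-endian 128-bit value: minus_one is
+ * expected to hold {-1, 0}, so the first vpsubq adds 1 to the low qword
+ * only, while the vpcmpeqq/vpslldq pair detects a wrapping low qword
+ * and carries the 1 into the high qword.
+ */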
+
+#define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
+ vpand x, mask4bit, tmp0; \
+ vpandn x, mask4bit, x; \
+ vpsrld $4, x, x; \
+ \
+ vpshufb tmp0, lo_t, tmp0; \
+ vpshufb x, hi_t, x; \
+ vpxor tmp0, x, x;
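+
+/*
+ * filter_8bit() evaluates an 8-bit affine transform as two 16-entry
+ * table lookups: vpshufb maps each byte's low nibble through lo_t and
+ * its high nibble through hi_t, and the two results are XORed.
+ */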
+
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+ vpunpckhdq x1, x0, t2; \
+ vpunpckldq x1, x0, x0; \
+ \
+ vpunpckldq x3, x2, t1; \
+ vpunpckhdq x3, x2, x2; \
+ \
+ vpunpckhqdq t1, x0, x1; \
+ vpunpcklqdq t1, x0, x0; \
+ \
+ vpunpckhqdq x2, t2, x3; \
+ vpunpcklqdq x2, t2, x2;
+
+#define byteslice_16x16b(a0, b0, c0, d0, \
+ a1, b1, c1, d1, \
+ a2, b2, c2, d2, \
+ a3, b3, c3, d3, \
+ st0, st1) \
+ vmovdqu d2, st0; \
+ vmovdqu d3, st1; \
+ transpose_4x4(a0, a1, a2, a3, d2, d3); \
+ transpose_4x4(b0, b1, b2, b3, d2, d3); \
+ vmovdqu st0, d2; \
+ vmovdqu st1, d3; \
+ \
+ vmovdqu a0, st0; \
+ vmovdqu a1, st1; \
+ transpose_4x4(c0, c1, c2, c3, a0, a1); \
+ transpose_4x4(d0, d1, d2, d3, a0, a1); \
+ \
+ vmovdqu .Lshufb_16x16b, a0; \
+ vmovdqu st1, a1; \
+ vpshufb a0, a2, a2; \
+ vpshufb a0, a3, a3; \
+ vpshufb a0, b0, b0; \
+ vpshufb a0, b1, b1; \
+ vpshufb a0, b2, b2; \
+ vpshufb a0, b3, b3; \
+ vpshufb a0, a1, a1; \
+ vpshufb a0, c0, c0; \
+ vpshufb a0, c1, c1; \
+ vpshufb a0, c2, c2; \
+ vpshufb a0, c3, c3; \
+ vpshufb a0, d0, d0; \
+ vpshufb a0, d1, d1; \
+ vpshufb a0, d2, d2; \
+ vpshufb a0, d3, d3; \
+ vmovdqu d3, st1; \
+ vmovdqu st0, d3; \
+ vpshufb a0, d3, a0; \
+ vmovdqu d2, st0; \
+ \
+ transpose_4x4(a0, b0, c0, d0, d2, d3); \
+ transpose_4x4(a1, b1, c1, d1, d2, d3); \
+ vmovdqu st0, d2; \
+ vmovdqu st1, d3; \
+ \
+ vmovdqu b0, st0; \
+ vmovdqu b1, st1; \
+ transpose_4x4(a2, b2, c2, d2, b0, b1); \
+ transpose_4x4(a3, b3, c3, d3, b0, b1); \
+ vmovdqu st0, b0; \
+ vmovdqu st1, b1; \
+ /* does not adjust output bytes inside vectors */
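+
+/*
+ * After byteslice_16x16b() the 16 input blocks are transposed so that
+ * each xmm register holds the same byte position of all 16 blocks,
+ * letting every instruction below operate on 16 blocks at once.
+ */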
+
+#define debyteslice_16x16b(a0, b0, c0, d0, \
+ a1, b1, c1, d1, \
+ a2, b2, c2, d2, \
+ a3, b3, c3, d3, \
+ st0, st1) \
+ vmovdqu d2, st0; \
+ vmovdqu d3, st1; \
+ transpose_4x4(a0, a1, a2, a3, d2, d3); \
+ transpose_4x4(b0, b1, b2, b3, d2, d3); \
+ vmovdqu st0, d2; \
+ vmovdqu st1, d3; \
+ \
+ vmovdqu a0, st0; \
+ vmovdqu a1, st1; \
+ transpose_4x4(c0, c1, c2, c3, a0, a1); \
+ transpose_4x4(d0, d1, d2, d3, a0, a1); \
+ \
+ vmovdqu .Lshufb_16x16b, a0; \
+ vmovdqu st1, a1; \
+ vpshufb a0, a2, a2; \
+ vpshufb a0, a3, a3; \
+ vpshufb a0, b0, b0; \
+ vpshufb a0, b1, b1; \
+ vpshufb a0, b2, b2; \
+ vpshufb a0, b3, b3; \
+ vpshufb a0, a1, a1; \
+ vpshufb a0, c0, c0; \
+ vpshufb a0, c1, c1; \
+ vpshufb a0, c2, c2; \
+ vpshufb a0, c3, c3; \
+ vpshufb a0, d0, d0; \
+ vpshufb a0, d1, d1; \
+ vpshufb a0, d2, d2; \
+ vpshufb a0, d3, d3; \
+ vmovdqu d3, st1; \
+ vmovdqu st0, d3; \
+ vpshufb a0, d3, a0; \
+ vmovdqu d2, st0; \
+ \
+ transpose_4x4(c0, d0, a0, b0, d2, d3); \
+ transpose_4x4(c1, d1, a1, b1, d2, d3); \
+ vmovdqu st0, d2; \
+ vmovdqu st1, d3; \
+ \
+ vmovdqu b0, st0; \
+ vmovdqu b1, st1; \
+ transpose_4x4(c2, d2, a2, b2, b0, b1); \
+ transpose_4x4(c3, d3, a3, b3, b0, b1); \
+ vmovdqu st0, b0; \
+ vmovdqu st1, b1; \
+ /* does not adjust output bytes inside vectors */
+
+/* load blocks to registers and apply pre-whitening */
+#define inpack16_pre(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ rio) \
+ vmovdqu (0 * 16)(rio), x0; \
+ vmovdqu (1 * 16)(rio), x1; \
+ vmovdqu (2 * 16)(rio), x2; \
+ vmovdqu (3 * 16)(rio), x3; \
+ vmovdqu (4 * 16)(rio), x4; \
+ vmovdqu (5 * 16)(rio), x5; \
+ vmovdqu (6 * 16)(rio), x6; \
+ vmovdqu (7 * 16)(rio), x7; \
+ vmovdqu (8 * 16)(rio), y0; \
+ vmovdqu (9 * 16)(rio), y1; \
+ vmovdqu (10 * 16)(rio), y2; \
+ vmovdqu (11 * 16)(rio), y3; \
+ vmovdqu (12 * 16)(rio), y4; \
+ vmovdqu (13 * 16)(rio), y5; \
+ vmovdqu (14 * 16)(rio), y6; \
+ vmovdqu (15 * 16)(rio), y7;
+
+/* byteslice pre-whitened blocks and store to temporary memory */
+#define inpack16_post(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_ab, mem_cd) \
+ byteslice_16x16b(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ (mem_ab), (mem_cd)); \
+ \
+ vmovdqu x0, 0 * 16(mem_ab); \
+ vmovdqu x1, 1 * 16(mem_ab); \
+ vmovdqu x2, 2 * 16(mem_ab); \
+ vmovdqu x3, 3 * 16(mem_ab); \
+ vmovdqu x4, 4 * 16(mem_ab); \
+ vmovdqu x5, 5 * 16(mem_ab); \
+ vmovdqu x6, 6 * 16(mem_ab); \
+ vmovdqu x7, 7 * 16(mem_ab); \
+ vmovdqu y0, 0 * 16(mem_cd); \
+ vmovdqu y1, 1 * 16(mem_cd); \
+ vmovdqu y2, 2 * 16(mem_cd); \
+ vmovdqu y3, 3 * 16(mem_cd); \
+ vmovdqu y4, 4 * 16(mem_cd); \
+ vmovdqu y5, 5 * 16(mem_cd); \
+ vmovdqu y6, 6 * 16(mem_cd); \
+ vmovdqu y7, 7 * 16(mem_cd);
+
+#define write_output(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem) \
+ vmovdqu x0, 0 * 16(mem); \
+ vmovdqu x1, 1 * 16(mem); \
+ vmovdqu x2, 2 * 16(mem); \
+ vmovdqu x3, 3 * 16(mem); \
+ vmovdqu x4, 4 * 16(mem); \
+ vmovdqu x5, 5 * 16(mem); \
+ vmovdqu x6, 6 * 16(mem); \
+ vmovdqu x7, 7 * 16(mem); \
+ vmovdqu y0, 8 * 16(mem); \
+ vmovdqu y1, 9 * 16(mem); \
+ vmovdqu y2, 10 * 16(mem); \
+ vmovdqu y3, 11 * 16(mem); \
+ vmovdqu y4, 12 * 16(mem); \
+ vmovdqu y5, 13 * 16(mem); \
+ vmovdqu y6, 14 * 16(mem); \
+ vmovdqu y7, 15 * 16(mem);
+
+#define aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, idx) \
+ vmovdqu x0, ((idx + 0) * 16)(mem_tmp); \
+ vmovdqu x1, ((idx + 1) * 16)(mem_tmp); \
+ vmovdqu x2, ((idx + 2) * 16)(mem_tmp); \
+ vmovdqu x3, ((idx + 3) * 16)(mem_tmp); \
+ vmovdqu x4, ((idx + 4) * 16)(mem_tmp); \
+ vmovdqu x5, ((idx + 5) * 16)(mem_tmp); \
+ vmovdqu x6, ((idx + 6) * 16)(mem_tmp); \
+ vmovdqu x7, ((idx + 7) * 16)(mem_tmp);
+
+#define aria_load_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, idx) \
+ vmovdqu ((idx + 0) * 16)(mem_tmp), x0; \
+ vmovdqu ((idx + 1) * 16)(mem_tmp), x1; \
+ vmovdqu ((idx + 2) * 16)(mem_tmp), x2; \
+ vmovdqu ((idx + 3) * 16)(mem_tmp), x3; \
+ vmovdqu ((idx + 4) * 16)(mem_tmp), x4; \
+ vmovdqu ((idx + 5) * 16)(mem_tmp), x5; \
+ vmovdqu ((idx + 6) * 16)(mem_tmp), x6; \
+ vmovdqu ((idx + 7) * 16)(mem_tmp), x7;
+
+#define aria_ark_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ t0, rk, idx, round) \
+ /* AddRoundKey */ \
+ vpbroadcastb ((round * 16) + idx + 3)(rk), t0; \
+ vpxor t0, x0, x0; \
+ vpbroadcastb ((round * 16) + idx + 2)(rk), t0; \
+ vpxor t0, x1, x1; \
+ vpbroadcastb ((round * 16) + idx + 1)(rk), t0; \
+ vpxor t0, x2, x2; \
+ vpbroadcastb ((round * 16) + idx + 0)(rk), t0; \
+ vpxor t0, x3, x3; \
+ vpbroadcastb ((round * 16) + idx + 7)(rk), t0; \
+ vpxor t0, x4, x4; \
+ vpbroadcastb ((round * 16) + idx + 6)(rk), t0; \
+ vpxor t0, x5, x5; \
+ vpbroadcastb ((round * 16) + idx + 5)(rk), t0; \
+ vpxor t0, x6, x6; \
+ vpbroadcastb ((round * 16) + idx + 4)(rk), t0; \
+ vpxor t0, x7, x7;
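+
+/*
+ * aria_ark_8way() is AddRoundKey for byte-sliced state: each
+ * vpbroadcastb replicates one byte of the 16-byte round key across a
+ * whole register, so a single XOR applies that key byte to the same
+ * byte position in all 16 blocks.
+ */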
+
+#define aria_sbox_8way_gfni(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ t0, t1, t2, t3, \
+ t4, t5, t6, t7) \
+ vpbroadcastq .Ltf_s2_bitmatrix, t0; \
+ vpbroadcastq .Ltf_inv_bitmatrix, t1; \
+ vpbroadcastq .Ltf_id_bitmatrix, t2; \
+ vpbroadcastq .Ltf_aff_bitmatrix, t3; \
+ vpbroadcastq .Ltf_x2_bitmatrix, t4; \
+ vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \
+ vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \
+ vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \
+ vgf2p8affineqb $(tf_inv_const), t1, x6, x6; \
+ vgf2p8affineinvqb $0, t2, x2, x2; \
+ vgf2p8affineinvqb $0, t2, x6, x6; \
+ vgf2p8affineinvqb $(tf_aff_const), t3, x0, x0; \
+ vgf2p8affineinvqb $(tf_aff_const), t3, x4, x4; \
+ vgf2p8affineqb $(tf_x2_const), t4, x3, x3; \
+ vgf2p8affineqb $(tf_x2_const), t4, x7, x7; \
+ vgf2p8affineinvqb $0, t2, x3, x3; \
+ vgf2p8affineinvqb $0, t2, x7, x7
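+
+/*
+ * With GFNI, each of ARIA's S-boxes (S1, S2 and their inverses) is
+ * computed directly as an affine transform over GF(2^8) via
+ * vgf2p8affineqb/vgf2p8affineinvqb, using the bit matrices and
+ * constants defined in .rodata below.
+ */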
+
+#define aria_sbox_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ t0, t1, t2, t3, \
+ t4, t5, t6, t7) \
+ vpxor t7, t7, t7; \
+ vmovdqa .Linv_shift_row, t0; \
+ vmovdqa .Lshift_row, t1; \
+ vpbroadcastd .L0f0f0f0f, t6; \
+ vmovdqa .Ltf_lo__inv_aff__and__s2, t2; \
+ vmovdqa .Ltf_hi__inv_aff__and__s2, t3; \
+ vmovdqa .Ltf_lo__x2__and__fwd_aff, t4; \
+ vmovdqa .Ltf_hi__x2__and__fwd_aff, t5; \
+ \
+ vaesenclast t7, x0, x0; \
+ vaesenclast t7, x4, x4; \
+ vaesenclast t7, x1, x1; \
+ vaesenclast t7, x5, x5; \
+ vaesdeclast t7, x2, x2; \
+ vaesdeclast t7, x6, x6; \
+ \
+ /* AES inverse shift rows */ \
+ vpshufb t0, x0, x0; \
+ vpshufb t0, x4, x4; \
+ vpshufb t0, x1, x1; \
+ vpshufb t0, x5, x5; \
+ vpshufb t1, x3, x3; \
+ vpshufb t1, x7, x7; \
+ vpshufb t1, x2, x2; \
+ vpshufb t1, x6, x6; \
+ \
+ /* affine transformation for S2 */ \
+ filter_8bit(x1, t2, t3, t6, t0); \
+ /* affine transformation for S2 */ \
+ filter_8bit(x5, t2, t3, t6, t0); \
+ \
+ /* affine transformation for X2 */ \
+ filter_8bit(x3, t4, t5, t6, t0); \
+ /* affine transformation for X2 */ \
+ filter_8bit(x7, t4, t5, t6, t0); \
+ vaesdeclast t7, x3, x3; \
+ vaesdeclast t7, x7, x7;
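+
+/*
+ * Without GFNI, vaesenclast/vaesdeclast with an all-zero round key (t7)
+ * isolate the AES SubBytes/InvSubBytes step; the vpshufb with
+ * .Linv_shift_row/.Lshift_row undoes the ShiftRows those instructions
+ * also perform, and filter_8bit() adds the extra affine transforms that
+ * turn the AES S-box into ARIA's S2 and X2 boxes.
+ */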
+
+#define aria_diff_m(x0, x1, x2, x3, \
+ t0, t1, t2, t3) \
+ /* T = rotr32(X, 8); */ \
+ /* X ^= T */ \
+ vpxor x0, x3, t0; \
+ vpxor x1, x0, t1; \
+ vpxor x2, x1, t2; \
+ vpxor x3, x2, t3; \
+ /* X = T ^ rotr(X, 16); */ \
+ vpxor t2, x0, x0; \
+ vpxor x1, t3, t3; \
+ vpxor t0, x2, x2; \
+ vpxor t1, x3, x1; \
+ vmovdqu t3, x3;
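+
+/*
+ * In the byte-sliced representation the 32-bit rotations of
+ * aria_diff_m() reduce to permutations of the four byte registers, so
+ * the whole mix comes down to a handful of register-to-register XORs.
+ */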
+
+#define aria_diff_word(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7) \
+ /* t1 ^= t2; */ \
+ vpxor y0, x4, x4; \
+ vpxor y1, x5, x5; \
+ vpxor y2, x6, x6; \
+ vpxor y3, x7, x7; \
+ \
+ /* t2 ^= t3; */ \
+ vpxor y4, y0, y0; \
+ vpxor y5, y1, y1; \
+ vpxor y6, y2, y2; \
+ vpxor y7, y3, y3; \
+ \
+ /* t0 ^= t1; */ \
+ vpxor x4, x0, x0; \
+ vpxor x5, x1, x1; \
+ vpxor x6, x2, x2; \
+ vpxor x7, x3, x3; \
+ \
+ /* t3 ^= t1; */ \
+ vpxor x4, y4, y4; \
+ vpxor x5, y5, y5; \
+ vpxor x6, y6, y6; \
+ vpxor x7, y7, y7; \
+ \
+ /* t2 ^= t0; */ \
+ vpxor x0, y0, y0; \
+ vpxor x1, y1, y1; \
+ vpxor x2, y2, y2; \
+ vpxor x3, y3, y3; \
+ \
+ /* t1 ^= t2; */ \
+ vpxor y0, x4, x4; \
+ vpxor y1, x5, x5; \
+ vpxor y2, x6, x6; \
+ vpxor y3, x7, x7;
+
+#define aria_fe(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, rk, round) \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 8, round); \
+ \
+ aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \
+ y0, y1, y2, y3, y4, y5, y6, y7); \
+ \
+ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \
+ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 8); \
+ \
+ aria_load_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 0, round); \
+ \
+ aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \
+ y0, y1, y2, y3, y4, y5, y6, y7); \
+ \
+ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \
+ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_load_state_8way(y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, 8); \
+ aria_diff_word(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ /* aria_diff_byte() \
+ * T3 = ABCD -> BADC \
+ * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \
+ * T0 = ABCD -> CDAB \
+ * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \
+ * T1 = ABCD -> DCBA \
+ * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \
+ */ \
+ aria_diff_word(x2, x3, x0, x1, \
+ x7, x6, x5, x4, \
+ y0, y1, y2, y3, \
+ y5, y4, y7, y6); \
+ aria_store_state_8way(x3, x2, x1, x0, \
+ x6, x7, x4, x5, \
+ mem_tmp, 0);
+
+#define aria_fo(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, rk, round) \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 8, round); \
+ \
+ aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, y1, y2, y3, y4, y5, y6, y7); \
+ \
+ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \
+ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 8); \
+ \
+ aria_load_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 0, round); \
+ \
+ aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, y1, y2, y3, y4, y5, y6, y7); \
+ \
+ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \
+ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_load_state_8way(y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, 8); \
+ aria_diff_word(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ /* aria_diff_byte() \
+ * T1 = ABCD -> BADC \
+ * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \
+ * T2 = ABCD -> CDAB \
+ * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \
+ * T3 = ABCD -> DCBA \
+ * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \
+ */ \
+ aria_diff_word(x0, x1, x2, x3, \
+ x5, x4, x7, x6, \
+ y2, y3, y0, y1, \
+ y7, y6, y5, y4); \
+ aria_store_state_8way(x3, x2, x1, x0, \
+ x6, x7, x4, x5, \
+ mem_tmp, 0);
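+
+/*
+ * aria_fo()/aria_fe() are the odd- and even-round functions: each runs
+ * the ARK -> S-layer -> diffusion sequence over both 8-register halves
+ * of the state, and they differ in the S-box ordering and in the final
+ * aria_diff_byte() permutation (see the comments above).
+ */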
+
+#define aria_ff(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, rk, round, last_round) \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 8, round); \
+ \
+ aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \
+ y0, y1, y2, y3, y4, y5, y6, y7); \
+ \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 8, last_round); \
+ \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 8); \
+ \
+ aria_load_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 0, round); \
+ \
+ aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \
+ y0, y1, y2, y3, y4, y5, y6, y7); \
+ \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 0, last_round); \
+ \
+ aria_load_state_8way(y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, 8);
+
+#define aria_fe_gfni(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, rk, round) \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 8, round); \
+ \
+ aria_sbox_8way_gfni(x2, x3, x0, x1, \
+ x6, x7, x4, x5, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ \
+ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \
+ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 8); \
+ \
+ aria_load_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 0, round); \
+ \
+ aria_sbox_8way_gfni(x2, x3, x0, x1, \
+ x6, x7, x4, x5, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ \
+ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \
+ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_load_state_8way(y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, 8); \
+ aria_diff_word(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ /* aria_diff_byte() \
+ * T3 = ABCD -> BADC \
+ * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \
+ * T0 = ABCD -> CDAB \
+ * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \
+ * T1 = ABCD -> DCBA \
+ * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \
+ */ \
+ aria_diff_word(x2, x3, x0, x1, \
+ x7, x6, x5, x4, \
+ y0, y1, y2, y3, \
+ y5, y4, y7, y6); \
+ aria_store_state_8way(x3, x2, x1, x0, \
+ x6, x7, x4, x5, \
+ mem_tmp, 0);
+
+#define aria_fo_gfni(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, rk, round) \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 8, round); \
+ \
+ aria_sbox_8way_gfni(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ \
+ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \
+ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 8); \
+ \
+ aria_load_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 0, round); \
+ \
+ aria_sbox_8way_gfni(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ \
+ aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \
+ aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_load_state_8way(y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, 8); \
+ aria_diff_word(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ /* aria_diff_byte() \
+ * T1 = ABCD -> BADC \
+ * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \
+ * T2 = ABCD -> CDAB \
+ * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \
+ * T3 = ABCD -> DCBA \
+ * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \
+ */ \
+ aria_diff_word(x0, x1, x2, x3, \
+ x5, x4, x7, x6, \
+ y2, y3, y0, y1, \
+ y7, y6, y5, y4); \
+ aria_store_state_8way(x3, x2, x1, x0, \
+ x6, x7, x4, x5, \
+ mem_tmp, 0);
+
+#define aria_ff_gfni(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, rk, round, last_round) \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 8, round); \
+ \
+ aria_sbox_8way_gfni(x2, x3, x0, x1, \
+ x6, x7, x4, x5, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 8, last_round); \
+ \
+ aria_store_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 8); \
+ \
+ aria_load_state_8way(x0, x1, x2, x3, \
+ x4, x5, x6, x7, \
+ mem_tmp, 0); \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 0, round); \
+ \
+ aria_sbox_8way_gfni(x2, x3, x0, x1, \
+ x6, x7, x4, x5, \
+ y0, y1, y2, y3, \
+ y4, y5, y6, y7); \
+ \
+ aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \
+ y0, rk, 0, last_round); \
+ \
+ aria_load_state_8way(y0, y1, y2, y3, \
+ y4, y5, y6, y7, \
+ mem_tmp, 8);
+
+/* NB: section is mergeable; all elements must be 16-byte aligned */
+.section .rodata.cst16, "aM", @progbits, 16
+.align 16
+
+#define SHUFB_BYTES(idx) \
+ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx)
+
+.Lshufb_16x16b:
+ .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3);
+/* For isolating SubBytes from AESENCLAST, inverse shift row */
+.Linv_shift_row:
+ .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
+ .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
+.Lshift_row:
+ .byte 0x00, 0x05, 0x0a, 0x0f, 0x04, 0x09, 0x0e, 0x03
+ .byte 0x08, 0x0d, 0x02, 0x07, 0x0c, 0x01, 0x06, 0x0b
+/* For CTR-mode IV byteswap */
+.Lbswap128_mask:
+ .byte 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08
+ .byte 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
+
+/* AES inverse affine and S2 combined:
+ * 1 1 0 0 0 0 0 1 x0 0
+ * 0 1 0 0 1 0 0 0 x1 0
+ * 1 1 0 0 1 1 1 1 x2 0
+ * 0 1 1 0 1 0 0 1 x3 1
+ * 0 1 0 0 1 1 0 0 * x4 + 0
+ * 0 1 0 1 1 0 0 0 x5 0
+ * 0 0 0 0 0 1 0 1 x6 0
+ * 1 1 1 0 0 1 1 1 x7 1
+ */
+.Ltf_lo__inv_aff__and__s2:
+ .octa 0x92172DA81A9FA520B2370D883ABF8500
+.Ltf_hi__inv_aff__and__s2:
+ .octa 0x2B15FFC1AF917B45E6D8320C625CB688
+
+/* X2 and AES forward affine combined:
+ * 1 0 1 1 0 0 0 1 x0 0
+ * 0 1 1 1 1 0 1 1 x1 0
+ * 0 0 0 1 1 0 1 0 x2 1
+ * 0 1 0 0 0 1 0 0 x3 0
+ * 0 0 1 1 1 0 1 1 * x4 + 0
+ * 0 1 0 0 1 0 0 0 x5 0
+ * 1 1 0 1 0 0 1 1 x6 0
+ * 0 1 0 0 1 0 1 0 x7 0
+ */
+.Ltf_lo__x2__and__fwd_aff:
+ .octa 0xEFAE0544FCBD1657B8F95213ABEA4100
+.Ltf_hi__x2__and__fwd_aff:
+ .octa 0x3F893781E95FE1576CDA64D2BA0CB204
+
+.section .rodata.cst8, "aM", @progbits, 8
+.align 8
+/* AES affine: */
+#define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0)
+.Ltf_aff_bitmatrix:
+ .quad BM8X8(BV8(1, 0, 0, 0, 1, 1, 1, 1),
+ BV8(1, 1, 0, 0, 0, 1, 1, 1),
+ BV8(1, 1, 1, 0, 0, 0, 1, 1),
+ BV8(1, 1, 1, 1, 0, 0, 0, 1),
+ BV8(1, 1, 1, 1, 1, 0, 0, 0),
+ BV8(0, 1, 1, 1, 1, 1, 0, 0),
+ BV8(0, 0, 1, 1, 1, 1, 1, 0),
+ BV8(0, 0, 0, 1, 1, 1, 1, 1))
+
+/* AES inverse affine: */
+#define tf_inv_const BV8(1, 0, 1, 0, 0, 0, 0, 0)
+.Ltf_inv_bitmatrix:
+ .quad BM8X8(BV8(0, 0, 1, 0, 0, 1, 0, 1),
+ BV8(1, 0, 0, 1, 0, 0, 1, 0),
+ BV8(0, 1, 0, 0, 1, 0, 0, 1),
+ BV8(1, 0, 1, 0, 0, 1, 0, 0),
+ BV8(0, 1, 0, 1, 0, 0, 1, 0),
+ BV8(0, 0, 1, 0, 1, 0, 0, 1),
+ BV8(1, 0, 0, 1, 0, 1, 0, 0),
+ BV8(0, 1, 0, 0, 1, 0, 1, 0))
+
+/* S2: */
+#define tf_s2_const BV8(0, 1, 0, 0, 0, 1, 1, 1)
+.Ltf_s2_bitmatrix:
+ .quad BM8X8(BV8(0, 1, 0, 1, 0, 1, 1, 1),
+ BV8(0, 0, 1, 1, 1, 1, 1, 1),
+ BV8(1, 1, 1, 0, 1, 1, 0, 1),
+ BV8(1, 1, 0, 0, 0, 0, 1, 1),
+ BV8(0, 1, 0, 0, 0, 0, 1, 1),
+ BV8(1, 1, 0, 0, 1, 1, 1, 0),
+ BV8(0, 1, 1, 0, 0, 0, 1, 1),
+ BV8(1, 1, 1, 1, 0, 1, 1, 0))
+
+/* X2: */
+#define tf_x2_const BV8(0, 0, 1, 1, 0, 1, 0, 0)
+.Ltf_x2_bitmatrix:
+ .quad BM8X8(BV8(0, 0, 0, 1, 1, 0, 0, 0),
+ BV8(0, 0, 1, 0, 0, 1, 1, 0),
+ BV8(0, 0, 0, 0, 1, 0, 1, 0),
+ BV8(1, 1, 1, 0, 0, 0, 1, 1),
+ BV8(1, 1, 1, 0, 1, 1, 0, 0),
+ BV8(0, 1, 1, 0, 1, 0, 1, 1),
+ BV8(1, 0, 1, 1, 1, 1, 0, 1),
+ BV8(1, 0, 0, 1, 0, 0, 1, 1))
+
+/* Identity matrix: */
+.Ltf_id_bitmatrix:
+ .quad BM8X8(BV8(1, 0, 0, 0, 0, 0, 0, 0),
+ BV8(0, 1, 0, 0, 0, 0, 0, 0),
+ BV8(0, 0, 1, 0, 0, 0, 0, 0),
+ BV8(0, 0, 0, 1, 0, 0, 0, 0),
+ BV8(0, 0, 0, 0, 1, 0, 0, 0),
+ BV8(0, 0, 0, 0, 0, 1, 0, 0),
+ BV8(0, 0, 0, 0, 0, 0, 1, 0),
+ BV8(0, 0, 0, 0, 0, 0, 0, 1))
+
+/* 4-bit mask */
+.section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
+.align 4
+.L0f0f0f0f:
+ .long 0x0f0f0f0f
+
+.text
+
+SYM_FUNC_START_LOCAL(__aria_aesni_avx_crypt_16way)
+ /* input:
+ * %r9: rk
+ * %rsi: dst
+ * %rdx: src
+ * %xmm0..%xmm15: 16 byte-sliced blocks
+ */
+
+ FRAME_BEGIN
+
+ movq %rsi, %rax;
+ leaq 8 * 16(%rax), %r8;
+
+ inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r8);
+ aria_fo(%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 0);
+ aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 1);
+ aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 2);
+ aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 3);
+ aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 4);
+ aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 5);
+ aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 6);
+ aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 7);
+ aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 8);
+ aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 9);
+ aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 10);
+ cmpl $12, rounds(CTX);
+ jne .Laria_192;
+ aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 11, 12);
+ jmp .Laria_end;
+.Laria_192:
+ aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 11);
+ aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 12);
+ cmpl $14, rounds(CTX);
+ jne .Laria_256;
+ aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 13, 14);
+ jmp .Laria_end;
+.Laria_256:
+ aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 13);
+ aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 14);
+ aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 15, 16);
+.Laria_end:
+ debyteslice_16x16b(%xmm8, %xmm12, %xmm1, %xmm4,
+ %xmm9, %xmm13, %xmm0, %xmm5,
+ %xmm10, %xmm14, %xmm3, %xmm6,
+ %xmm11, %xmm15, %xmm2, %xmm7,
+ (%rax), (%r8));
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(__aria_aesni_avx_crypt_16way)
+
+SYM_FUNC_START(aria_aesni_avx_encrypt_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ FRAME_BEGIN
+
+ leaq enc_key(CTX), %r9;
+
+ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rdx);
+
+ call __aria_aesni_avx_crypt_16way;
+
+ write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax);
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(aria_aesni_avx_encrypt_16way)
+
+SYM_FUNC_START(aria_aesni_avx_decrypt_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ FRAME_BEGIN
+
+ leaq dec_key(CTX), %r9;
+
+ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rdx);
+
+ call __aria_aesni_avx_crypt_16way;
+
+ write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax);
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(aria_aesni_avx_decrypt_16way)
+
+SYM_FUNC_START_LOCAL(__aria_aesni_avx_ctr_gen_keystream_16way)
+ /* input:
+ * %rdi: ctx
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: keystream
+ * %r8: iv (big endian, 128bit)
+ */
+
+ FRAME_BEGIN
+ /* load IV and byteswap */
+ vmovdqu (%r8), %xmm8;
+
+ vmovdqa .Lbswap128_mask (%rip), %xmm1;
+ vpshufb %xmm1, %xmm8, %xmm3; /* be => le */
+
+ vpcmpeqd %xmm0, %xmm0, %xmm0;
+ vpsrldq $8, %xmm0, %xmm0; /* low: -1, high: 0 */
+
+ /* construct IVs */
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm9;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm10;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm11;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm12;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm13;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm14;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm15;
+ vmovdqu %xmm8, (0 * 16)(%rcx);
+ vmovdqu %xmm9, (1 * 16)(%rcx);
+ vmovdqu %xmm10, (2 * 16)(%rcx);
+ vmovdqu %xmm11, (3 * 16)(%rcx);
+ vmovdqu %xmm12, (4 * 16)(%rcx);
+ vmovdqu %xmm13, (5 * 16)(%rcx);
+ vmovdqu %xmm14, (6 * 16)(%rcx);
+ vmovdqu %xmm15, (7 * 16)(%rcx);
+
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm8;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm9;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm10;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm11;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm12;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm13;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm14;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm15;
+ inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */
+ vpshufb %xmm1, %xmm3, %xmm4;
+ vmovdqu %xmm4, (%r8);
+
+ vmovdqu (0 * 16)(%rcx), %xmm0;
+ vmovdqu (1 * 16)(%rcx), %xmm1;
+ vmovdqu (2 * 16)(%rcx), %xmm2;
+ vmovdqu (3 * 16)(%rcx), %xmm3;
+ vmovdqu (4 * 16)(%rcx), %xmm4;
+ vmovdqu (5 * 16)(%rcx), %xmm5;
+ vmovdqu (6 * 16)(%rcx), %xmm6;
+ vmovdqu (7 * 16)(%rcx), %xmm7;
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(__aria_aesni_avx_ctr_gen_keystream_16way)
+
+SYM_FUNC_START(aria_aesni_avx_ctr_crypt_16way)
+ /* input:
+ * %rdi: ctx
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: keystream
+ * %r8: iv (big endian, 128bit)
+ */
+ FRAME_BEGIN
+
+ call __aria_aesni_avx_ctr_gen_keystream_16way;
+
+ leaq (%rsi), %r10;
+ leaq (%rdx), %r11;
+ leaq (%rcx), %rsi;
+ leaq (%rcx), %rdx;
+ leaq enc_key(CTX), %r9;
+
+ call __aria_aesni_avx_crypt_16way;
+
+ vpxor (0 * 16)(%r11), %xmm1, %xmm1;
+ vpxor (1 * 16)(%r11), %xmm0, %xmm0;
+ vpxor (2 * 16)(%r11), %xmm3, %xmm3;
+ vpxor (3 * 16)(%r11), %xmm2, %xmm2;
+ vpxor (4 * 16)(%r11), %xmm4, %xmm4;
+ vpxor (5 * 16)(%r11), %xmm5, %xmm5;
+ vpxor (6 * 16)(%r11), %xmm6, %xmm6;
+ vpxor (7 * 16)(%r11), %xmm7, %xmm7;
+ vpxor (8 * 16)(%r11), %xmm8, %xmm8;
+ vpxor (9 * 16)(%r11), %xmm9, %xmm9;
+ vpxor (10 * 16)(%r11), %xmm10, %xmm10;
+ vpxor (11 * 16)(%r11), %xmm11, %xmm11;
+ vpxor (12 * 16)(%r11), %xmm12, %xmm12;
+ vpxor (13 * 16)(%r11), %xmm13, %xmm13;
+ vpxor (14 * 16)(%r11), %xmm14, %xmm14;
+ vpxor (15 * 16)(%r11), %xmm15, %xmm15;
+ write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %r10);
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(aria_aesni_avx_ctr_crypt_16way)
+
+SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
+ /* input:
+ * %r9: rk
+ * %rsi: dst
+ * %rdx: src
+ * %xmm0..%xmm15: 16 byte-sliced blocks
+ */
+
+ FRAME_BEGIN
+
+ movq %rsi, %rax;
+ leaq 8 * 16(%rax), %r8;
+
+ inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r8);
+ aria_fo_gfni(%xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 0);
+ aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 1);
+ aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10,
+ %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 2);
+ aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 3);
+ aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10,
+ %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 4);
+ aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 5);
+ aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10,
+ %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 6);
+ aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 7);
+ aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10,
+ %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 8);
+ aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 9);
+ aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10,
+ %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 10);
+ cmpl $12, rounds(CTX);
+ jne .Laria_gfni_192;
+ aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 11, 12);
+ jmp .Laria_gfni_end;
+.Laria_gfni_192:
+ aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 11);
+ aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10,
+ %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 12);
+ cmpl $14, rounds(CTX);
+ jne .Laria_gfni_256;
+ aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 13, 14);
+ jmp .Laria_gfni_end;
+.Laria_gfni_256:
+ aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 13);
+ aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10,
+ %xmm12, %xmm13, %xmm14, %xmm15,
+ %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %rax, %r9, 14);
+ aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2,
+ %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11,
+ %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax, %r9, 15, 16);
+.Laria_gfni_end:
+ debyteslice_16x16b(%xmm8, %xmm12, %xmm1, %xmm4,
+ %xmm9, %xmm13, %xmm0, %xmm5,
+ %xmm10, %xmm14, %xmm3, %xmm6,
+ %xmm11, %xmm15, %xmm2, %xmm7,
+ (%rax), (%r8));
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(__aria_aesni_avx_gfni_crypt_16way)
+
+SYM_FUNC_START(aria_aesni_avx_gfni_encrypt_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ FRAME_BEGIN
+
+ leaq enc_key(CTX), %r9;
+
+ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rdx);
+
+ call __aria_aesni_avx_gfni_crypt_16way;
+
+ write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax);
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(aria_aesni_avx_gfni_encrypt_16way)
+
+SYM_FUNC_START(aria_aesni_avx_gfni_decrypt_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+ * %rdx: src
+ */
+
+ FRAME_BEGIN
+
+ leaq dec_key(CTX), %r9;
+
+ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rdx);
+
+ call __aria_aesni_avx_gfni_crypt_16way;
+
+ write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %rax);
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(aria_aesni_avx_gfni_decrypt_16way)
+
+SYM_FUNC_START(aria_aesni_avx_gfni_ctr_crypt_16way)
+ /* input:
+ * %rdi: ctx
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: keystream
+ * %r8: iv (big endian, 128bit)
+ */
+ FRAME_BEGIN
+
+ call __aria_aesni_avx_ctr_gen_keystream_16way;
+
+ leaq (%rsi), %r10;
+ leaq (%rdx), %r11;
+ leaq (%rcx), %rsi;
+ leaq (%rcx), %rdx;
+ leaq enc_key(CTX), %r9;
+
+ call __aria_aesni_avx_gfni_crypt_16way;
+
+ vpxor (0 * 16)(%r11), %xmm1, %xmm1;
+ vpxor (1 * 16)(%r11), %xmm0, %xmm0;
+ vpxor (2 * 16)(%r11), %xmm3, %xmm3;
+ vpxor (3 * 16)(%r11), %xmm2, %xmm2;
+ vpxor (4 * 16)(%r11), %xmm4, %xmm4;
+ vpxor (5 * 16)(%r11), %xmm5, %xmm5;
+ vpxor (6 * 16)(%r11), %xmm6, %xmm6;
+ vpxor (7 * 16)(%r11), %xmm7, %xmm7;
+ vpxor (8 * 16)(%r11), %xmm8, %xmm8;
+ vpxor (9 * 16)(%r11), %xmm9, %xmm9;
+ vpxor (10 * 16)(%r11), %xmm10, %xmm10;
+ vpxor (11 * 16)(%r11), %xmm11, %xmm11;
+ vpxor (12 * 16)(%r11), %xmm12, %xmm12;
+ vpxor (13 * 16)(%r11), %xmm13, %xmm13;
+ vpxor (14 * 16)(%r11), %xmm14, %xmm14;
+ vpxor (15 * 16)(%r11), %xmm15, %xmm15;
+ write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, %r10);
+
+ FRAME_END
+ RET;
+SYM_FUNC_END(aria_aesni_avx_gfni_ctr_crypt_16way)
diff --git a/arch/x86/crypto/aria-avx.h b/arch/x86/crypto/aria-avx.h
new file mode 100644
index 000000000000..01e9a01dc157
--- /dev/null
+++ b/arch/x86/crypto/aria-avx.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef ASM_X86_ARIA_AVX_H
+#define ASM_X86_ARIA_AVX_H
+
+#include <linux/types.h>
+
+#define ARIA_AESNI_PARALLEL_BLOCKS 16
+#define ARIA_AESNI_PARALLEL_BLOCK_SIZE (ARIA_BLOCK_SIZE * 16)
+
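+/*
+ * Filled in at module init with either the plain AES-NI/AVX or the
+ * GFNI-accelerated 16-way routines, depending on CPU features.
+ */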
+struct aria_avx_ops {
+ void (*aria_encrypt_16way)(const void *ctx, u8 *dst, const u8 *src);
+ void (*aria_decrypt_16way)(const void *ctx, u8 *dst, const u8 *src);
+ void (*aria_ctr_crypt_16way)(const void *ctx, u8 *dst, const u8 *src,
+ u8 *keystream, u8 *iv);
+};
+#endif
diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c
new file mode 100644
index 000000000000..c561ea4fefa5
--- /dev/null
+++ b/arch/x86/crypto/aria_aesni_avx_glue.c
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Glue Code for the AVX/AES-NI/GFNI assembler implementation of the ARIA Cipher
+ *
+ * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/aria.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "ecb_cbc_helpers.h"
+#include "aria-avx.h"
+
+asmlinkage void aria_aesni_avx_encrypt_16way(const void *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void aria_aesni_avx_decrypt_16way(const void *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void aria_aesni_avx_ctr_crypt_16way(const void *ctx, u8 *dst,
+ const u8 *src,
+ u8 *keystream, u8 *iv);
+asmlinkage void aria_aesni_avx_gfni_encrypt_16way(const void *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void aria_aesni_avx_gfni_decrypt_16way(const void *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void aria_aesni_avx_gfni_ctr_crypt_16way(const void *ctx, u8 *dst,
+ const u8 *src,
+ u8 *keystream, u8 *iv);
+
+static struct aria_avx_ops aria_ops;
+
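+/*
+ * The ECB helpers below walk the request widest-stride first: 16 blocks
+ * per call into the SIMD routine, then any remainder one block at a
+ * time via the generic aria_encrypt()/aria_decrypt().
+ */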
+static int ecb_do_encrypt(struct skcipher_request *req, const u32 *rkey)
+{
+ ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
+ ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_encrypt_16way);
+ ECB_BLOCK(1, aria_encrypt);
+ ECB_WALK_END();
+}
+
+static int ecb_do_decrypt(struct skcipher_request *req, const u32 *rkey)
+{
+ ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
+ ECB_BLOCK(ARIA_AESNI_PARALLEL_BLOCKS, aria_ops.aria_decrypt_16way);
+ ECB_BLOCK(1, aria_decrypt);
+ ECB_WALK_END();
+}
+
+static int aria_avx_ecb_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return ecb_do_encrypt(req, ctx->enc_key[0]);
+}
+
+static int aria_avx_ecb_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return ecb_do_decrypt(req, ctx->dec_key[0]);
+}
+
+static int aria_avx_set_key(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return aria_set_key(&tfm->base, key, keylen);
+}
+
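+/*
+ * CTR runs in three tiers: 16 blocks at a time through the SIMD
+ * keystream routine (the FPU is held only across each 16-way call),
+ * then whole single blocks via the generic cipher, and finally a
+ * partial block that takes just the bytes it needs from one more
+ * keystream block.
+ */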
+static int aria_avx_ctr_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = walk.nbytes) > 0) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+
+ while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) {
+ u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE];
+
+ kernel_fpu_begin();
+ aria_ops.aria_ctr_crypt_16way(ctx, dst, src, keystream,
+ walk.iv);
+ kernel_fpu_end();
+ dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
+ src += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
+ nbytes -= ARIA_AESNI_PARALLEL_BLOCK_SIZE;
+ }
+
+ while (nbytes >= ARIA_BLOCK_SIZE) {
+ u8 keystream[ARIA_BLOCK_SIZE];
+
+ memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE);
+ crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
+
+ aria_encrypt(ctx, keystream, keystream);
+
+ crypto_xor_cpy(dst, src, keystream, ARIA_BLOCK_SIZE);
+ dst += ARIA_BLOCK_SIZE;
+ src += ARIA_BLOCK_SIZE;
+ nbytes -= ARIA_BLOCK_SIZE;
+ }
+
+ if (walk.nbytes == walk.total && nbytes > 0) {
+ u8 keystream[ARIA_BLOCK_SIZE];
+
+ memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE);
+ crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
+
+ aria_encrypt(ctx, keystream, keystream);
+
+ crypto_xor_cpy(dst, src, keystream, nbytes);
+ dst += nbytes;
+ src += nbytes;
+ nbytes = 0;
+ }
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ return err;
+}
+
+static struct skcipher_alg aria_algs[] = {
+ {
+ .base.cra_name = "__ecb(aria)",
+ .base.cra_driver_name = "__ecb-aria-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = ARIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aria_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = ARIA_MIN_KEY_SIZE,
+ .max_keysize = ARIA_MAX_KEY_SIZE,
+ .setkey = aria_avx_set_key,
+ .encrypt = aria_avx_ecb_encrypt,
+ .decrypt = aria_avx_ecb_decrypt,
+ }, {
+ .base.cra_name = "__ctr(aria)",
+ .base.cra_driver_name = "__ctr-aria-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct aria_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = ARIA_MIN_KEY_SIZE,
+ .max_keysize = ARIA_MAX_KEY_SIZE,
+ .ivsize = ARIA_BLOCK_SIZE,
+ .chunksize = ARIA_BLOCK_SIZE,
+ .walksize = 16 * ARIA_BLOCK_SIZE,
+ .setkey = aria_avx_set_key,
+ .encrypt = aria_avx_ctr_encrypt,
+ .decrypt = aria_avx_ctr_encrypt,
+ }
+};
+
+static struct simd_skcipher_alg *aria_simd_algs[ARRAY_SIZE(aria_algs)];
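+
+/*
+ * The "__" algorithms above are CRYPTO_ALG_INTERNAL: users reach them
+ * through the simd skcipher wrappers registered below, which defer to
+ * cryptd whenever the FPU is not usable in the caller's context.
+ */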
+
+static int __init aria_avx_init(void)
+{
+ const char *feature_name;
+
+ if (!boot_cpu_has(X86_FEATURE_AVX) ||
+ !boot_cpu_has(X86_FEATURE_AES) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+ pr_info("AVX or AES-NI instructions are not detected.\n");
+ return -ENODEV;
+ }
+
+ if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+ &feature_name)) {
+ pr_info("CPU feature '%s' is not supported.\n", feature_name);
+ return -ENODEV;
+ }
+
+ if (boot_cpu_has(X86_FEATURE_GFNI)) {
+ aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
+ aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
+ aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
+ } else {
+ aria_ops.aria_encrypt_16way = aria_aesni_avx_encrypt_16way;
+ aria_ops.aria_decrypt_16way = aria_aesni_avx_decrypt_16way;
+ aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_ctr_crypt_16way;
+ }
+
+ return simd_register_skciphers_compat(aria_algs,
+ ARRAY_SIZE(aria_algs),
+ aria_simd_algs);
+}
+
+static void __exit aria_avx_exit(void)
+{
+ simd_unregister_skciphers(aria_algs, ARRAY_SIZE(aria_algs),
+ aria_simd_algs);
+}
+
+module_init(aria_avx_init);
+module_exit(aria_avx_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
+MODULE_DESCRIPTION("ARIA Cipher Algorithm, AVX/AES-NI/GFNI optimized");
+MODULE_ALIAS_CRYPTO("aria");
+MODULE_ALIAS_CRYPTO("aria-aesni-avx");
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 30e70f4fe2f7..6d3b85e53d0e 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
+#include <asm/cpu_device_id.h>
#include <asm/simd.h>
asmlinkage void sha512_transform_ssse3(struct sha512_state *state,
@@ -284,6 +285,13 @@ static int register_sha512_avx2(void)
ARRAY_SIZE(sha512_avx2_algs));
return 0;
}
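+/*
+ * Autoload only on CPUs that can run at least one of the SSSE3, AVX, or
+ * AVX2 variants; sha512_ssse3_mod_init() checks the same table.
+ */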
+static const struct x86_cpu_id module_cpu_ids[] = {
+ X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
+ X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
+ X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
static void unregister_sha512_avx2(void)
{
@@ -294,6 +302,8 @@ static void unregister_sha512_avx2(void)
static int __init sha512_ssse3_mod_init(void)
{
+ if (!x86_match_cpu(module_cpu_ids))
+ return -ENODEV;
if (register_sha512_ssse3())
goto fail;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bb427a835e44..2589ad5357df 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -15,13 +15,13 @@ source "crypto/async_tx/Kconfig"
#
menuconfig CRYPTO
tristate "Cryptographic API"
- select LIB_MEMNEQ
+ select CRYPTO_LIB_UTILS
help
This option provides the core Cryptographic API.
if CRYPTO
-comment "Crypto core or helper"
+menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
@@ -219,7 +219,8 @@ config CRYPTO_AUTHENC
select CRYPTO_NULL
help
Authenc: Combined mode wrapper for IPsec.
- This is required for IPSec.
+
+ This is required for IPSec ESP (XFRM_ESP).
config CRYPTO_TEST
tristate "Testing module"
@@ -235,54 +236,65 @@ config CRYPTO_SIMD
config CRYPTO_ENGINE
tristate
-comment "Public-key cryptography"
+endmenu
+
+menu "Public-key cryptography"
config CRYPTO_RSA
- tristate "RSA algorithm"
+ tristate "RSA (Rivest-Shamir-Adleman)"
select CRYPTO_AKCIPHER
select CRYPTO_MANAGER
select MPILIB
select ASN1
help
- Generic implementation of the RSA public key algorithm.
+ RSA (Rivest-Shamir-Adleman) public key algorithm (RFC8017)
config CRYPTO_DH
- tristate "Diffie-Hellman algorithm"
+ tristate "DH (Diffie-Hellman)"
select CRYPTO_KPP
select MPILIB
help
- Generic implementation of the Diffie-Hellman algorithm.
+ DH (Diffie-Hellman) key exchange algorithm
config CRYPTO_DH_RFC7919_GROUPS
- bool "Support for RFC 7919 FFDHE group parameters"
+ bool "RFC 7919 FFDHE groups"
depends on CRYPTO_DH
select CRYPTO_RNG_DEFAULT
help
- Provide support for RFC 7919 FFDHE group parameters. If unsure, say N.
+ FFDHE (Finite-Field-based Diffie-Hellman Ephemeral) groups
+ defined in RFC7919.
+
+ Support these finite-field groups in DH key exchanges:
+ - ffdhe2048, ffdhe3072, ffdhe4096, ffdhe6144, ffdhe8192
+
+ If unsure, say N.
config CRYPTO_ECC
tristate
select CRYPTO_RNG_DEFAULT
config CRYPTO_ECDH
- tristate "ECDH algorithm"
+ tristate "ECDH (Elliptic Curve Diffie-Hellman)"
select CRYPTO_ECC
select CRYPTO_KPP
help
- Generic implementation of the ECDH algorithm
+ ECDH (Elliptic Curve Diffie-Hellman) key exchange algorithm
+ using curves P-192, P-256, and P-384 (FIPS 186)
config CRYPTO_ECDSA
- tristate "ECDSA (NIST P192, P256 etc.) algorithm"
+ tristate "ECDSA (Elliptic Curve Digital Signature Algorithm)"
select CRYPTO_ECC
select CRYPTO_AKCIPHER
select ASN1
help
- Elliptic Curve Digital Signature Algorithm (NIST P192, P256 etc.)
- is A NIST cryptographic standard algorithm. Only signature verification
- is implemented.
+ ECDSA (Elliptic Curve Digital Signature Algorithm) (FIPS 186,
+ ISO/IEC 14888-3)
+ using curves P-192, P-256, and P-384
+
+ Only signature verification is implemented.
config CRYPTO_ECRDSA
- tristate "EC-RDSA (GOST 34.10) algorithm"
+ tristate "EC-RDSA (Elliptic Curve Russian Digital Signature Algorithm)"
select CRYPTO_ECC
select CRYPTO_AKCIPHER
select CRYPTO_STREEBOG
@@ -290,184 +302,441 @@ config CRYPTO_ECRDSA
select ASN1
help
Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012,
- RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic
- standard algorithms (called GOST algorithms). Only signature verification
- is implemented.
+ RFC 7091, ISO/IEC 14888-3)
+
+ One of the Russian cryptographic standard algorithms (called GOST
+ algorithms). Only signature verification is implemented.
config CRYPTO_SM2
- tristate "SM2 algorithm"
+ tristate "SM2 (ShangMi 2)"
select CRYPTO_SM3
select CRYPTO_AKCIPHER
select CRYPTO_MANAGER
select MPILIB
select ASN1
help
- Generic implementation of the SM2 public key algorithm. It was
- published by State Encryption Management Bureau, China.
+ SM2 (ShangMi 2) public key algorithm
+
+ Published by State Encryption Management Bureau, China,
as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
References:
- https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ https://datatracker.ietf.org/doc/draft-shen-sm2-ecdsa/
http://www.oscca.gov.cn/sca/xxgk/2010-12/17/content_1002386.shtml
http://www.gmbz.org.cn/main/bzlb.html
config CRYPTO_CURVE25519
- tristate "Curve25519 algorithm"
+ tristate "Curve25519"
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
+ help
+ Curve25519 elliptic curve (RFC7748)
-config CRYPTO_CURVE25519_X86
- tristate "x86_64 accelerated Curve25519 scalar multiplication library"
- depends on X86 && 64BIT
- select CRYPTO_LIB_CURVE25519_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+endmenu
-comment "Authenticated Encryption with Associated Data"
+menu "Block ciphers"
-config CRYPTO_CCM
- tristate "CCM support"
- select CRYPTO_CTR
- select CRYPTO_HASH
- select CRYPTO_AEAD
- select CRYPTO_MANAGER
+config CRYPTO_AES
+ tristate "AES (Advanced Encryption Standard)"
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_AES
help
- Support for Counter with CBC MAC. Required for IPsec.
+ AES cipher algorithms (Rijndael) (FIPS-197, ISO/IEC 18033-3)
-config CRYPTO_GCM
- tristate "GCM/GMAC support"
- select CRYPTO_CTR
- select CRYPTO_AEAD
- select CRYPTO_GHASH
- select CRYPTO_NULL
- select CRYPTO_MANAGER
+ Rijndael appears to be consistently a very good performer in
+ both hardware and software across a wide range of computing
+ environments regardless of its use in feedback or non-feedback
+ modes. Its key setup time is excellent, and its key agility is
+ good. Rijndael's very low memory requirements make it very well
+ suited for restricted-space environments, in which it also
+ demonstrates excellent performance. Rijndael's operations are
+ among the easiest to defend against power and timing attacks.
+
+ AES specifies three key sizes: 128, 192, and 256 bits.
+
+config CRYPTO_AES_TI
+ tristate "AES (Advanced Encryption Standard) (fixed time)"
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_AES
help
- Support for Galois/Counter Mode (GCM) and Galois Message
- Authentication Code (GMAC). Required for IPSec.
+ AES cipher algorithms (Rijndael) (FIPS-197, ISO/IEC 18033-3)
-config CRYPTO_CHACHA20POLY1305
- tristate "ChaCha20-Poly1305 AEAD support"
- select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
- select CRYPTO_AEAD
- select CRYPTO_MANAGER
+ This is a generic implementation of AES that attempts to eliminate
+ data-dependent latencies as much as possible without affecting
+ performance too much. It is intended for use by the generic CCM
+ and GCM drivers, and other CTR or CMAC/XCBC based modes that rely
+ solely on encryption (decryption is supported as well, but with a
+ more dramatic performance hit).
+
+ Instead of using 16 lookup tables of 1 KB each (8 for encryption and
+ 8 for decryption), this implementation uses just two S-boxes of
+ 256 bytes each, and attempts to eliminate data-dependent latencies by
+ prefetching the entire table into the cache at the start of each
+ block. Interrupts are also disabled to avoid races where cachelines
+ are evicted when the CPU is interrupted to do something else.
+
+config CRYPTO_ANUBIS
+ tristate "Anubis"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+ select CRYPTO_ALGAPI
help
- ChaCha20-Poly1305 AEAD support, RFC7539.
+ Anubis cipher algorithm
- Support for the AEAD wrapper using the ChaCha20 stream cipher combined
- with the Poly1305 authenticator. It is defined in RFC7539 for use in
- IETF protocols.
+ Anubis is a variable key length cipher which can use keys from
+ 128 bits to 320 bits in length. It was evaluated as an entrant
+ in the NESSIE competition.
-config CRYPTO_AEGIS128
- tristate "AEGIS-128 AEAD algorithm"
- select CRYPTO_AEAD
- select CRYPTO_AES # for AES S-box tables
+ See https://web.archive.org/web/20160606112246/http://www.larc.usp.br/~pbarreto/AnubisPage.html
+ for further information.
+
+config CRYPTO_ARIA
+ tristate "ARIA"
+ select CRYPTO_ALGAPI
help
- Support for the AEGIS-128 dedicated AEAD algorithm.
+ ARIA cipher algorithm (RFC5794)
-config CRYPTO_AEGIS128_SIMD
- bool "Support SIMD acceleration for AEGIS-128"
- depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
- default y
+ ARIA is a standard encryption algorithm of the Republic of Korea.
+ ARIA specifies three key sizes, each with its own round count:
+ 128-bit: 12 rounds.
+ 192-bit: 14 rounds.
+ 256-bit: 16 rounds.
-config CRYPTO_AEGIS128_AESNI_SSE2
- tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
- depends on X86 && 64BIT
- select CRYPTO_AEAD
- select CRYPTO_SIMD
+ See:
+ https://seed.kisa.or.kr/kisa/algorithm/EgovAriaInfo.do
+
+config CRYPTO_BLOWFISH
+ tristate "Blowfish"
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLOWFISH_COMMON
help
- AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
+ Blowfish cipher algorithm, by Bruce Schneier
-config CRYPTO_SEQIV
- tristate "Sequence Number IV Generator"
- select CRYPTO_AEAD
+ This is a variable key length cipher which can use keys from 32
+ bits to 448 bits in length. It's fast, simple and specifically
+ designed for use on "large microprocessors".
+
+ See https://www.schneier.com/blowfish.html for further information.
+
+config CRYPTO_BLOWFISH_COMMON
+ tristate
+ help
+ Common parts of the Blowfish cipher algorithm shared by the
+ generic C and the assembler implementations.
+
+config CRYPTO_CAMELLIA
+ tristate "Camellia"
+ select CRYPTO_ALGAPI
+ help
+ Camellia cipher algorithms (ISO/IEC 18033-3)
+
+ Camellia is a symmetric key block cipher developed jointly
+ at NTT and Mitsubishi Electric Corporation.
+
+ Camellia specifies three key sizes: 128, 192 and 256 bits.
+
+ See https://info.isl.ntt.co.jp/crypt/eng/camellia/ for further information.
+
+config CRYPTO_CAST_COMMON
+ tristate
+ help
+ Common parts of the CAST cipher algorithms shared by the
+ generic C and the assembler implementations.
+
+config CRYPTO_CAST5
+ tristate "CAST5 (CAST-128)"
+ select CRYPTO_ALGAPI
+ select CRYPTO_CAST_COMMON
+ help
+ CAST5 (CAST-128) cipher algorithm (RFC2144, ISO/IEC 18033-3)
+
+config CRYPTO_CAST6
+ tristate "CAST6 (CAST-256)"
+ select CRYPTO_ALGAPI
+ select CRYPTO_CAST_COMMON
+ help
+ CAST6 (CAST-256) encryption algorithm (RFC2612)
+
+config CRYPTO_DES
+ tristate "DES and Triple DES EDE"
+ select CRYPTO_ALGAPI
+ select CRYPTO_LIB_DES
+ help
+ DES (Data Encryption Standard)(FIPS 46-2, ISO/IEC 18033-3) and
+ Triple DES EDE (Encrypt/Decrypt/Encrypt) (FIPS 46-3, ISO/IEC 18033-3)
+ cipher algorithms
+
+config CRYPTO_FCRYPT
+ tristate "FCrypt"
+ select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
- select CRYPTO_NULL
- select CRYPTO_RNG_DEFAULT
- select CRYPTO_MANAGER
help
- This IV generator generates an IV based on a sequence number by
- xoring it with a salt. This algorithm is mainly useful for CTR
+ FCrypt algorithm used by RxRPC
-config CRYPTO_ECHAINIV
- tristate "Encrypted Chain IV Generator"
- select CRYPTO_AEAD
- select CRYPTO_NULL
- select CRYPTO_RNG_DEFAULT
+ See https://ota.polyonymo.us/fcrypt-paper.txt
+
+config CRYPTO_KHAZAD
+ tristate "Khazad"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+ select CRYPTO_ALGAPI
+ help
+ Khazad cipher algorithm
+
+ Khazad was a finalist in the initial NESSIE competition. It is
+ an algorithm optimized for 64-bit processors with good performance
+ on 32-bit processors. Khazad uses a 128-bit key size.
+
+ See https://web.archive.org/web/20171011071731/http://www.larc.usp.br/~pbarreto/KhazadPage.html
+ for further information.
+
+config CRYPTO_SEED
+ tristate "SEED"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+ select CRYPTO_ALGAPI
+ help
+ SEED cipher algorithm (RFC4269, ISO/IEC 18033-3)
+
+ SEED is a 128-bit symmetric key block cipher that has been
+ developed by KISA (Korea Information Security Agency) as a
+ national standard encryption algorithm of the Republic of Korea.
+ It is a 16-round block cipher with a key size of 128 bits.
+
+ See https://seed.kisa.or.kr/kisa/algorithm/EgovSeedInfo.do
+ for further information.
+
+config CRYPTO_SERPENT
+ tristate "Serpent"
+ select CRYPTO_ALGAPI
+ help
+ Serpent cipher algorithm, by Anderson, Biham & Knudsen
+
+ Keys are allowed to be from 0 to 256 bits in length, in steps
+ of 8 bits.
+
+ See https://www.cl.cam.ac.uk/~rja14/serpent.html for further information.
+
+config CRYPTO_SM4
+ tristate
+
+config CRYPTO_SM4_GENERIC
+ tristate "SM4 (ShangMi 4)"
+ select CRYPTO_ALGAPI
+ select CRYPTO_SM4
+ help
+ SM4 cipher algorithms (OSCCA GB/T 32907-2016,
+ ISO/IEC 18033-3:2010/Amd 1:2021)
+
+ SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+ Organization of State Commercial Administration of China (OSCCA)
+ as an authorized cryptographic algorithm for use within China.
+
+ SMS4 was originally created for use in protecting wireless
+ networks, and is mandated in the Chinese National Standard for
+ Wireless LAN WAPI (WLAN Authentication and Privacy Infrastructure)
+ (GB.15629.11-2003).
+
+ The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and
+ standardized through TC 260 of the Standardization Administration
+ of the People's Republic of China (SAC).
+
+ The input, output, and key of SMS4 are each 128 bits.
+
+ See https://eprint.iacr.org/2008/329.pdf for further information.
+
+ If unsure, say N.
+
+config CRYPTO_TEA
+ tristate "TEA, XTEA and XETA"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+ select CRYPTO_ALGAPI
+ help
+ TEA (Tiny Encryption Algorithm) cipher algorithms
+
+ Tiny Encryption Algorithm is a simple cipher that uses
+ many rounds for security. It is very fast and uses
+ little memory.
+
+ The eXtended Tiny Encryption Algorithm (XTEA) is a modification to
+ the TEA algorithm to address a potential key weakness
+ in the TEA algorithm.
+
+ The eXtended Encryption Tiny Algorithm (XETA) is a mis-implementation
+ of the XTEA algorithm, kept for compatibility purposes.
+
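As a concrete illustration of why TEA is described as simple, the full textbook encryption routine fits in a dozen lines of C. This is a sketch of the classic algorithm, not the kernel's implementation:

    #include <stdint.h>

    /* Textbook TEA encryption: 64-bit block v[2], 128-bit key k[4],
     * 32 cycles of two Feistel half-rounds each. */
    void tea_encrypt(uint32_t v[2], const uint32_t k[4])
    {
        uint32_t v0 = v[0], v1 = v[1], sum = 0;
        const uint32_t delta = 0x9e3779b9;  /* derived from the golden ratio */
        int i;

        for (i = 0; i < 32; i++) {
            sum += delta;
            v0 += ((v1 << 4) + k[0]) ^ (v1 + sum) ^ ((v1 >> 5) + k[1]);
            v1 += ((v0 << 4) + k[2]) ^ (v0 + sum) ^ ((v0 >> 5) + k[3]);
        }
        v[0] = v0;
        v[1] = v1;
    }

XTEA keeps this structure but selects key words based on the current value of sum, which addresses the key-schedule weakness mentioned above.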
+config CRYPTO_TWOFISH
+ tristate "Twofish"
+ select CRYPTO_ALGAPI
+ select CRYPTO_TWOFISH_COMMON
+ help
+ Twofish cipher algorithm
+
+ Twofish was submitted as an AES (Advanced Encryption Standard)
+ candidate cipher by researchers at CounterPane Systems. It is a
+ 16 round block cipher supporting key sizes of 128, 192, and 256
+ bits.
+
+ See https://www.schneier.com/twofish.html for further information.
+
+config CRYPTO_TWOFISH_COMMON
+ tristate
+ help
+ Common parts of the Twofish cipher algorithm shared by the
+ generic C and the assembler implementations.
+
+endmenu
+
+menu "Length-preserving ciphers and modes"
+
+config CRYPTO_ADIANTUM
+ tristate "Adiantum"
+ select CRYPTO_CHACHA20
+ select CRYPTO_LIB_POLY1305_GENERIC
+ select CRYPTO_NHPOLY1305
select CRYPTO_MANAGER
help
- This IV generator generates an IV based on the encryption of
- a sequence number xored with a salt. This is the default
- algorithm for CBC.
+ Adiantum tweakable, length-preserving encryption mode
+
+ Designed for fast and secure disk encryption, especially on
+ CPUs without dedicated crypto instructions. It encrypts
+ each sector using the XChaCha12 stream cipher, two passes of
+ an ε-almost-∆-universal hash function, and an invocation of
+ the AES-256 block cipher on a single 16-byte block. On CPUs
+ without AES instructions, Adiantum is much faster than
+ AES-XTS.
+
+ Adiantum's security is provably reducible to that of its
+ underlying stream and block ciphers, subject to a security
+ bound. Unlike XTS, Adiantum is a true wide-block encryption
+ mode, so it actually provides an even stronger notion of
+ security than XTS, subject to the security bound.
+
+ If unsure, say N.
+
+config CRYPTO_ARC4
+ tristate "ARC4 (Alleged Rivest Cipher 4)"
+ depends on CRYPTO_USER_API_ENABLE_OBSOLETE
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_ARC4
+ help
+ ARC4 cipher algorithm
+
+ ARC4 is a stream cipher using keys ranging from 8 bits to 2048
+ bits in length. This algorithm is required for driver-based
+ WEP, but it should not be used for other purposes because of the
+ weakness of the algorithm.
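The weakness warning is easier to appreciate given how small ARC4 is: the entire cipher is a 256-byte permutation walked by two indices. A minimal C sketch of the well-known key schedule and keystream generator, for illustration only:

    #include <stddef.h>
    #include <stdint.h>

    /* Key-scheduling algorithm: shuffle S under the key. */
    void rc4_init(uint8_t S[256], const uint8_t *key, size_t keylen)
    {
        int i, j;
        uint8_t t;

        for (i = 0; i < 256; i++)
            S[i] = i;
        for (i = 0, j = 0; i < 256; i++) {
            j = (j + S[i] + key[i % keylen]) & 0xff;
            t = S[i]; S[i] = S[j]; S[j] = t;
        }
    }

    /* Keystream generator: XOR each byte of buf with one keystream byte. */
    void rc4_crypt(uint8_t S[256], uint8_t *buf, size_t len)
    {
        int i = 0, j = 0;
        uint8_t t;

        while (len--) {
            i = (i + 1) & 0xff;
            j = (j + S[i]) & 0xff;
            t = S[i]; S[i] = S[j]; S[j] = t;
            *buf++ ^= S[(S[i] + S[j]) & 0xff];
        }
    }

The early keystream bytes are statistically biased, which is among the weaknesses exploited in the attacks that broke WEP.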
-comment "Block modes"
+config CRYPTO_CHACHA20
+ tristate "ChaCha"
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_SKCIPHER
+ help
+ The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
+
+ ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
+ Bernstein and further specified in RFC7539 for use in IETF protocols.
+ This is the portable C implementation of ChaCha20. See
+ https://cr.yp.to/chacha/chacha-20080128.pdf for further information.
+
+ XChaCha20 is the application of the XSalsa20 construction to ChaCha20
+ rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length
+ from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits,
+ while provably retaining ChaCha20's security. See
+ https://cr.yp.to/snuffle/xsalsa-20081128.pdf for further information.
+
+ XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly
+ reduced security margin but increased performance. It may be needed
+ in some performance-sensitive scenarios.
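The quarter-round is ChaCha's only primitive; the rounds are just column and diagonal applications of it over a 16-word state. A minimal C sketch of the quarter-round as specified in RFC7539:

    #include <stdint.h>

    #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

    /* One ChaCha quarter-round over four words of the 16-word state. */
    static void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
        *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }

ChaCha20 runs ten double-rounds (one column pass plus one diagonal pass each), then adds the initial state words back in to produce a 64-byte keystream block; the 12-round variants simply stop earlier.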
config CRYPTO_CBC
- tristate "CBC support"
+ tristate "CBC (Cipher Block Chaining)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
- CBC: Cipher Block Chaining mode
- This block cipher algorithm is required for IPSec.
+ CBC (Cipher Block Chaining) mode (NIST SP800-38A)
+
+ This block cipher mode is required for IPsec ESP (XFRM_ESP).
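Once cbc(aes) is built, it can be exercised from userspace through the AF_ALG socket interface (this also assumes CRYPTO_USER_API_SKCIPHER, configured elsewhere in this file, is enabled). A rough sketch encrypting a single block with a throwaway demo key, error handling omitted:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/if_alg.h>

    #ifndef SOL_ALG
    #define SOL_ALG 279
    #endif

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "skcipher",
            .salg_name   = "cbc(aes)",
        };
        unsigned char key[16] = "0123456789abcdef";  /* demo key only */
        unsigned char iv[16] = { 0 };
        unsigned char pt[16] = "single AES block";   /* exactly 16 bytes */
        unsigned char ct[16];
        char cbuf[CMSG_SPACE(sizeof(__u32)) +
                  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
        struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cm;
        struct af_alg_iv *aiv;
        int tfm, op, i;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        op = accept(tfm, NULL, 0);

        /* The operation type and IV travel as ancillary data. */
        cm = CMSG_FIRSTHDR(&msg);
        cm->cmsg_level = SOL_ALG;
        cm->cmsg_type = ALG_SET_OP;
        cm->cmsg_len = CMSG_LEN(sizeof(__u32));
        *(__u32 *)CMSG_DATA(cm) = ALG_OP_ENCRYPT;
        cm = CMSG_NXTHDR(&msg, cm);
        cm->cmsg_level = SOL_ALG;
        cm->cmsg_type = ALG_SET_IV;
        cm->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
        aiv = (struct af_alg_iv *)CMSG_DATA(cm);
        aiv->ivlen = sizeof(iv);
        memcpy(aiv->iv, iv, sizeof(iv));

        sendmsg(op, &msg, 0);
        read(op, ct, sizeof(ct));
        for (i = 0; i < 16; i++)
            printf("%02x", ct[i]);
        printf("\n");
        return 0;
    }

The same pattern works for any skcipher template string, e.g. "ctr(aes)" or "xts(aes)", provided the corresponding options are enabled.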
config CRYPTO_CFB
- tristate "CFB support"
+ tristate "CFB (Cipher Feedback)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
- CFB: Cipher FeedBack mode
- This block cipher algorithm is required for TPM2 Cryptography.
+ CFB (Cipher Feedback) mode (NIST SP800-38A)
+
+ This block cipher mode is required for TPM2 Cryptography.
config CRYPTO_CTR
- tristate "CTR support"
+ tristate "CTR (Counter)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
- CTR: Counter mode
- This block cipher algorithm is required for IPSec.
+ CTR (Counter) mode (NIST SP800-38A)
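CTR's mechanics are worth sketching because the block cipher never touches the data; it only encrypts an incrementing counter block to produce keystream. A minimal C sketch over a stand-in block-cipher callback (the block_fn type and 16-byte block size are assumptions for illustration):

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for a 16-byte block cipher such as AES with a scheduled key. */
    typedef void (*block_fn)(const void *key, const uint8_t in[16], uint8_t out[16]);

    /* CTR: keystream block i = E_K(ctr), ctr incremented per block. */
    void ctr_crypt(block_fn enc, const void *key, uint8_t ctr[16],
                   uint8_t *data, size_t len)
    {
        uint8_t ks[16];
        size_t i, n;
        int b;

        while (len) {
            enc(key, ctr, ks);
            n = len < 16 ? len : 16;
            for (i = 0; i < n; i++)
                data[i] ^= ks[i];
            for (b = 15; b >= 0 && ++ctr[b] == 0; b--)
                ;                       /* big-endian increment */
            data += n;
            len -= n;
        }
    }

Because encryption and decryption are the same XOR, ctr_crypt() serves for both directions.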
config CRYPTO_CTS
- tristate "CTS support"
+ tristate "CTS (Cipher Text Stealing)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
- CTS: Cipher Text Stealing
- This is the Cipher Text Stealing mode as described by
- Section 8 of rfc2040 and referenced by rfc3962
- (rfc3962 includes errata information in its Appendix A) or
- CBC-CS3 as defined by NIST in Sp800-38A addendum from Oct 2010.
+ CBC-CS3 variant of CTS (Cipher Text Stealing) (NIST
+ Addendum to SP800-38A (October 2010))
+
This mode is required for Kerberos GSS mechanism support
for AES encryption.
- See: https://csrc.nist.gov/publications/detail/sp/800-38a/addendum/final
-
config CRYPTO_ECB
- tristate "ECB support"
+ tristate "ECB (Electronic Codebook)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
- ECB: Electronic CodeBook mode
- This is the simplest block cipher algorithm. It simply encrypts
- the input block by block.
+ ECB (Electronic Codebook) mode (NIST SP800-38A)
+
+config CRYPTO_HCTR2
+ tristate "HCTR2"
+ select CRYPTO_XCTR
+ select CRYPTO_POLYVAL
+ select CRYPTO_MANAGER
+ help
+ HCTR2 length-preserving encryption mode
+
+ A mode for storage encryption that is efficient on processors with
+ instructions to accelerate AES and carryless multiplication, e.g.
+ x86 processors with AES-NI and CLMUL, and ARM processors with the
+ ARMv8 crypto extensions.
+
+ See https://eprint.iacr.org/2021/1441
+
+config CRYPTO_KEYWRAP
+ tristate "KW (AES Key Wrap)"
+ select CRYPTO_SKCIPHER
+ select CRYPTO_MANAGER
+ help
+ KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F
+ and RFC3394) without padding.
config CRYPTO_LRW
- tristate "LRW support"
+ tristate "LRW (Liskov Rivest Wagner)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_GF128MUL
select CRYPTO_ECB
help
- LRW: Liskov Rivest Wagner, a tweakable, non malleable, non movable
+ LRW (Liskov Rivest Wagner) mode
+
+ A tweakable, non-malleable, non-movable
narrow block cipher mode for dm-crypt. Use it with cipher
specification string aes-lrw-benbi, the key must be 256, 320 or 384 bits.
The first 128, 192 or 256 bits in the key are used for AES and the
rest is used to tie each cipher block to its logical position.
+ See https://people.csail.mit.edu/rivest/pubs/LRW02.pdf
+
config CRYPTO_OFB
- tristate "OFB support"
+ tristate "OFB (Output Feedback)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
- OFB: the Output Feedback mode makes a block cipher into a synchronous
+ OFB (Output Feedback) mode (NIST SP800-38A)
+
+ This mode makes a block cipher into a synchronous
stream cipher. It generates keystream blocks, which are then XORed
with the plaintext blocks to get the ciphertext. Flipping a bit in the
ciphertext produces a flipped bit in the plaintext at the same
@@ -475,102 +744,133 @@ config CRYPTO_OFB
normally even when applied before encryption.
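The feedback path described above is the previous keystream block itself, so like CTR the keystream is independent of the data. A minimal C sketch, reusing the same stand-in block_fn callback as the CTR sketch earlier:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Stand-in 16-byte block cipher, as in the CTR sketch. */
    typedef void (*block_fn)(const void *key, const uint8_t in[16], uint8_t out[16]);

    /* OFB: ks_0 = E_K(IV), ks_i = E_K(ks_{i-1}); data is XORed with ks. */
    void ofb_crypt(block_fn enc, const void *key, uint8_t iv[16],
                   uint8_t *data, size_t len)
    {
        uint8_t ks[16];
        size_t i, n;

        while (len) {
            enc(key, iv, ks);
            memcpy(iv, ks, 16);   /* feedback: next input is this output */
            n = len < 16 ? len : 16;
            for (i = 0; i < n; i++)
                data[i] ^= ks[i];
            data += n;
            len -= n;
        }
    }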
config CRYPTO_PCBC
- tristate "PCBC support"
+ tristate "PCBC (Propagating Cipher Block Chaining)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
- PCBC: Propagating Cipher Block Chaining mode
- This block cipher algorithm is required for RxRPC.
+ PCBC (Propagating Cipher Block Chaining) mode
+
+ This block cipher mode is required for RxRPC.
config CRYPTO_XCTR
tristate
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
help
- XCTR: XOR Counter mode. This blockcipher mode is a variant of CTR mode
- using XORs and little-endian addition rather than big-endian arithmetic.
+ XCTR (XOR Counter) mode for HCTR2
+
+ This block cipher mode is a variant of CTR mode using XORs and little-endian
+ addition rather than big-endian arithmetic.
+
XCTR mode is used to implement HCTR2.
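XCTR's only differences from CTR are the ones named above: the per-block counter is combined with the IV by XOR rather than addition, and it is encoded little-endian, starting from 1. A minimal C sketch under the same stand-in callback as the CTR sketch (details follow the HCTR2 paper; treat as illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Stand-in 16-byte block cipher, as in the CTR sketch. */
    typedef void (*block_fn)(const void *key, const uint8_t in[16], uint8_t out[16]);

    /* XCTR: keystream block i = E_K(IV xor le128(i + 1)), i = 0, 1, ... */
    void xctr_crypt(block_fn enc, const void *key, const uint8_t iv[16],
                    uint8_t *data, size_t len)
    {
        uint8_t blk[16], ks[16];
        uint64_t ctr = 1;
        size_t i, n;

        while (len) {
            memcpy(blk, iv, 16);
            for (i = 0; i < 8; i++)   /* XOR in the little-endian counter */
                blk[i] ^= (uint8_t)(ctr >> (8 * i));   /* (low 64 bits)   */
            enc(key, blk, ks);
            n = len < 16 ? len : 16;
            for (i = 0; i < n; i++)
                data[i] ^= ks[i];
            ctr++;
            data += n;
            len -= n;
        }
    }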
config CRYPTO_XTS
- tristate "XTS support"
+ tristate "XTS (XOR Encrypt XOR with ciphertext stealing)"
select CRYPTO_SKCIPHER
select CRYPTO_MANAGER
select CRYPTO_ECB
help
- XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
- key size 256, 384 or 512 bits. This implementation currently
- can't handle a sectorsize which is not a multiple of 16 bytes.
+ XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+ and IEEE 1619)
-config CRYPTO_KEYWRAP
- tristate "Key wrapping support"
- select CRYPTO_SKCIPHER
- select CRYPTO_MANAGER
- help
- Support for key wrapping (NIST SP800-38F / RFC3394) without
- padding.
+ Use with aes-xts-plain, key size 256, 384 or 512 bits. This
+ implementation currently can't handle a sector size which is not a
+ multiple of 16 bytes.
config CRYPTO_NHPOLY1305
tristate
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305_GENERIC
-config CRYPTO_NHPOLY1305_SSE2
- tristate "NHPoly1305 hash function (x86_64 SSE2 implementation)"
- depends on X86 && 64BIT
- select CRYPTO_NHPOLY1305
+endmenu
+
+menu "AEAD (authenticated encryption with associated data) ciphers"
+
+config CRYPTO_AEGIS128
+ tristate "AEGIS-128"
+ select CRYPTO_AEAD
+ select CRYPTO_AES # for AES S-box tables
help
- SSE2 optimized implementation of the hash function used by the
- Adiantum encryption mode.
+ AEGIS-128 AEAD algorithm
-config CRYPTO_NHPOLY1305_AVX2
- tristate "NHPoly1305 hash function (x86_64 AVX2 implementation)"
- depends on X86 && 64BIT
- select CRYPTO_NHPOLY1305
+config CRYPTO_AEGIS128_SIMD
+ bool "AEGIS-128 (arm NEON, arm64 NEON)"
+ depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
+ default y
help
- AVX2 optimized implementation of the hash function used by the
- Adiantum encryption mode.
+ AEGIS-128 AEAD algorithm
-config CRYPTO_ADIANTUM
- tristate "Adiantum support"
+ Architecture: arm or arm64 using:
+ - NEON (Advanced SIMD) extension
+
+config CRYPTO_CHACHA20POLY1305
+ tristate "ChaCha20-Poly1305"
select CRYPTO_CHACHA20
- select CRYPTO_LIB_POLY1305_GENERIC
- select CRYPTO_NHPOLY1305
+ select CRYPTO_POLY1305
+ select CRYPTO_AEAD
select CRYPTO_MANAGER
help
- Adiantum is a tweakable, length-preserving encryption mode
- designed for fast and secure disk encryption, especially on
- CPUs without dedicated crypto instructions. It encrypts
- each sector using the XChaCha12 stream cipher, two passes of
- an ε-almost-∆-universal hash function, and an invocation of
- the AES-256 block cipher on a single 16-byte block. On CPUs
- without AES instructions, Adiantum is much faster than
- AES-XTS.
+ ChaCha20 stream cipher and Poly1305 authenticator combined
+ mode (RFC8439)
- Adiantum's security is provably reducible to that of its
- underlying stream and block ciphers, subject to a security
- bound. Unlike XTS, Adiantum is a true wide-block encryption
- mode, so it actually provides an even stronger notion of
- security than XTS, subject to the security bound.
+config CRYPTO_CCM
+ tristate "CCM (Counter with Cipher Block Chaining-MAC)"
+ select CRYPTO_CTR
+ select CRYPTO_HASH
+ select CRYPTO_AEAD
+ select CRYPTO_MANAGER
+ help
+ CCM (Counter with Cipher Block Chaining-Message Authentication Code)
+ authenticated encryption mode (NIST SP800-38C)
- If unsure, say N.
+config CRYPTO_GCM
+ tristate "GCM (Galois/Counter Mode) and GMAC (GCM MAC)"
+ select CRYPTO_CTR
+ select CRYPTO_AEAD
+ select CRYPTO_GHASH
+ select CRYPTO_NULL
+ select CRYPTO_MANAGER
+ help
+ GCM (Galois/Counter Mode) authenticated encryption mode and GMAC
+ (GCM Message Authentication Code) (NIST SP800-38D)
-config CRYPTO_HCTR2
- tristate "HCTR2 support"
- select CRYPTO_XCTR
- select CRYPTO_POLYVAL
+ This is required for IPsec ESP (XFRM_ESP).
+
+config CRYPTO_SEQIV
+ tristate "Sequence Number IV Generator"
+ select CRYPTO_AEAD
+ select CRYPTO_SKCIPHER
+ select CRYPTO_NULL
+ select CRYPTO_RNG_DEFAULT
select CRYPTO_MANAGER
help
- HCTR2 is a length-preserving encryption mode for storage encryption that
- is efficient on processors with instructions to accelerate AES and
- carryless multiplication, e.g. x86 processors with AES-NI and CLMUL, and
- ARM processors with the ARMv8 crypto extensions.
+ Sequence Number IV generator
+
+ This IV generator generates an IV based on a sequence number by
+ XORing it with a salt. This algorithm is mainly useful for CTR.
+
+ This is required for IPsec ESP (XFRM_ESP).
+
+config CRYPTO_ECHAINIV
+ tristate "Encrypted Chain IV Generator"
+ select CRYPTO_AEAD
+ select CRYPTO_NULL
+ select CRYPTO_RNG_DEFAULT
+ select CRYPTO_MANAGER
+ help
+ Encrypted Chain IV generator
+
+ This IV generator generates an IV based on the encryption of
+ a sequence number XORed with a salt. This is the default
+ algorithm for CBC.
config CRYPTO_ESSIV
- tristate "ESSIV support for block encryption"
+ tristate "Encrypted Salt-Sector IV Generator"
select CRYPTO_AUTHENC
help
- Encrypted salt-sector initialization vector (ESSIV) is an IV
- generation method that is used in some cases by fscrypt and/or
+ Encrypted Salt-Sector IV generator
+
+ This IV generator is used in some cases by fscrypt and/or
dm-crypt. It uses the hash of the block encryption key as the
symmetric key for a block encryption pass applied to the input
IV, making low entropy IV sources more suitable for block
@@ -593,1422 +893,356 @@ config CRYPTO_ESSIV
combined with ESSIV the only feasible mode for h/w accelerated
block encryption)
-comment "Hash modes"
-
-config CRYPTO_CMAC
- tristate "CMAC support"
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- help
- Cipher-based Message Authentication Code (CMAC) specified by
- The National Institute of Standards and Technology (NIST).
-
- https://tools.ietf.org/html/rfc4493
- http://csrc.nist.gov/publications/nistpubs/800-38B/SP_800-38B.pdf
-
-config CRYPTO_HMAC
- tristate "HMAC support"
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- help
- HMAC: Keyed-Hashing for Message Authentication (RFC2104).
- This is required for IPSec.
-
-config CRYPTO_XCBC
- tristate "XCBC support"
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- help
- XCBC: Keyed-Hashing with encryption algorithm
- https://www.ietf.org/rfc/rfc3566.txt
- http://csrc.nist.gov/encryption/modes/proposedmodes/
- xcbc-mac/xcbc-mac-spec.pdf
-
-config CRYPTO_VMAC
- tristate "VMAC support"
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- help
- VMAC is a message authentication algorithm designed for
- very high speed on 64-bit architectures.
-
- See also:
- <https://fastcrypto.org/vmac>
+endmenu
-comment "Digest"
-
-config CRYPTO_CRC32C
- tristate "CRC32c CRC algorithm"
- select CRYPTO_HASH
- select CRC32
- help
- Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
- by iSCSI for header and data digests and by others.
- See Castagnoli93. Module will be crc32c.
-
-config CRYPTO_CRC32C_INTEL
- tristate "CRC32c INTEL hardware acceleration"
- depends on X86
- select CRYPTO_HASH
- help
- In Intel processor with SSE4.2 supported, the processor will
- support CRC32C implementation using hardware accelerated CRC32
- instruction. This option will create 'crc32c-intel' module,
- which will enable any routine to use the CRC32 instruction to
- gain performance compared with software implementation.
- Module will be crc32c-intel.
-
-config CRYPTO_CRC32C_VPMSUM
- tristate "CRC32c CRC algorithm (powerpc64)"
- depends on PPC64 && ALTIVEC
- select CRYPTO_HASH
- select CRC32
- help
- CRC32c algorithm implemented using vector polynomial multiply-sum
- (vpmsum) instructions, introduced in POWER8. Enable on POWER8
- and newer processors for improved performance.
-
-
-config CRYPTO_CRC32C_SPARC64
- tristate "CRC32c CRC algorithm (SPARC64)"
- depends on SPARC64
- select CRYPTO_HASH
- select CRC32
- help
- CRC32c CRC algorithm implemented using sparc64 crypto instructions,
- when available.
-
-config CRYPTO_CRC32
- tristate "CRC32 CRC algorithm"
- select CRYPTO_HASH
- select CRC32
- help
- CRC-32-IEEE 802.3 cyclic redundancy-check algorithm.
- Shash crypto api wrappers to crc32_le function.
-
-config CRYPTO_CRC32_PCLMUL
- tristate "CRC32 PCLMULQDQ hardware acceleration"
- depends on X86
- select CRYPTO_HASH
- select CRC32
- help
- From Intel Westmere and AMD Bulldozer processor with SSE4.2
- and PCLMULQDQ supported, the processor will support
- CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ
- instruction. This option will create 'crc32-pclmul' module,
- which will enable any routine to use the CRC-32-IEEE 802.3 checksum
- and gain better performance as compared with the table implementation.
-
-config CRYPTO_CRC32_MIPS
- tristate "CRC32c and CRC32 CRC algorithm (MIPS)"
- depends on MIPS_CRC_SUPPORT
- select CRYPTO_HASH
- help
- CRC32c and CRC32 CRC algorithms implemented using mips crypto
- instructions, when available.
-
-config CRYPTO_CRC32_S390
- tristate "CRC-32 algorithms"
- depends on S390
- select CRYPTO_HASH
- select CRC32
- help
- Select this option if you want to use hardware accelerated
- implementations of CRC algorithms. With this option, you
- can optimize the computation of CRC-32 (IEEE 802.3 Ethernet)
- and CRC-32C (Castagnoli).
-
- It is available with IBM z13 or later.
-
-config CRYPTO_XXHASH
- tristate "xxHash hash algorithm"
- select CRYPTO_HASH
- select XXHASH
- help
- xxHash non-cryptographic hash algorithm. Extremely fast, working at
- speeds close to RAM limits.
+menu "Hashes, digests, and MACs"
config CRYPTO_BLAKE2B
- tristate "BLAKE2b digest algorithm"
+ tristate "BLAKE2b"
select CRYPTO_HASH
help
- Implementation of cryptographic hash function BLAKE2b (or just BLAKE2),
- optimized for 64bit platforms and can produce digests of any size
- between 1 to 64. The keyed hash is also implemented.
+ BLAKE2b cryptographic hash function (RFC 7693)
- This module provides the following algorithms:
+ BLAKE2b is optimized for 64-bit platforms and can produce digests
+ of any size between 1 and 64 bytes. The keyed hash is also implemented.
+ This module provides the following algorithms:
- blake2b-160
- blake2b-256
- blake2b-384
- blake2b-512
- See https://blake2.net for further information.
+ Used by the btrfs filesystem.
-config CRYPTO_BLAKE2S_X86
- bool "BLAKE2s digest algorithm (x86 accelerated version)"
- depends on X86 && 64BIT
- select CRYPTO_LIB_BLAKE2S_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ See https://blake2.net for further information.
-config CRYPTO_CRCT10DIF
- tristate "CRCT10DIF algorithm"
+config CRYPTO_CMAC
+ tristate "CMAC (Cipher-based MAC)"
select CRYPTO_HASH
+ select CRYPTO_MANAGER
help
- CRC T10 Data Integrity Field computation is being cast as
- a crypto transform. This allows for faster crc t10 diff
- transforms to be used if they are available.
+ CMAC (Cipher-based Message Authentication Code) authentication
+ mode (NIST SP800-38B and IETF RFC4493)
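The one subtle step in CMAC is deriving the subkeys K1 = dbl(E_K(0^128)) and K2 = dbl(K1), where dbl() is doubling in GF(2^128). A minimal C sketch of that doubling step (the big-endian convention and the reduction constant 0x87 follow SP800-38B):

    #include <stdint.h>

    /* GF(2^128) doubling as used by CMAC subkey generation:
     * K1 = dbl(E_K(0^128)), K2 = dbl(K1). */
    static void cmac_dbl(uint8_t d[16], const uint8_t s[16])
    {
        uint8_t carry = s[0] >> 7;      /* MSB of the whole block */
        int i;

        for (i = 0; i < 15; i++)
            d[i] = (uint8_t)((s[i] << 1) | (s[i + 1] >> 7));
        d[15] = (uint8_t)(s[15] << 1);
        if (carry)
            d[15] ^= 0x87;              /* x^128 = x^7 + x^2 + x + 1 */
    }

K1 or K2 is then XORed into the final message block (depending on whether padding was needed) before the last cipher call, which is what binds the MAC to the exact message length.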
-config CRYPTO_CRCT10DIF_PCLMUL
- tristate "CRCT10DIF PCLMULQDQ hardware acceleration"
- depends on X86 && 64BIT && CRC_T10DIF
+config CRYPTO_GHASH
+ tristate "GHASH"
+ select CRYPTO_GF128MUL
select CRYPTO_HASH
help
- For x86_64 processors with SSE4.2 and PCLMULQDQ supported,
- CRC T10 DIF PCLMULQDQ computation can be hardware
- accelerated PCLMULQDQ instruction. This option will create
- 'crct10dif-pclmul' module, which is faster when computing the
- crct10dif checksum as compared with the generic table implementation.
+ GCM GHASH function (NIST SP800-38D)
-config CRYPTO_CRCT10DIF_VPMSUM
- tristate "CRC32T10DIF powerpc64 hardware acceleration"
- depends on PPC64 && ALTIVEC && CRC_T10DIF
+config CRYPTO_HMAC
+ tristate "HMAC (Keyed-Hash MAC)"
select CRYPTO_HASH
+ select CRYPTO_MANAGER
help
- CRC10T10DIF algorithm implemented using vector polynomial
- multiply-sum (vpmsum) instructions, introduced in POWER8. Enable on
- POWER8 and newer processors for improved performance.
+ HMAC (Keyed-Hash Message Authentication Code) (FIPS 198 and
+ RFC2104)
-config CRYPTO_CRC64_ROCKSOFT
- tristate "Rocksoft Model CRC64 algorithm"
- depends on CRC64
+ This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP).
+
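Like the skcipher example earlier, keyed hashes are reachable from userspace through AF_ALG with the "hash" socket type (assuming CRYPTO_USER_API_HASH is enabled). A rough sketch computing hmac(sha256) over a short message, error handling omitted:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    #ifndef SOL_ALG
    #define SOL_ALG 279
    #endif

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "hmac(sha256)",
        };
        const char key[] = "secret key";   /* demo key only */
        const char msg[] = "hello, world";
        unsigned char mac[32];
        int tfm, op, i;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, strlen(key));
        op = accept(tfm, NULL, 0);

        write(op, msg, strlen(msg));   /* feed the message */
        read(op, mac, sizeof(mac));    /* finalize, read the 32-byte MAC */

        for (i = 0; i < 32; i++)
            printf("%02x", mac[i]);
        printf("\n");
        return 0;
    }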
+config CRYPTO_MD4
+ tristate "MD4"
select CRYPTO_HASH
+ help
+ MD4 message digest algorithm (RFC1320)
-config CRYPTO_VPMSUM_TESTER
- tristate "Powerpc64 vpmsum hardware acceleration tester"
- depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
+config CRYPTO_MD5
+ tristate "MD5"
+ select CRYPTO_HASH
help
- Stress test for CRC32c and CRC-T10DIF algorithms implemented with
- POWER8 vpmsum instructions.
- Unless you are testing these algorithms, you don't need this.
+ MD5 message digest algorithm (RFC1321)
-config CRYPTO_GHASH
- tristate "GHASH hash function"
- select CRYPTO_GF128MUL
+config CRYPTO_MICHAEL_MIC
+ tristate "Michael MIC"
select CRYPTO_HASH
help
- GHASH is the hash function used in GCM (Galois/Counter Mode).
- It is not a general-purpose cryptographic hash function.
+ Michael MIC (Message Integrity Code) (IEEE 802.11i)
+
+ Defined by the IEEE 802.11i TKIP (Temporal Key Integrity Protocol),
+ known as WPA (Wi-Fi Protected Access).
+
+ This algorithm is required for TKIP, but it should not be used for
+ other purposes because of the weakness of the algorithm.
config CRYPTO_POLYVAL
tristate
select CRYPTO_GF128MUL
select CRYPTO_HASH
help
- POLYVAL is the hash function used in HCTR2. It is not a general-purpose
- cryptographic hash function.
+ POLYVAL hash function for HCTR2
-config CRYPTO_POLYVAL_CLMUL_NI
- tristate "POLYVAL hash function (CLMUL-NI accelerated)"
- depends on X86 && 64BIT
- select CRYPTO_POLYVAL
- help
- This is the x86_64 CLMUL-NI accelerated implementation of POLYVAL. It is
- used to efficiently implement HCTR2 on x86-64 processors that support
- carry-less multiplication instructions.
+ This is used in HCTR2. It is not a general-purpose
+ cryptographic hash function.
config CRYPTO_POLY1305
- tristate "Poly1305 authenticator algorithm"
+ tristate "Poly1305"
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305_GENERIC
help
- Poly1305 authenticator algorithm, RFC7539.
+ Poly1305 authenticator algorithm (RFC7539)
Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
in IETF protocols. This is the portable C implementation of Poly1305.
-config CRYPTO_POLY1305_X86_64
- tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)"
- depends on X86 && 64BIT
- select CRYPTO_LIB_POLY1305_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
- help
- Poly1305 authenticator algorithm, RFC7539.
-
- Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
- It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
- in IETF protocols. This is the x86_64 assembler implementation using SIMD
- instructions.
-
-config CRYPTO_POLY1305_MIPS
- tristate "Poly1305 authenticator algorithm (MIPS optimized)"
- depends on MIPS
- select CRYPTO_ARCH_HAVE_LIB_POLY1305
-
-config CRYPTO_MD4
- tristate "MD4 digest algorithm"
- select CRYPTO_HASH
- help
- MD4 message digest algorithm (RFC1320).
-
-config CRYPTO_MD5
- tristate "MD5 digest algorithm"
- select CRYPTO_HASH
- help
- MD5 message digest algorithm (RFC1321).
-
-config CRYPTO_MD5_OCTEON
- tristate "MD5 digest algorithm (OCTEON)"
- depends on CPU_CAVIUM_OCTEON
- select CRYPTO_MD5
- select CRYPTO_HASH
- help
- MD5 message digest algorithm (RFC1321) implemented
- using OCTEON crypto instructions, when available.
-
-config CRYPTO_MD5_PPC
- tristate "MD5 digest algorithm (PPC)"
- depends on PPC
- select CRYPTO_HASH
- help
- MD5 message digest algorithm (RFC1321) implemented
- in PPC assembler.
-
-config CRYPTO_MD5_SPARC64
- tristate "MD5 digest algorithm (SPARC64)"
- depends on SPARC64
- select CRYPTO_MD5
- select CRYPTO_HASH
- help
- MD5 message digest algorithm (RFC1321) implemented
- using sparc64 crypto instructions, when available.
-
-config CRYPTO_MICHAEL_MIC
- tristate "Michael MIC keyed digest algorithm"
- select CRYPTO_HASH
- help
- Michael MIC is used for message integrity protection in TKIP
- (IEEE 802.11i). This algorithm is required for TKIP, but it
- should not be used for other purposes because of the weakness
- of the algorithm.
-
config CRYPTO_RMD160
- tristate "RIPEMD-160 digest algorithm"
+ tristate "RIPEMD-160"
select CRYPTO_HASH
help
- RIPEMD-160 (ISO/IEC 10118-3:2004).
+ RIPEMD-160 hash function (ISO/IEC 10118-3)
RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
to be used as a secure replacement for the 128-bit hash functions
MD4, MD5 and its predecessor RIPEMD
(not to be confused with RIPEMD-128).
- It's speed is comparable to SHA1 and there are no known attacks
+ Its speed is comparable to SHA-1 and there are no known attacks
against RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See https://homes.esat.kuleuven.be/~bosselae/ripemd160.html
+ for further information.
config CRYPTO_SHA1
- tristate "SHA1 digest algorithm"
+ tristate "SHA-1"
select CRYPTO_HASH
select CRYPTO_LIB_SHA1
help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
-config CRYPTO_SHA1_SSSE3
- tristate "SHA1 digest algorithm (SSSE3/AVX/AVX2/SHA-NI)"
- depends on X86 && 64BIT
- select CRYPTO_SHA1
- select CRYPTO_HASH
- help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using Supplemental SSE3 (SSSE3) instructions or Advanced Vector
- Extensions (AVX/AVX2) or SHA-NI(SHA Extensions New Instructions),
- when available.
-
-config CRYPTO_SHA256_SSSE3
- tristate "SHA256 digest algorithm (SSSE3/AVX/AVX2/SHA-NI)"
- depends on X86 && 64BIT
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA-256 secure hash standard (DFIPS 180-2) implemented
- using Supplemental SSE3 (SSSE3) instructions, or Advanced Vector
- Extensions version 1 (AVX1), or Advanced Vector Extensions
- version 2 (AVX2) instructions, or SHA-NI (SHA Extensions New
- Instructions) when available.
-
-config CRYPTO_SHA512_SSSE3
- tristate "SHA512 digest algorithm (SSSE3/AVX/AVX2)"
- depends on X86 && 64BIT
- select CRYPTO_SHA512
- select CRYPTO_HASH
- help
- SHA-512 secure hash standard (DFIPS 180-2) implemented
- using Supplemental SSE3 (SSSE3) instructions, or Advanced Vector
- Extensions version 1 (AVX1), or Advanced Vector Extensions
- version 2 (AVX2) instructions, when available.
-
-config CRYPTO_SHA512_S390
- tristate "SHA384 and SHA512 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA512 secure hash standard.
-
- It is available as of z10.
-
-config CRYPTO_SHA1_OCTEON
- tristate "SHA1 digest algorithm (OCTEON)"
- depends on CPU_CAVIUM_OCTEON
- select CRYPTO_SHA1
- select CRYPTO_HASH
- help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using OCTEON crypto instructions, when available.
-
-config CRYPTO_SHA1_SPARC64
- tristate "SHA1 digest algorithm (SPARC64)"
- depends on SPARC64
- select CRYPTO_SHA1
- select CRYPTO_HASH
- help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using sparc64 crypto instructions, when available.
-
-config CRYPTO_SHA1_PPC
- tristate "SHA1 digest algorithm (powerpc)"
- depends on PPC
- help
- This is the powerpc hardware accelerated implementation of the
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
-config CRYPTO_SHA1_PPC_SPE
- tristate "SHA1 digest algorithm (PPC SPE)"
- depends on PPC && SPE
- help
- SHA-1 secure hash standard (DFIPS 180-4) implemented
- using powerpc SPE SIMD instruction set.
-
-config CRYPTO_SHA1_S390
- tristate "SHA1 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-
- It is available as of z990.
+ SHA-1 secure hash algorithm (FIPS 180, ISO/IEC 10118-3)
config CRYPTO_SHA256
- tristate "SHA224 and SHA256 digest algorithm"
+ tristate "SHA-224 and SHA-256"
select CRYPTO_HASH
select CRYPTO_LIB_SHA256
help
- SHA256 secure hash standard (DFIPS 180-2).
-
- This version of SHA implements a 256 bit hash with 128 bits of
- security against collision attacks.
-
- This code also includes SHA-224, a 224 bit hash with 112 bits
- of security against collision attacks.
-
-config CRYPTO_SHA256_PPC_SPE
- tristate "SHA224 and SHA256 digest algorithm (PPC SPE)"
- depends on PPC && SPE
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA224 and SHA256 secure hash standard (DFIPS 180-2)
- implemented using powerpc SPE SIMD instruction set.
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180, ISO/IEC 10118-3)
-config CRYPTO_SHA256_OCTEON
- tristate "SHA224 and SHA256 digest algorithm (OCTEON)"
- depends on CPU_CAVIUM_OCTEON
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA-256 secure hash standard (DFIPS 180-2) implemented
- using OCTEON crypto instructions, when available.
-
-config CRYPTO_SHA256_SPARC64
- tristate "SHA224 and SHA256 digest algorithm (SPARC64)"
- depends on SPARC64
- select CRYPTO_SHA256
- select CRYPTO_HASH
- help
- SHA-256 secure hash standard (DFIPS 180-2) implemented
- using sparc64 crypto instructions, when available.
-
-config CRYPTO_SHA256_S390
- tristate "SHA256 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA256 secure hash standard (DFIPS 180-2).
-
- It is available as of z9.
+ This is required for IPsec AH (XFRM_AH) and IPsec ESP (XFRM_ESP).
+ Used by the btrfs filesystem, Ceph, NFS, and SMB.
config CRYPTO_SHA512
- tristate "SHA384 and SHA512 digest algorithms"
+ tristate "SHA-384 and SHA-512"
select CRYPTO_HASH
help
- SHA512 secure hash standard (DFIPS 180-2).
-
- This version of SHA implements a 512 bit hash with 256 bits of
- security against collision attacks.
-
- This code also includes SHA-384, a 384 bit hash with 192 bits
- of security against collision attacks.
-
-config CRYPTO_SHA512_OCTEON
- tristate "SHA384 and SHA512 digest algorithms (OCTEON)"
- depends on CPU_CAVIUM_OCTEON
- select CRYPTO_SHA512
- select CRYPTO_HASH
- help
- SHA-512 secure hash standard (DFIPS 180-2) implemented
- using OCTEON crypto instructions, when available.
-
-config CRYPTO_SHA512_SPARC64
- tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
- depends on SPARC64
- select CRYPTO_SHA512
- select CRYPTO_HASH
- help
- SHA-512 secure hash standard (DFIPS 180-2) implemented
- using sparc64 crypto instructions, when available.
+ SHA-384 and SHA-512 secure hash algorithms (FIPS 180, ISO/IEC 10118-3)
config CRYPTO_SHA3
- tristate "SHA3 digest algorithm"
- select CRYPTO_HASH
- help
- SHA-3 secure hash standard (DFIPS 202). It's based on
- cryptographic sponge function family called Keccak.
-
- References:
- http://keccak.noekeon.org/
-
-config CRYPTO_SHA3_256_S390
- tristate "SHA3_224 and SHA3_256 digest algorithm"
- depends on S390
+ tristate "SHA-3"
select CRYPTO_HASH
help
- This is the s390 hardware accelerated implementation of the
- SHA3_256 secure hash standard.
-
- It is available as of z14.
-
-config CRYPTO_SHA3_512_S390
- tristate "SHA3_384 and SHA3_512 digest algorithm"
- depends on S390
- select CRYPTO_HASH
- help
- This is the s390 hardware accelerated implementation of the
- SHA3_512 secure hash standard.
-
- It is available as of z14.
+ SHA-3 secure hash algorithms (FIPS 202, ISO/IEC 10118-3)
config CRYPTO_SM3
tristate
config CRYPTO_SM3_GENERIC
- tristate "SM3 digest algorithm"
+ tristate "SM3 (ShangMi 3)"
select CRYPTO_HASH
select CRYPTO_SM3
help
- SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3).
- It is part of the Chinese Commercial Cryptography suite.
+ SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012, ISO/IEC 10118-3)
+
+ This is part of the Chinese Commercial Cryptography suite.
References:
http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
-config CRYPTO_SM3_AVX_X86_64
- tristate "SM3 digest algorithm (x86_64/AVX)"
- depends on X86 && 64BIT
- select CRYPTO_HASH
- select CRYPTO_SM3
- help
- SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3).
- It is part of the Chinese Commercial Cryptography suite. This is
- SM3 optimized implementation using Advanced Vector Extensions (AVX)
- when available.
-
- If unsure, say N.
-
config CRYPTO_STREEBOG
- tristate "Streebog Hash Function"
+ tristate "Streebog"
select CRYPTO_HASH
help
- Streebog Hash Function (GOST R 34.11-2012, RFC 6986) is one of the Russian
- cryptographic standard algorithms (called GOST algorithms).
- This setting enables two hash algorithms with 256 and 512 bits output.
+ Streebog Hash Function (GOST R 34.11-2012, RFC 6986, ISO/IEC 10118-3)
+
+ This is one of the Russian cryptographic standard algorithms (called
+ GOST algorithms). This setting enables two hash algorithms with
+ 256- and 512-bit output.
References:
https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
https://tools.ietf.org/html/rfc6986
-config CRYPTO_WP512
- tristate "Whirlpool digest algorithms"
+config CRYPTO_VMAC
+ tristate "VMAC"
select CRYPTO_HASH
+ select CRYPTO_MANAGER
help
- Whirlpool hash algorithm 512, 384 and 256-bit hashes
-
- Whirlpool-512 is part of the NESSIE cryptographic primitives.
- Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard
-
- See also:
- <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
+ VMAC is a message authentication algorithm designed for
+ very high speed on 64-bit architectures.
-config CRYPTO_GHASH_CLMUL_NI_INTEL
- tristate "GHASH hash function (CLMUL-NI accelerated)"
- depends on X86 && 64BIT
- select CRYPTO_CRYPTD
- help
- This is the x86_64 CLMUL-NI accelerated implementation of
- GHASH, the hash function used in GCM (Galois/Counter mode).
+ See https://fastcrypto.org/vmac for further information.
-config CRYPTO_GHASH_S390
- tristate "GHASH hash function"
- depends on S390
+config CRYPTO_WP512
+ tristate "Whirlpool"
select CRYPTO_HASH
help
- This is the s390 hardware accelerated implementation of GHASH,
- the hash function used in GCM (Galois/Counter mode).
-
- It is available as of z196.
-
-comment "Ciphers"
-
-config CRYPTO_AES
- tristate "AES cipher algorithms"
- select CRYPTO_ALGAPI
- select CRYPTO_LIB_AES
- help
- AES cipher algorithms (FIPS-197). AES uses the Rijndael
- algorithm.
-
- Rijndael appears to be consistently a very good performer in
- both hardware and software across a wide range of computing
- environments regardless of its use in feedback or non-feedback
- modes. Its key setup time is excellent, and its key agility is
- good. Rijndael's very low memory requirements make it very well
- suited for restricted-space environments, in which it also
- demonstrates excellent performance. Rijndael's operations are
- among the easiest to defend against power and timing attacks.
-
- The AES specifies three key sizes: 128, 192 and 256 bits
-
- See <http://csrc.nist.gov/CryptoToolkit/aes/> for more information.
-
-config CRYPTO_AES_TI
- tristate "Fixed time AES cipher"
- select CRYPTO_ALGAPI
- select CRYPTO_LIB_AES
- help
- This is a generic implementation of AES that attempts to eliminate
- data dependent latencies as much as possible without affecting
- performance too much. It is intended for use by the generic CCM
- and GCM drivers, and other CTR or CMAC/XCBC based modes that rely
- solely on encryption (although decryption is supported as well, but
- with a more dramatic performance hit)
-
- Instead of using 16 lookup tables of 1 KB each, (8 for encryption and
- 8 for decryption), this implementation only uses just two S-boxes of
- 256 bytes each, and attempts to eliminate data dependent latencies by
- prefetching the entire table into the cache at the start of each
- block. Interrupts are also disabled to avoid races where cachelines
- are evicted when the CPU is interrupted to do something else.
-
-config CRYPTO_AES_NI_INTEL
- tristate "AES cipher algorithms (AES-NI)"
- depends on X86
- select CRYPTO_AEAD
- select CRYPTO_LIB_AES
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
- help
- Use Intel AES-NI instructions for AES algorithm.
-
- AES cipher algorithms (FIPS-197). AES uses the Rijndael
- algorithm.
-
- Rijndael appears to be consistently a very good performer in
- both hardware and software across a wide range of computing
- environments regardless of its use in feedback or non-feedback
- modes. Its key setup time is excellent, and its key agility is
- good. Rijndael's very low memory requirements make it very well
- suited for restricted-space environments, in which it also
- demonstrates excellent performance. Rijndael's operations are
- among the easiest to defend against power and timing attacks.
-
- The AES specifies three key sizes: 128, 192 and 256 bits
-
- See <http://csrc.nist.gov/encryption/aes/> for more information.
-
- In addition to AES cipher algorithm support, the acceleration
- for some popular block cipher mode is supported too, including
- ECB, CBC, LRW, XTS. The 64 bit version has additional
- acceleration for CTR and XCTR.
-
-config CRYPTO_AES_SPARC64
- tristate "AES cipher algorithms (SPARC64)"
- depends on SPARC64
- select CRYPTO_SKCIPHER
- help
- Use SPARC64 crypto opcodes for AES algorithm.
-
- AES cipher algorithms (FIPS-197). AES uses the Rijndael
- algorithm.
-
- Rijndael appears to be consistently a very good performer in
- both hardware and software across a wide range of computing
- environments regardless of its use in feedback or non-feedback
- modes. Its key setup time is excellent, and its key agility is
- good. Rijndael's very low memory requirements make it very well
- suited for restricted-space environments, in which it also
- demonstrates excellent performance. Rijndael's operations are
- among the easiest to defend against power and timing attacks.
-
- The AES specifies three key sizes: 128, 192 and 256 bits
-
- See <http://csrc.nist.gov/encryption/aes/> for more information.
-
- In addition to AES cipher algorithm support, the acceleration
- for some popular block cipher mode is supported too, including
- ECB and CBC.
-
-config CRYPTO_AES_PPC_SPE
- tristate "AES cipher algorithms (PPC SPE)"
- depends on PPC && SPE
- select CRYPTO_SKCIPHER
- help
- AES cipher algorithms (FIPS-197). Additionally the acceleration
- for popular block cipher modes ECB, CBC, CTR and XTS is supported.
- This module should only be used for low power (router) devices
- without hardware AES acceleration (e.g. caam crypto). It reduces the
- size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
- timining attacks. Nevertheless it might be not as secure as other
- architecture specific assembler implementations that work on 1KB
- tables or 256 bytes S-boxes.
-
-config CRYPTO_AES_S390
- tristate "AES cipher algorithms"
- depends on S390
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- help
- This is the s390 hardware accelerated implementation of the
- AES cipher algorithms (FIPS-197).
-
- As of z9 the ECB and CBC modes are hardware accelerated
- for 128 bit keys.
- As of z10 the ECB and CBC modes are hardware accelerated
- for all AES key sizes.
- As of z196 the CTR mode is hardware accelerated for all AES
- key sizes and XTS mode is hardware accelerated for 256 and
- 512 bit keys.
-
-config CRYPTO_ANUBIS
- tristate "Anubis cipher algorithm"
- depends on CRYPTO_USER_API_ENABLE_OBSOLETE
- select CRYPTO_ALGAPI
- help
- Anubis cipher algorithm.
-
- Anubis is a variable key length cipher which can use keys from
- 128 bits to 320 bits in length. It was evaluated as a entrant
- in the NESSIE competition.
-
- See also:
- <https://www.cosic.esat.kuleuven.be/nessie/reports/>
- <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
-
-config CRYPTO_ARC4
- tristate "ARC4 cipher algorithm"
- depends on CRYPTO_USER_API_ENABLE_OBSOLETE
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_ARC4
- help
- ARC4 cipher algorithm.
-
- ARC4 is a stream cipher using keys ranging from 8 bits to 2048
- bits in length. This algorithm is required for driver-based
- WEP, but it should not be for other purposes because of the
- weakness of the algorithm.
-
-config CRYPTO_BLOWFISH
- tristate "Blowfish cipher algorithm"
- select CRYPTO_ALGAPI
- select CRYPTO_BLOWFISH_COMMON
- help
- Blowfish cipher algorithm, by Bruce Schneier.
-
- This is a variable key length cipher which can use keys from 32
- bits to 448 bits in length. It's fast, simple and specifically
- designed for use on "large microprocessors".
-
- See also:
- <https://www.schneier.com/blowfish.html>
-
-config CRYPTO_BLOWFISH_COMMON
- tristate
- help
- Common parts of the Blowfish cipher algorithm shared by the
- generic c and the assembler implementations.
-
- See also:
- <https://www.schneier.com/blowfish.html>
-
-config CRYPTO_BLOWFISH_X86_64
- tristate "Blowfish cipher algorithm (x86_64)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_BLOWFISH_COMMON
- imply CRYPTO_CTR
- help
- Blowfish cipher algorithm (x86_64), by Bruce Schneier.
-
- This is a variable key length cipher which can use keys from 32
- bits to 448 bits in length. It's fast, simple and specifically
- designed for use on "large microprocessors".
-
- See also:
- <https://www.schneier.com/blowfish.html>
-
-config CRYPTO_CAMELLIA
- tristate "Camellia cipher algorithms"
- select CRYPTO_ALGAPI
- help
- Camellia cipher algorithms module.
-
- Camellia is a symmetric key block cipher developed jointly
- at NTT and Mitsubishi Electric Corporation.
-
- The Camellia specifies three key sizes: 128, 192 and 256 bits.
-
- See also:
- <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
-
-config CRYPTO_CAMELLIA_X86_64
- tristate "Camellia cipher algorithm (x86_64)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- imply CRYPTO_CTR
- help
- Camellia cipher algorithm module (x86_64).
-
- Camellia is a symmetric key block cipher developed jointly
- at NTT and Mitsubishi Electric Corporation.
-
- The Camellia specifies three key sizes: 128, 192 and 256 bits.
-
- See also:
- <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
-
-config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
- tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_CAMELLIA_X86_64
- select CRYPTO_SIMD
- imply CRYPTO_XTS
- help
- Camellia cipher algorithm module (x86_64/AES-NI/AVX).
-
- Camellia is a symmetric key block cipher developed jointly
- at NTT and Mitsubishi Electric Corporation.
-
- The Camellia specifies three key sizes: 128, 192 and 256 bits.
-
- See also:
- <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
-
-config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
- tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)"
- depends on X86 && 64BIT
- select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
- help
- Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
-
- Camellia is a symmetric key block cipher developed jointly
- at NTT and Mitsubishi Electric Corporation.
-
- The Camellia specifies three key sizes: 128, 192 and 256 bits.
-
- See also:
- <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
-
-config CRYPTO_CAMELLIA_SPARC64
- tristate "Camellia cipher algorithm (SPARC64)"
- depends on SPARC64
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- help
- Camellia cipher algorithm module (SPARC64).
-
- Camellia is a symmetric key block cipher developed jointly
- at NTT and Mitsubishi Electric Corporation.
-
- The Camellia specifies three key sizes: 128, 192 and 256 bits.
-
- See also:
- <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
-
-config CRYPTO_CAST_COMMON
- tristate
- help
- Common parts of the CAST cipher algorithms shared by the
- generic c and the assembler implementations.
-
-config CRYPTO_CAST5
- tristate "CAST5 (CAST-128) cipher algorithm"
- select CRYPTO_ALGAPI
- select CRYPTO_CAST_COMMON
- help
- The CAST5 encryption algorithm (synonymous with CAST-128) is
- described in RFC2144.
-
-config CRYPTO_CAST5_AVX_X86_64
- tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_CAST5
- select CRYPTO_CAST_COMMON
- select CRYPTO_SIMD
- imply CRYPTO_CTR
- help
- The CAST5 encryption algorithm (synonymous with CAST-128) is
- described in RFC2144.
-
- This module provides the Cast5 cipher algorithm that processes
- sixteen blocks parallel using the AVX instruction set.
-
-config CRYPTO_CAST6
- tristate "CAST6 (CAST-256) cipher algorithm"
- select CRYPTO_ALGAPI
- select CRYPTO_CAST_COMMON
- help
- The CAST6 encryption algorithm (synonymous with CAST-256) is
- described in RFC2612.
-
-config CRYPTO_CAST6_AVX_X86_64
- tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_CAST6
- select CRYPTO_CAST_COMMON
- select CRYPTO_SIMD
- imply CRYPTO_XTS
- imply CRYPTO_CTR
- help
- The CAST6 encryption algorithm (synonymous with CAST-256) is
- described in RFC2612.
-
- This module provides the Cast6 cipher algorithm that processes
- eight blocks parallel using the AVX instruction set.
-
-config CRYPTO_DES
- tristate "DES and Triple DES EDE cipher algorithms"
- select CRYPTO_ALGAPI
- select CRYPTO_LIB_DES
- help
- DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
+ Whirlpool hash function (ISO/IEC 10118-3)
-config CRYPTO_DES_SPARC64
- tristate "DES and Triple DES EDE cipher algorithms (SPARC64)"
- depends on SPARC64
- select CRYPTO_ALGAPI
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- help
- DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
- optimized using SPARC64 crypto opcodes.
-
-config CRYPTO_DES3_EDE_X86_64
- tristate "Triple DES EDE cipher algorithm (x86-64)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_DES
- imply CRYPTO_CTR
- help
- Triple DES EDE (FIPS 46-3) algorithm.
-
- This module provides implementation of the Triple DES EDE cipher
- algorithm that is optimized for x86-64 processors. Two versions of
- algorithm are provided; regular processing one input block and
- one that processes three blocks parallel.
-
-config CRYPTO_DES_S390
- tristate "DES and Triple DES cipher algorithms"
- depends on S390
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_DES
- help
- This is the s390 hardware accelerated implementation of the
- DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
-
- As of z990 the ECB and CBC mode are hardware accelerated.
- As of z196 the CTR mode is hardware accelerated.
-
-config CRYPTO_FCRYPT
- tristate "FCrypt cipher algorithm"
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- help
- FCrypt algorithm used by RxRPC.
-
-config CRYPTO_KHAZAD
- tristate "Khazad cipher algorithm"
- depends on CRYPTO_USER_API_ENABLE_OBSOLETE
- select CRYPTO_ALGAPI
- help
- Khazad cipher algorithm.
-
- Khazad was a finalist in the initial NESSIE competition. It is
- an algorithm optimized for 64-bit processors with good performance
- on 32-bit processors. Khazad uses an 128 bit key size.
-
- See also:
- <http://www.larc.usp.br/~pbarreto/KhazadPage.html>
-
-config CRYPTO_CHACHA20
- tristate "ChaCha stream cipher algorithms"
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_SKCIPHER
- help
- The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms.
-
- ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
- Bernstein and further specified in RFC7539 for use in IETF protocols.
- This is the portable C implementation of ChaCha20. See also:
- <https://cr.yp.to/chacha/chacha-20080128.pdf>
-
- XChaCha20 is the application of the XSalsa20 construction to ChaCha20
- rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length
- from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits,
- while provably retaining ChaCha20's security. See also:
- <https://cr.yp.to/snuffle/xsalsa-20081128.pdf>
-
- XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly
- reduced security margin but increased performance. It can be needed
- in some performance-sensitive scenarios.
-
-config CRYPTO_CHACHA20_X86_64
- tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- help
- SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20,
- XChaCha20, and XChaCha12 stream ciphers.
-
-config CRYPTO_CHACHA_MIPS
- tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)"
- depends on CPU_MIPS32_R2
- select CRYPTO_SKCIPHER
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
-
-config CRYPTO_CHACHA_S390
- tristate "ChaCha20 stream cipher"
- depends on S390
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_CHACHA_GENERIC
- select CRYPTO_ARCH_HAVE_LIB_CHACHA
- help
- This is the s390 SIMD implementation of the ChaCha20 stream
- cipher (RFC 7539).
-
- It is available as of z13.
-
-config CRYPTO_SEED
- tristate "SEED cipher algorithm"
- depends on CRYPTO_USER_API_ENABLE_OBSOLETE
- select CRYPTO_ALGAPI
- help
- SEED cipher algorithm (RFC4269).
-
- SEED is a 128-bit symmetric key block cipher that has been
- developed by KISA (Korea Information Security Agency) as a
- national standard encryption algorithm of the Republic of Korea.
- It is a 16 round block cipher with the key size of 128 bit.
-
- See also:
- <http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp>
-
-config CRYPTO_ARIA
- tristate "ARIA cipher algorithm"
- select CRYPTO_ALGAPI
- help
- ARIA cipher algorithm (RFC5794).
-
- ARIA is a standard encryption algorithm of the Republic of Korea.
- The ARIA specifies three key sizes and rounds.
- 128-bit: 12 rounds.
- 192-bit: 14 rounds.
- 256-bit: 16 rounds.
-
- See also:
- <https://seed.kisa.or.kr/kisa/algorithm/EgovAriaInfo.do>
-
-config CRYPTO_SERPENT
- tristate "Serpent cipher algorithm"
- select CRYPTO_ALGAPI
- help
- Serpent cipher algorithm, by Anderson, Biham & Knudsen.
-
- Keys are allowed to be from 0 to 256 bits in length, in steps
- of 8 bits.
-
- See also:
- <https://www.cl.cam.ac.uk/~rja14/serpent.html>
-
-config CRYPTO_SERPENT_SSE2_X86_64
- tristate "Serpent cipher algorithm (x86_64/SSE2)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_SERPENT
- select CRYPTO_SIMD
- imply CRYPTO_CTR
- help
- Serpent cipher algorithm, by Anderson, Biham & Knudsen.
-
- Keys are allowed to be from 0 to 256 bits in length, in steps
- of 8 bits.
-
- This module provides Serpent cipher algorithm that processes eight
- blocks parallel using SSE2 instruction set.
-
- See also:
- <https://www.cl.cam.ac.uk/~rja14/serpent.html>
-
-config CRYPTO_SERPENT_SSE2_586
- tristate "Serpent cipher algorithm (i586/SSE2)"
- depends on X86 && !64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_SERPENT
- select CRYPTO_SIMD
- imply CRYPTO_CTR
- help
- Serpent cipher algorithm, by Anderson, Biham & Knudsen.
-
- Keys are allowed to be from 0 to 256 bits in length, in steps
- of 8 bits.
-
- This module provides Serpent cipher algorithm that processes four
- blocks parallel using SSE2 instruction set.
-
- See also:
- <https://www.cl.cam.ac.uk/~rja14/serpent.html>
-
-config CRYPTO_SERPENT_AVX_X86_64
- tristate "Serpent cipher algorithm (x86_64/AVX)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_SERPENT
- select CRYPTO_SIMD
- imply CRYPTO_XTS
- imply CRYPTO_CTR
- help
- Serpent cipher algorithm, by Anderson, Biham & Knudsen.
-
- Keys are allowed to be from 0 to 256 bits in length, in steps
- of 8 bits.
-
- This module provides the Serpent cipher algorithm that processes
- eight blocks parallel using the AVX instruction set.
-
- See also:
- <https://www.cl.cam.ac.uk/~rja14/serpent.html>
-
-config CRYPTO_SERPENT_AVX2_X86_64
- tristate "Serpent cipher algorithm (x86_64/AVX2)"
- depends on X86 && 64BIT
- select CRYPTO_SERPENT_AVX_X86_64
- help
- Serpent cipher algorithm, by Anderson, Biham & Knudsen.
-
- Keys are allowed to be from 0 to 256 bits in length, in steps
- of 8 bits.
-
- This module provides Serpent cipher algorithm that processes 16
- blocks parallel using AVX2 instruction set.
-
- See also:
- <https://www.cl.cam.ac.uk/~rja14/serpent.html>
-
-config CRYPTO_SM4
- tristate
-
-config CRYPTO_SM4_GENERIC
- tristate "SM4 cipher algorithm"
- select CRYPTO_ALGAPI
- select CRYPTO_SM4
- help
- SM4 cipher algorithms (OSCCA GB/T 32907-2016).
-
- SM4 (GBT.32907-2016) is a cryptographic standard issued by the
- Organization of State Commercial Administration of China (OSCCA)
- as an authorized cryptographic algorithms for the use within China.
+ 512, 384 and 256-bit hashes.
- SMS4 was originally created for use in protecting wireless
- networks, and is mandated in the Chinese National Standard for
- Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure)
- (GB.15629.11-2003).
-
- The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and
- standardized through TC 260 of the Standardization Administration
- of the People's Republic of China (SAC).
-
- The input, output, and key of SMS4 are each 128 bits.
-
- See also: <https://eprint.iacr.org/2008/329.pdf>
-
- If unsure, say N.
-
-config CRYPTO_SM4_AESNI_AVX_X86_64
- tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
- select CRYPTO_ALGAPI
- select CRYPTO_SM4
- help
- SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX).
-
- SM4 (GBT.32907-2016) is a cryptographic standard issued by the
- Organization of State Commercial Administration of China (OSCCA)
- as an authorized cryptographic algorithms for the use within China.
-
- This is SM4 optimized implementation using AES-NI/AVX/x86_64
- instruction set for block cipher. Through two affine transforms,
- we can use the AES S-Box to simulate the SM4 S-Box to achieve the
- effect of instruction acceleration.
+ Whirlpool-512 is part of the NESSIE cryptographic primitives.
- If unsure, say N.
+ See https://web.archive.org/web/20171129084214/http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html
+ for further information.
-config CRYPTO_SM4_AESNI_AVX2_X86_64
- tristate "SM4 cipher algorithm (x86_64/AES-NI/AVX2)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
- select CRYPTO_ALGAPI
- select CRYPTO_SM4
- select CRYPTO_SM4_AESNI_AVX_X86_64
+config CRYPTO_XCBC
+ tristate "XCBC-MAC (Extended Cipher Block Chaining MAC)"
+ select CRYPTO_HASH
+ select CRYPTO_MANAGER
help
- SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX2).
+ XCBC-MAC (Extended Cipher Block Chaining Message Authentication
+ Code) (RFC3566)
- SM4 (GBT.32907-2016) is a cryptographic standard issued by the
- Organization of State Commercial Administration of China (OSCCA)
- as an authorized cryptographic algorithms for the use within China.
-
- This is SM4 optimized implementation using AES-NI/AVX2/x86_64
- instruction set for block cipher. Through two affine transforms,
- we can use the AES S-Box to simulate the SM4 S-Box to achieve the
- effect of instruction acceleration.
-
- If unsure, say N.
-
-config CRYPTO_TEA
- tristate "TEA, XTEA and XETA cipher algorithms"
- depends on CRYPTO_USER_API_ENABLE_OBSOLETE
- select CRYPTO_ALGAPI
+config CRYPTO_XXHASH
+ tristate "xxHash"
+ select CRYPTO_HASH
+ select XXHASH
help
- TEA cipher algorithm.
-
- Tiny Encryption Algorithm is a simple cipher that uses
- many rounds for security. It is very fast and uses
- little memory.
-
- Xtendend Tiny Encryption Algorithm is a modification to
- the TEA algorithm to address a potential key weakness
- in the TEA algorithm.
-
- Xtendend Encryption Tiny Algorithm is a mis-implementation
- of the XTEA algorithm for compatibility purposes.
+ xxHash non-cryptographic hash algorithm
-config CRYPTO_TWOFISH
- tristate "Twofish cipher algorithm"
- select CRYPTO_ALGAPI
- select CRYPTO_TWOFISH_COMMON
- help
- Twofish cipher algorithm.
+ Extremely fast, working at speeds close to RAM limits.
- Twofish was submitted as an AES (Advanced Encryption Standard)
- candidate cipher by researchers at CounterPane Systems. It is a
- 16 round block cipher supporting key sizes of 128, 192, and 256
- bits.
+ Used by the btrfs filesystem.
- See also:
- <https://www.schneier.com/twofish.html>
+endmenu
-config CRYPTO_TWOFISH_COMMON
- tristate
- help
- Common parts of the Twofish cipher algorithm shared by the
- generic c and the assembler implementations.
+menu "CRCs (cyclic redundancy checks)"
-config CRYPTO_TWOFISH_586
- tristate "Twofish cipher algorithms (i586)"
- depends on (X86 || UML_X86) && !64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_TWOFISH_COMMON
- imply CRYPTO_CTR
+config CRYPTO_CRC32C
+ tristate "CRC32c"
+ select CRYPTO_HASH
+ select CRC32
help
- Twofish cipher algorithm.
+ CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
- Twofish was submitted as an AES (Advanced Encryption Standard)
- candidate cipher by researchers at CounterPane Systems. It is a
- 16 round block cipher supporting key sizes of 128, 192, and 256
- bits.
+ A 32-bit CRC (cyclic redundancy check) with a polynomial defined
+ by G. Castagnoli, S. Braeuer and M. Herrman in "Optimization of Cyclic
+ Redundancy-Check Codes with 24 and 32 Parity Bits", IEEE Transactions
+ on Communications, Vol. 41, No. 6, June 1993, selected for use with
+ iSCSI.
- See also:
- <https://www.schneier.com/twofish.html>
+ Used by btrfs, ext4, jbd2, NVMeoF/TCP, and iSCSI.
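The polynomial named in this help text is the Castagnoli CRC-32C, the same one implemented by the x86 SSE4.2 crc32 instruction. As a minimal reference for what the algorithm computes, here is a bit-at-a-time sketch in plain C; the in-kernel implementations are table-driven or instruction-accelerated, never this slow form:

    #include <stdint.h>
    #include <stddef.h>

    /* Bitwise CRC32c: reflected Castagnoli polynomial 0x82F63B78. */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        crc = ~crc;
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
        }
        return ~crc;
    }

    /* crc32c(0, "123456789", 9) yields the standard check value 0xe3069283. */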
-config CRYPTO_TWOFISH_X86_64
- tristate "Twofish cipher algorithm (x86_64)"
- depends on (X86 || UML_X86) && 64BIT
- select CRYPTO_ALGAPI
- select CRYPTO_TWOFISH_COMMON
- imply CRYPTO_CTR
+config CRYPTO_CRC32
+ tristate "CRC32"
+ select CRYPTO_HASH
+ select CRC32
help
- Twofish cipher algorithm (x86_64).
+ CRC32 CRC algorithm (IEEE 802.3)
- Twofish was submitted as an AES (Advanced Encryption Standard)
- candidate cipher by researchers at CounterPane Systems. It is a
- 16 round block cipher supporting key sizes of 128, 192, and 256
- bits.
+ Used by RoCEv2 and f2fs.
- See also:
- <https://www.schneier.com/twofish.html>
-
-config CRYPTO_TWOFISH_X86_64_3WAY
- tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_TWOFISH_COMMON
- select CRYPTO_TWOFISH_X86_64
+config CRYPTO_CRCT10DIF
+ tristate "CRCT10DIF"
+ select CRYPTO_HASH
help
- Twofish cipher algorithm (x86_64, 3-way parallel).
-
- Twofish was submitted as an AES (Advanced Encryption Standard)
- candidate cipher by researchers at CounterPane Systems. It is a
- 16 round block cipher supporting key sizes of 128, 192, and 256
- bits.
+ CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
- This module provides Twofish cipher algorithm that processes three
- blocks parallel, utilizing resources of out-of-order CPUs better.
+ CRC algorithm used by the SCSI Block Commands standard.
- See also:
- <https://www.schneier.com/twofish.html>
-
-config CRYPTO_TWOFISH_AVX_X86_64
- tristate "Twofish cipher algorithm (x86_64/AVX)"
- depends on X86 && 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_SIMD
- select CRYPTO_TWOFISH_COMMON
- select CRYPTO_TWOFISH_X86_64
- select CRYPTO_TWOFISH_X86_64_3WAY
- imply CRYPTO_XTS
+config CRYPTO_CRC64_ROCKSOFT
+ tristate "CRC64 based on Rocksoft Model algorithm"
+ depends on CRC64
+ select CRYPTO_HASH
help
- Twofish cipher algorithm (x86_64/AVX).
+ CRC64 CRC algorithm based on the Rocksoft Model CRC Algorithm
- Twofish was submitted as an AES (Advanced Encryption Standard)
- candidate cipher by researchers at CounterPane Systems. It is a
- 16 round block cipher supporting key sizes of 128, 192, and 256
- bits.
+ Used by the NVMe implementation of T10 DIF (BLK_DEV_INTEGRITY)
- This module provides the Twofish cipher algorithm that processes
- eight blocks parallel using the AVX Instruction Set.
+ See https://zlib.net/crc_v3.txt
- See also:
- <https://www.schneier.com/twofish.html>
+endmenu
-comment "Compression"
+menu "Compression"
config CRYPTO_DEFLATE
- tristate "Deflate compression algorithm"
+ tristate "Deflate"
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
select ZLIB_INFLATE
select ZLIB_DEFLATE
help
- This is the Deflate algorithm (RFC1951), specified for use in
- IPSec with the IPCOMP protocol (RFC3173, RFC2394).
+ Deflate compression algorithm (RFC1951)
- You will most probably want this if using IPSec.
+ Used by IPSec with the IPCOMP protocol (RFC3173, RFC2394)
config CRYPTO_LZO
- tristate "LZO compression algorithm"
+ tristate "LZO"
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
select LZO_COMPRESS
select LZO_DECOMPRESS
help
- This is the LZO algorithm.
+ LZO compression algorithm
+
+ See https://www.oberhumer.com/opensource/lzo/ for further information.
config CRYPTO_842
- tristate "842 compression algorithm"
+ tristate "842"
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
select 842_COMPRESS
select 842_DECOMPRESS
help
- This is the 842 algorithm.
+ 842 compression algorithm by IBM
+
+ See https://github.com/plauth/lib842 for further information.
config CRYPTO_LZ4
- tristate "LZ4 compression algorithm"
+ tristate "LZ4"
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
select LZ4_COMPRESS
select LZ4_DECOMPRESS
help
- This is the LZ4 algorithm.
+ LZ4 compression algorithm
+
+ See https://github.com/lz4/lz4 for further information.
config CRYPTO_LZ4HC
- tristate "LZ4HC compression algorithm"
+ tristate "LZ4HC"
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
select LZ4HC_COMPRESS
select LZ4_DECOMPRESS
help
- This is the LZ4 high compression mode algorithm.
+ LZ4 high compression mode algorithm
+
+ See https://github.com/lz4/lz4 for further information.
config CRYPTO_ZSTD
- tristate "Zstd compression algorithm"
+ tristate "Zstd"
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
help
- This is the zstd algorithm.
+ zstd compression algorithm
-comment "Random Number Generation"
+ See https://github.com/facebook/zstd for further information.
+
+endmenu
+
+menu "Random number generation"
config CRYPTO_ANSI_CPRNG
- tristate "Pseudo Random Number Generation for Cryptographic modules"
+ tristate "ANSI PRNG (Pseudo Random Number Generator)"
select CRYPTO_AES
select CRYPTO_RNG
help
- This option enables the generic pseudo random number generator
- for cryptographic modules. Uses the Algorithm specified in
- ANSI X9.31 A.2.4. Note that this option must be enabled if
- CRYPTO_FIPS is selected
+ Pseudo RNG (random number generator) (ANSI X9.31 Appendix A.2.4)
+
+ This uses the AES cipher algorithm.
+
+ Note that this option must be enabled if CRYPTO_FIPS is selected
menuconfig CRYPTO_DRBG_MENU
- tristate "NIST SP800-90A DRBG"
+ tristate "NIST SP800-90A DRBG (Deterministic Random Bit Generator)"
help
- NIST SP800-90A compliant DRBG. In the following submenu, one or
- more of the DRBG types must be selected.
+ DRBG (Deterministic Random Bit Generator) (NIST SP800-90A)
+
+ In the following submenu, one or more of the DRBG types must be selected.
if CRYPTO_DRBG_MENU
@@ -2019,17 +1253,21 @@ config CRYPTO_DRBG_HMAC
select CRYPTO_SHA512
config CRYPTO_DRBG_HASH
- bool "Enable Hash DRBG"
+ bool "Hash_DRBG"
select CRYPTO_SHA256
help
- Enable the Hash DRBG variant as defined in NIST SP800-90A.
+ Hash_DRBG variant as defined in NIST SP800-90A.
+
+ This uses the SHA-1, SHA-256, SHA-384, or SHA-512 hash algorithms.
config CRYPTO_DRBG_CTR
- bool "Enable CTR DRBG"
+ bool "CTR_DRBG"
select CRYPTO_AES
select CRYPTO_CTR
help
- Enable the CTR DRBG variant as defined in NIST SP800-90A.
+ CTR_DRBG variant as defined in NIST SP800-90A.
+
+ This uses the AES cipher algorithm with the counter block mode.
config CRYPTO_DRBG
tristate
@@ -2040,72 +1278,90 @@ config CRYPTO_DRBG
endif # if CRYPTO_DRBG_MENU
config CRYPTO_JITTERENTROPY
- tristate "Jitterentropy Non-Deterministic Random Number Generator"
+ tristate "CPU Jitter Non-Deterministic RNG (Random Number Generator)"
select CRYPTO_RNG
help
- The Jitterentropy RNG is a noise that is intended
- to provide seed to another RNG. The RNG does not
- perform any cryptographic whitening of the generated
- random numbers. This Jitterentropy RNG registers with
- the kernel crypto API and can be used by any caller.
+ CPU Jitter RNG (Random Number Generator) from the Jitterentropy library
+
+ A non-physical non-deterministic ("true") RNG (e.g., an entropy source
+ compliant with NIST SP800-90B) intended to provide a seed to a
+ deterministic RNG (e.g. per NIST SP800-90C).
+ This RNG does not perform any cryptographic whitening of the generated
+ random numbers.
+
+ See https://www.chronox.de/jent.html
config CRYPTO_KDF800108_CTR
tristate
select CRYPTO_HMAC
select CRYPTO_SHA256
+endmenu
+menu "Userspace interface"
+
config CRYPTO_USER_API
tristate
config CRYPTO_USER_API_HASH
- tristate "User-space interface for hash algorithms"
+ tristate "Hash algorithms"
depends on NET
select CRYPTO_HASH
select CRYPTO_USER_API
help
- This option enables the user-spaces interface for hash
- algorithms.
+ Enable the userspace interface for hash algorithms.
+
+ See Documentation/crypto/userspace-if.rst and
+ https://www.chronox.de/libkcapi/html/index.html
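Concretely, this interface works over AF_ALG sockets: bind a "hash"-type socket to an algorithm name, accept() an operation fd, write the message, then read back the digest. A minimal sketch, assuming sha256 is available and omitting all error handling:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "sha256",
        };
        unsigned char digest[32];
        int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        int op;

        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        op = accept(tfm, NULL, 0);

        write(op, "hello", 5);            /* feed the message */
        read(op, digest, sizeof(digest)); /* finalize and fetch SHA-256 */

        for (int i = 0; i < 32; i++)
            printf("%02x", digest[i]);
        printf("\n");
        close(op);
        close(tfm);
        return 0;
    }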
config CRYPTO_USER_API_SKCIPHER
- tristate "User-space interface for symmetric key cipher algorithms"
+ tristate "Symmetric key cipher algorithms"
depends on NET
select CRYPTO_SKCIPHER
select CRYPTO_USER_API
help
- This option enables the user-spaces interface for symmetric
- key cipher algorithms.
+ Enable the userspace interface for symmetric key cipher algorithms.
+
+ See Documentation/crypto/userspace-if.rst and
+ https://www.chronox.de/libkcapi/html/index.html
config CRYPTO_USER_API_RNG
- tristate "User-space interface for random number generator algorithms"
+ tristate "RNG (random number generator) algorithms"
depends on NET
select CRYPTO_RNG
select CRYPTO_USER_API
help
- This option enables the user-spaces interface for random
- number generator algorithms.
+ Enable the userspace interface for RNG (random number generator)
+ algorithms.
+
+ See Documentation/crypto/userspace-if.rst and
+ https://www.chronox.de/libkcapi/html/index.html
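The RNG type uses the same AF_ALG pattern, except data only flows one way: reads on the operation fd return random bytes from the bound algorithm. A sketch, assuming "stdrng" resolves to an available DRBG; some rngs additionally want a seed set via setsockopt(ALG_SET_KEY) on the bound socket before accept():

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "rng",
            .salg_name   = "stdrng",
        };
        unsigned char buf[16];
        int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        int op;

        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        op = accept(tfm, NULL, 0);
        read(op, buf, sizeof(buf));  /* 16 random bytes from the kernel RNG */

        for (unsigned int i = 0; i < sizeof(buf); i++)
            printf("%02x", buf[i]);
        printf("\n");
        close(op);
        close(tfm);
        return 0;
    }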
config CRYPTO_USER_API_RNG_CAVP
bool "Enable CAVP testing of DRBG"
depends on CRYPTO_USER_API_RNG && CRYPTO_DRBG
help
- This option enables extra API for CAVP testing via the user-space
- interface: resetting of DRBG entropy, and providing Additional Data.
+ Enable extra APIs in the userspace interface for NIST CAVP
+ (Cryptographic Algorithm Validation Program) testing:
+ - resetting DRBG entropy
+ - providing Additional Data
+
This should only be enabled for CAVP testing. You should say
no unless you know what this is.
config CRYPTO_USER_API_AEAD
- tristate "User-space interface for AEAD cipher algorithms"
+ tristate "AEAD cipher algorithms"
depends on NET
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
select CRYPTO_NULL
select CRYPTO_USER_API
help
- This option enables the user-spaces interface for AEAD
- cipher algorithms.
+ Enable the userspace interface for AEAD cipher algorithms.
+
+ See Documentation/crypto/userspace-if.rst and
+ https://www.chronox.de/libkcapi/html/index.html
config CRYPTO_USER_API_ENABLE_OBSOLETE
- bool "Enable obsolete cryptographic algorithms for userspace"
+ bool "Obsolete cryptographic algorithms"
depends on CRYPTO_USER_API
default y
help
@@ -2114,20 +1370,49 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE
only useful for userspace clients that still rely on them.
config CRYPTO_STATS
- bool "Crypto usage statistics for User-space"
+ bool "Crypto usage statistics"
depends on CRYPTO_USER
help
- This option enables the gathering of crypto stats.
- This will collect:
- - encrypt/decrypt size and numbers of symmeric operations
- - compress/decompress size and numbers of compress operations
- - size and numbers of hash operations
- - encrypt/decrypt/sign/verify numbers for asymmetric operations
- - generate/seed numbers for rng operations
+ Enable the gathering of crypto stats.
+
+ This collects data sizes, numbers of requests, and numbers
+ of errors processed by:
+ - AEAD ciphers (encrypt, decrypt)
+ - asymmetric key ciphers (encrypt, decrypt, verify, sign)
+ - symmetric key ciphers (encrypt, decrypt)
+ - compression algorithms (compress, decompress)
+ - hash algorithms (hash)
+ - key-agreement protocol primitives (setsecret, generate
+ public key, compute shared secret)
+ - RNG (generate, seed)
+
+endmenu
config CRYPTO_HASH_INFO
bool
+if ARM
+source "arch/arm/crypto/Kconfig"
+endif
+if ARM64
+source "arch/arm64/crypto/Kconfig"
+endif
+if MIPS
+source "arch/mips/crypto/Kconfig"
+endif
+if PPC
+source "arch/powerpc/crypto/Kconfig"
+endif
+if S390
+source "arch/s390/crypto/Kconfig"
+endif
+if SPARC
+source "arch/sparc/crypto/Kconfig"
+endif
+if X86
+source "arch/x86/crypto/Kconfig"
+endif
+
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"
diff --git a/crypto/Makefile b/crypto/Makefile
index a6f94e04e1da..303b21c43df0 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -149,7 +149,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
-obj-$(CONFIG_CRYPTO_ARIA) += aria.o
+obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index f866085c8a4a..ab975a420e1e 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req)
return -ENOSYS;
}
+static int akcipher_default_set_key(struct crypto_akcipher *tfm,
+ const void *key, unsigned int keylen)
+{
+ return -ENOSYS;
+}
+
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
@@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
alg->decrypt = akcipher_default_op;
+ if (!alg->set_priv_key)
+ alg->set_priv_key = akcipher_default_set_key;
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
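The two hunks above are what makes setting a private key optional for akcipher: registration now substitutes a stub returning -ENOSYS for any callback the algorithm leaves NULL, so callers hit a clean "not implemented" error instead of a NULL dereference. The pattern in miniature, as a userspace sketch with hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    struct akcipher_ops {
        int (*encrypt)(void);
        int (*set_priv_key)(void);
    };

    static int default_op(void)
    {
        return -ENOSYS;
    }

    static void register_ops(struct akcipher_ops *ops)
    {
        /* Fill in unimplemented callbacks so callers never see NULL. */
        if (!ops->encrypt)
            ops->encrypt = default_op;
        if (!ops->set_priv_key)
            ops->set_priv_key = default_op;
    }

    int main(void)
    {
        struct akcipher_ops ops = { 0 };

        register_ops(&ops);
        printf("set_priv_key() -> %d\n", ops.set_priv_key()); /* -38 on Linux */
        return 0;
    }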
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d1c99288af3e..5c69ff8e8fa5 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -997,77 +997,6 @@ void crypto_inc(u8 *a, unsigned int size)
}
EXPORT_SYMBOL_GPL(crypto_inc);
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
-{
- int relalign = 0;
-
- if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
- int size = sizeof(unsigned long);
- int d = (((unsigned long)dst ^ (unsigned long)src1) |
- ((unsigned long)dst ^ (unsigned long)src2)) &
- (size - 1);
-
- relalign = d ? 1 << __ffs(d) : size;
-
- /*
- * If we care about alignment, process as many bytes as
- * needed to advance dst and src to values whose alignments
- * equal their relative alignment. This will allow us to
- * process the remainder of the input using optimal strides.
- */
- while (((unsigned long)dst & (relalign - 1)) && len > 0) {
- *dst++ = *src1++ ^ *src2++;
- len--;
- }
- }
-
- while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
- u64 l = get_unaligned((u64 *)src1) ^
- get_unaligned((u64 *)src2);
- put_unaligned(l, (u64 *)dst);
- } else {
- *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
- }
- dst += 8;
- src1 += 8;
- src2 += 8;
- len -= 8;
- }
-
- while (len >= 4 && !(relalign & 3)) {
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
- u32 l = get_unaligned((u32 *)src1) ^
- get_unaligned((u32 *)src2);
- put_unaligned(l, (u32 *)dst);
- } else {
- *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
- }
- dst += 4;
- src1 += 4;
- src2 += 4;
- len -= 4;
- }
-
- while (len >= 2 && !(relalign & 1)) {
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
- u16 l = get_unaligned((u16 *)src1) ^
- get_unaligned((u16 *)src2);
- put_unaligned(l, (u16 *)dst);
- } else {
- *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
- }
- dst += 2;
- src1 += 2;
- src2 += 2;
- len -= 2;
- }
-
- while (len--)
- *dst++ = *src1++ ^ *src2++;
-}
-EXPORT_SYMBOL_GPL(__crypto_xor);
-
unsigned int crypto_alg_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize +
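The helper deleted above is the alignment-aware XOR used by the cipher modes: fix up relative alignment byte by byte, then XOR in 8-, 4- and 2-byte words, then finish the tail. A simplified standalone sketch of the word-at-a-time idea, using memcpy() in place of the kernel's get/put_unaligned():

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Word-at-a-time XOR: 8-byte chunks first, then the byte tail. */
    static void xor_blocks(uint8_t *dst, const uint8_t *a, const uint8_t *b,
                           size_t len)
    {
        while (len >= 8) {
            uint64_t x, y;

            memcpy(&x, a, 8);   /* memcpy keeps the loads alignment-safe */
            memcpy(&y, b, 8);
            x ^= y;
            memcpy(dst, &x, 8);
            dst += 8; a += 8; b += 8; len -= 8;
        }
        while (len--)
            *dst++ = *a++ ^ *b++;
    }

    int main(void)
    {
        uint8_t a[13], b[13], out[13];

        memset(a, 0x5a, sizeof(a));
        memset(b, 0xff, sizeof(b));
        xor_blocks(out, a, b, sizeof(out));  /* exercises both loops */
        return out[12] == (0x5a ^ 0xff) ? 0 : 1;
    }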
diff --git a/crypto/api.c b/crypto/api.c
index 69508ae9345e..64f2d365a8e9 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -114,7 +114,7 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
larval->alg.cra_priority = -1;
larval->alg.cra_destroy = crypto_larval_destroy;
- strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
+ strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
init_completion(&larval->completion);
return larval;
@@ -321,7 +321,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
/*
* If the internal flag is set for a cipher, require a caller to
- * to invoke the cipher with the internal flag to use that cipher.
+ * invoke the cipher with the internal flag to use that cipher.
* Also, if a caller wants to allocate a cipher that may or may
* not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
* !(mask & CRYPTO_ALG_INTERNAL).
diff --git a/crypto/aria.c b/crypto/aria_generic.c
index ac3dffac34bb..4cc29b82b99d 100644
--- a/crypto/aria.c
+++ b/crypto/aria_generic.c
@@ -16,6 +16,14 @@
#include <crypto/aria.h>
+static const u32 key_rc[20] = {
+ 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
+ 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0,
+ 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e,
+ 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
+ 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0
+};
+
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
@@ -25,7 +33,7 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
const u32 *ck;
int rkidx = 0;
- ck = &key_rc[(key_len - 16) / 8][0];
+ ck = &key_rc[(key_len - 16) / 2];
w0[0] = be32_to_cpu(key[0]);
w0[1] = be32_to_cpu(key[1]);
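The constant table is flattened from rows of four u32 words to a single 20-word array, so the old row lookup &key_rc[(key_len - 16) / 8][0] becomes the flat offset (key_len - 16) / 2: for key_len = 16, 24 and 32, both expressions select word offsets 0, 4 and 8. A one-line check of the equivalence:

    #include <assert.h>

    int main(void)
    {
        for (unsigned int key_len = 16; key_len <= 32; key_len += 8)
            assert(((key_len - 16) / 8) * 4 == (key_len - 16) / 2);
        return 0;
    }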
@@ -163,8 +171,7 @@ static void aria_set_decrypt_key(struct aria_ctx *ctx)
}
}
-static int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len)
+int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -179,6 +186,7 @@ static int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
+EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
@@ -235,14 +243,30 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
dst[3] = cpu_to_be32(reg3);
}
-static void aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
+{
+ struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
+
+ __aria_crypt(ctx, out, in, ctx->enc_key);
+}
+EXPORT_SYMBOL_GPL(aria_encrypt);
+
+void aria_decrypt(void *_ctx, u8 *out, const u8 *in)
+{
+ struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
+
+ __aria_crypt(ctx, out, in, ctx->dec_key);
+}
+EXPORT_SYMBOL_GPL(aria_decrypt);
+
+static void __aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
__aria_crypt(ctx, out, in, ctx->enc_key);
}
-static void aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+static void __aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -263,8 +287,8 @@ static struct crypto_alg aria_alg = {
.cia_min_keysize = ARIA_MIN_KEY_SIZE,
.cia_max_keysize = ARIA_MAX_KEY_SIZE,
.cia_setkey = aria_set_key,
- .cia_encrypt = aria_encrypt,
- .cia_decrypt = aria_decrypt
+ .cia_encrypt = __aria_encrypt,
+ .cia_decrypt = __aria_decrypt
}
}
};
@@ -286,3 +310,4 @@ MODULE_DESCRIPTION("ARIA Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
MODULE_ALIAS_CRYPTO("aria");
+MODULE_ALIAS_CRYPTO("aria-generic");
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index c9d218e53bcb..9719c7520661 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -189,7 +189,7 @@ static int test(int disks, int *tests)
}
-static int raid6_test(void)
+static int __init raid6_test(void)
{
int err = 0;
int tests = 0;
@@ -236,7 +236,7 @@ static int raid6_test(void)
return 0;
}
-static void raid6_test_exit(void)
+static void __exit raid6_test_exit(void)
{
}
diff --git a/crypto/curve25519-generic.c b/crypto/curve25519-generic.c
index bd88fd571393..d055b0784c77 100644
--- a/crypto/curve25519-generic.c
+++ b/crypto/curve25519-generic.c
@@ -72,12 +72,12 @@ static struct kpp_alg curve25519_alg = {
.max_size = curve25519_max_size,
};
-static int curve25519_init(void)
+static int __init curve25519_init(void)
{
return crypto_register_kpp(&curve25519_alg);
}
-static void curve25519_exit(void)
+static void __exit curve25519_exit(void)
{
crypto_unregister_kpp(&curve25519_alg);
}
diff --git a/crypto/dh.c b/crypto/dh.c
index 4406aeb1ff61..99c3b2ef7adc 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -893,7 +893,7 @@ static struct crypto_template crypto_ffdhe_templates[] = {};
#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
-static int dh_init(void)
+static int __init dh_init(void)
{
int err;
@@ -911,7 +911,7 @@ static int dh_init(void)
return 0;
}
-static void dh_exit(void)
+static void __exit dh_exit(void)
{
crypto_unregister_templates(crypto_ffdhe_templates,
ARRAY_SIZE(crypto_ffdhe_templates));
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 177983b6ae38..982d4ca4526d 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1703,7 +1703,7 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
static int drbg_fini_hash_kernel(struct drbg_state *drbg)
{
- struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+ struct sdesc *sdesc = drbg->priv_data;
if (sdesc) {
crypto_free_shash(sdesc->shash.tfm);
kfree_sensitive(sdesc);
@@ -1715,7 +1715,7 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg)
static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
const unsigned char *key)
{
- struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+ struct sdesc *sdesc = drbg->priv_data;
crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
}
@@ -1723,7 +1723,7 @@ static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
const struct list_head *in)
{
- struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+ struct sdesc *sdesc = drbg->priv_data;
struct drbg_string *input = NULL;
crypto_shash_init(&sdesc->shash);
@@ -1818,8 +1818,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
const unsigned char *key)
{
- struct crypto_cipher *tfm =
- (struct crypto_cipher *)drbg->priv_data;
+ struct crypto_cipher *tfm = drbg->priv_data;
crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
}
@@ -1827,8 +1826,7 @@ static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
const struct drbg_string *in)
{
- struct crypto_cipher *tfm =
- (struct crypto_cipher *)drbg->priv_data;
+ struct crypto_cipher *tfm = drbg->priv_data;
/* there is only component in *in */
BUG_ON(in->len < drbg_blocklen(drbg));
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index e4857d534344..80afee3234fb 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -200,7 +200,7 @@ static struct kpp_alg ecdh_nist_p384 = {
static bool ecdh_nist_p192_registered;
-static int ecdh_init(void)
+static int __init ecdh_init(void)
{
int ret;
@@ -227,7 +227,7 @@ nist_p256_error:
return ret;
}
-static void ecdh_exit(void)
+static void __exit ecdh_exit(void)
{
if (ecdh_nist_p192_registered)
crypto_unregister_kpp(&ecdh_nist_p192);
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
index b3a8a6b572ba..fbd76498aba8 100644
--- a/crypto/ecdsa.c
+++ b/crypto/ecdsa.c
@@ -332,7 +332,7 @@ static struct akcipher_alg ecdsa_nist_p192 = {
};
static bool ecdsa_nist_p192_registered;
-static int ecdsa_init(void)
+static int __init ecdsa_init(void)
{
int ret;
@@ -359,7 +359,7 @@ nist_p256_error:
return ret;
}
-static void ecdsa_exit(void)
+static void __exit ecdsa_exit(void)
{
if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192);
diff --git a/crypto/essiv.c b/crypto/essiv.c
index 8bcc5bdcb2a9..e33369df9034 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -543,7 +543,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
}
/* record the driver name so we can instantiate this exact algo later */
- strlcpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
+ strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
CRYPTO_MAX_ALG_NAME);
/* Instance fields */
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 0e555ee4addb..c50f2d2a4d06 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -327,7 +327,7 @@ static struct akcipher_alg rsa = {
},
};
-static int rsa_init(void)
+static int __init rsa_init(void)
{
int err;
@@ -344,7 +344,7 @@ static int rsa_init(void)
return 0;
}
-static void rsa_exit(void)
+static void __exit rsa_exit(void)
{
crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
diff --git a/crypto/sm2.c b/crypto/sm2.c
index f3e1592965c0..ed9307dac3d1 100644
--- a/crypto/sm2.c
+++ b/crypto/sm2.c
@@ -441,12 +441,12 @@ static struct akcipher_alg sm2 = {
},
};
-static int sm2_init(void)
+static int __init sm2_init(void)
{
return crypto_register_akcipher(&sm2);
}
-static void sm2_exit(void)
+static void __exit sm2_exit(void)
{
crypto_unregister_akcipher(&sm2);
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 59eb8ec36664..a82679b576bb 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -66,17 +66,6 @@ static u32 num_mb = 8;
static unsigned int klen;
static char *tvmem[TVMEMSIZE];
-static const char *check[] = {
- "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
- "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
- "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
- "khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt",
- "camellia", "seed", "rmd160", "aria",
- "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384",
- "sha3-512", "streebog256", "streebog512",
- NULL
-};
-
static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 };
static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
@@ -1454,18 +1443,6 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
false);
}
-static void test_available(void)
-{
- const char **name = check;
-
- while (*name) {
- printk("alg %s ", *name);
- printk(crypto_has_alg(*name, 0, 0) ?
- "found\n" : "not found\n");
- name++;
- }
-}
-
static inline int tcrypt_test(const char *alg)
{
int ret;
@@ -2228,6 +2205,13 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
NULL, 0, 16, 8, speed_template_16_24_32);
break;
+ case 229:
+ test_mb_aead_speed("gcm(aria)", ENCRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16, num_mb);
+ test_mb_aead_speed("gcm(aria)", DECRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16, num_mb);
+ break;
+
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -2648,6 +2632,17 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16);
break;
+ case 519:
+ test_acipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_acipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_acipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_acipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ break;
+
case 600:
test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
@@ -2860,9 +2855,17 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_8_32, num_mb);
break;
- case 1000:
- test_available();
+ case 610:
+ test_mb_skcipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
break;
+
}
return ret;
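Usage note: tcrypt is driven by module parameters, so with this change something like "modprobe tcrypt mode=519 sec=1" should run the new ecb(aria)/ctr(aria) speed tests for one second each, with mode=229 covering gcm(aria) and mode=610 the multibuffer variants; the old mode=1000 case, which merely printed algorithm availability, is removed.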
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5349ffee6bbd..e4bb03b8b924 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3322,7 +3322,7 @@ out:
}
static int test_acomp(struct crypto_acomp *tfm,
- const struct comp_testvec *ctemplate,
+ const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
int ctcount, int dtcount)
{
@@ -3417,6 +3417,21 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ crypto_init_wait(&wait);
+ sg_init_one(&src, input_vec, ilen);
+ acomp_request_set_params(req, &src, NULL, ilen, 0);
+
+ ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
+ if (ret) {
+ pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+#endif
+
kfree(input_vec);
acomp_request_free(req);
}
@@ -3478,6 +3493,20 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ crypto_init_wait(&wait);
+ acomp_request_set_params(req, &src, NULL, ilen, 0);
+
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
+ if (ret) {
+ pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+#endif
+
kfree(input_vec);
acomp_request_free(req);
}
@@ -5801,8 +5830,11 @@ test_done:
driver, alg,
fips_enabled ? "fips" : "panic_on_fail");
}
- WARN(1, "alg: self-tests for %s (%s) failed (rc=%d)",
- driver, alg, rc);
+ pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
+ alg, driver, rc);
+ WARN(rc != -ENOENT,
+ "alg: self-tests for %s using %s failed (rc=%d)",
+ alg, driver, rc);
} else {
if (fips_enabled)
pr_info("alg: self-tests for %s (%s) passed\n",
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
index b24ac39a903b..e34c3ea692b6 100644
--- a/drivers/char/hw_random/arm_smccc_trng.c
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
MAX_BITS_PER_CALL);
arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
- if ((int)res.a0 < 0)
- return (int)res.a0;
switch ((int)res.a0) {
case SMCCC_RET_SUCCESS:
@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return copied;
cond_resched();
break;
+ default:
+ return -EIO;
}
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 16f227b995e8..cc002b0c2f0c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(default_quality,
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
-static void hwrng_manage_rngd(struct hwrng *rng);
+static int hwrng_fillfn(void *unused);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
@@ -96,6 +96,15 @@ static int set_current_rng(struct hwrng *rng)
drop_current_rng();
current_rng = rng;
+ /* if necessary, start hwrng thread */
+ if (!hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
+ }
+
return 0;
}
@@ -167,8 +176,6 @@ skip_init:
rng->quality = 1024;
current_quality = rng->quality; /* obsolete */
- hwrng_manage_rngd(rng);
-
return 0;
}
@@ -454,10 +461,6 @@ static ssize_t rng_quality_store(struct device *dev,
/* the best available RNG may have changed */
ret = enable_best_rng();
- /* start/stop rngd if necessary */
- if (current_rng)
- hwrng_manage_rngd(current_rng);
-
out:
mutex_unlock(&rng_mutex);
return ret ? ret : len;
@@ -507,16 +510,14 @@ static int hwrng_fillfn(void *unused)
rng->quality = current_quality; /* obsolete */
quality = rng->quality;
mutex_unlock(&reading_mutex);
- put_rng(rng);
- if (!quality)
- break;
+ if (rc <= 0)
+ hwrng_msleep(rng, 10000);
- if (rc <= 0) {
- pr_warn("hwrng: no data available\n");
- msleep_interruptible(10000);
+ put_rng(rng);
+
+ if (rc <= 0)
continue;
- }
/* If we cannot credit at least one bit of entropy,
* keep track of the remainder for the next iteration
@@ -533,22 +534,6 @@ static int hwrng_fillfn(void *unused)
return 0;
}
-static void hwrng_manage_rngd(struct hwrng *rng)
-{
- if (WARN_ON(!mutex_is_locked(&rng_mutex)))
- return;
-
- if (rng->quality == 0 && hwrng_fill)
- kthread_stop(hwrng_fill);
- if (rng->quality > 0 && !hwrng_fill) {
- hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed\n");
- hwrng_fill = NULL;
- }
- }
-}
-
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
@@ -570,6 +555,7 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
+ init_completion(&rng->dying);
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
@@ -617,6 +603,7 @@ void hwrng_unregister(struct hwrng *rng)
old_rng = current_rng;
list_del(&rng->list);
+ complete_all(&rng->dying);
if (current_rng == rng) {
err = enable_best_rng();
if (err) {
@@ -685,6 +672,14 @@ void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
+long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+{
+ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+ return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
+}
+EXPORT_SYMBOL_GPL(hwrng_msleep);
+
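hwrng_msleep() pairs with the new rng->dying completion (completed in hwrng_unregister() above): a driver or the fill thread can sleep for a bounded interval yet wake immediately when the RNG goes away or a signal arrives. A hypothetical driver read callback using it, with foo_* standing in for device specifics:

    static int foo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
    {
        while (!foo_data_ready()) {          /* hypothetical hardware poll */
            if (!wait)
                return 0;
            /* Non-zero means rng->dying completed or a signal arrived. */
            if (hwrng_msleep(rng, 100))
                return -ENODEV;
        }
        return foo_read_data(data, max);     /* hypothetical copy-out */
    }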
static int __init hwrng_modinit(void)
{
int ret;
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index b05d676ca814..a1c24148ed31 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -245,7 +245,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
- rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(rngc->clk)) {
dev_err(&pdev->dev, "Can not get rng_clk\n");
return PTR_ERR(rngc->clk);
@@ -255,27 +255,14 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = clk_prepare_enable(rngc->clk);
- if (ret)
- return ret;
-
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = ver_id >> RNGC_TYPE_SHIFT;
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
- if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
- ret = -ENODEV;
- goto err;
- }
-
- ret = devm_request_irq(&pdev->dev,
- irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret) {
- dev_err(rngc->dev, "Can't get interrupt working.\n");
- goto err;
- }
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ return -ENODEV;
init_completion(&rngc->rng_op_done);
@@ -290,18 +277,25 @@ static int imx_rngc_probe(struct platform_device *pdev)
imx_rngc_irq_mask_clear(rngc);
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
dev_err(rngc->dev, "self test failed\n");
- goto err;
+ return ret;
}
}
- ret = hwrng_register(&rngc->rng);
+ ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret) {
dev_err(&pdev->dev, "hwrng registration failed\n");
- goto err;
+ return ret;
}
dev_info(&pdev->dev,
@@ -309,22 +303,6 @@ static int imx_rngc_probe(struct platform_device *pdev)
rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
(ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;
-
-err:
- clk_disable_unprepare(rngc->clk);
-
- return ret;
-}
-
-static int __exit imx_rngc_remove(struct platform_device *pdev)
-{
- struct imx_rngc *rngc = platform_get_drvdata(pdev);
-
- hwrng_unregister(&rngc->rng);
-
- clk_disable_unprepare(rngc->clk);
-
- return 0;
}
static int __maybe_unused imx_rngc_suspend(struct device *dev)
@@ -355,11 +333,10 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
- .name = "imx_rngc",
+ .name = KBUILD_MODNAME,
.pm = &imx_rngc_pm_ops,
.of_match_table = imx_rngc_dt_ids,
},
- .remove = __exit_p(imx_rngc_remove),
};
module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3e6aa319920b..55e75fbb658e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -802,9 +802,7 @@ source "drivers/crypto/amlogic/Kconfig"
config CRYPTO_DEV_SA2UL
tristate "Support for TI security accelerator"
depends on ARCH_K3 || COMPILE_TEST
- select ARM64_CRYPTO
select CRYPTO_AES
- select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_SHA1
@@ -818,5 +816,6 @@ config CRYPTO_DEV_SA2UL
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/keembay/Kconfig"
+source "drivers/crypto/aspeed/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f81703a86b98..116de173a66c 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 44b8fc4b786d..006e40133c28 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -235,7 +235,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
-static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
+static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v)
{
unsigned int i;
@@ -266,19 +266,7 @@ static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
}
return 0;
}
-
-static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun4i_ss_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun4i_ss_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs);
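DEFINE_SHOW_ATTRIBUTE(name) generates the single_open() boilerplate from a name##_show function, which is why the read function above is renamed to sun4i_ss_debugfs_show. What the macro expands to here is roughly the code being deleted:

    static int sun4i_ss_debugfs_open(struct inode *inode, struct file *file)
    {
        return single_open(file, sun4i_ss_debugfs_show, inode->i_private);
    }

    static const struct file_operations sun4i_ss_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = sun4i_ss_debugfs_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };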
/*
* Power management strategy: The device is suspended unless a TFM exists for
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index 19cd2e52f89d..c4b0a8b58842 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
goto err_dst;
}
- err = pm_runtime_get_sync(ce->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
goto err_pm;
- }
mutex_lock(&ce->rnglock);
chan = &ce->chanlist[flow];
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e79514fce731..af017a087ebf 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -177,7 +177,7 @@ static int meson_cipher(struct skcipher_request *areq)
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_BIDIRECTIONAL);
- if (nr_sgs < 0) {
+ if (!nr_sgs) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
@@ -186,14 +186,14 @@ static int meson_cipher(struct skcipher_request *areq)
} else {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
- if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
+ if (!nr_sgs || nr_sgs > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
DMA_FROM_DEVICE);
- if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
+ if (!nr_sgd || nr_sgd > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
err = -EINVAL;
goto theend;
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
new file mode 100644
index 000000000000..ae2710ae8d8f
--- /dev/null
+++ b/drivers/crypto/aspeed/Kconfig
@@ -0,0 +1,48 @@
+config CRYPTO_DEV_ASPEED
+ tristate "Support for Aspeed cryptographic engine driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select CRYPTO_ENGINE
+ help
+ Hash and Crypto Engine (HACE) is designed to accelerate the
+ throughput of hash data digest, encryption and decryption.
+
+ Select y here to have support for the cryptographic driver
+ available on Aspeed SoC.
+
+config CRYPTO_DEV_ASPEED_DEBUG
+ bool "Enable Aspeed crypto debug messages"
+ depends on CRYPTO_DEV_ASPEED
+ help
+ Print Aspeed crypto debugging messages if you use this
+ option to ask for those messages.
+ Avoid enabling this option for production build to
+ minimize driver timing.
+
+config CRYPTO_DEV_ASPEED_HACE_HASH
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) hash"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
+ help
+ Select here to enable Aspeed Hash & Crypto Engine (HACE)
+ hash driver.
+ Supports multiple message digest standards, including
+ SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, and so on.
+
+config CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CFB
+ select CRYPTO_OFB
+ select CRYPTO_CTR
+ help
+ Select here to enable Aspeed Hash & Crypto Engine (HACE)
+ crypto driver.
+ Supports AES/DES symmetric-key encryption and decryption
+ with ECB/CBC/CFB/OFB/CTR options.
diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile
new file mode 100644
index 000000000000..a0ed40ddaad1
--- /dev/null
+++ b/drivers/crypto/aspeed/Makefile
@@ -0,0 +1,7 @@
+hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o
+hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o
+
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
+aspeed_crypto-objs := aspeed-hace.o \
+ $(hace-hash-y) \
+ $(hace-crypto-y)
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
new file mode 100644
index 000000000000..ef73b0028b4d
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG
+#define CIPHER_DBG(h, fmt, ...) \
+ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define CIPHER_DBG(h, fmt, ...) \
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+static int aspeed_crypto_do_fallback(struct skcipher_request *areq)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+
+ if (rctx->enc_cmd & HACE_CMD_ENCRYPT)
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+
+ return err;
+}
+
+static bool aspeed_crypto_need_fallback(struct skcipher_request *areq)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+
+ if (areq->cryptlen == 0)
+ return true;
+
+ if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) &&
+ !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE))
+ return true;
+
+ if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) &&
+ !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE))
+ return true;
+
+ return false;
+}
+
+static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev,
+ struct skcipher_request *req)
+{
+ if (hace_dev->version == AST2500_VERSION &&
+ aspeed_crypto_need_fallback(req)) {
+ CIPHER_DBG(hace_dev, "SW fallback\n");
+ return aspeed_crypto_do_fallback(req);
+ }
+
+ return crypto_transfer_skcipher_request_to_engine(
+ hace_dev->crypt_engine_crypto, req);
+}
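Note the division of labor here: aspeed_crypto_need_fallback() flags requests the AST2500 engine cannot process (zero length, or lengths not aligned to the DES/AES block size), and those are redirected to the software fallback_tfm instead of the engine queue; on other versions every request goes straight to the crypto engine.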
+
+static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ struct aspeed_engine_crypto *crypto_engine;
+ int rc;
+
+ crypto_engine = &hace_dev->crypto_engine;
+ crypto_engine->req = req;
+ crypto_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+ rc = ctx->start(hace_dev);
+
+ if (rc != -EINPROGRESS)
+ return -EIO;
+
+ return 0;
+}
+
+static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+ memcpy(req->iv, crypto_engine->cipher_ctx +
+ DES_KEY_SIZE, DES_KEY_SIZE);
+ else
+ memcpy(req->iv, crypto_engine->cipher_ctx,
+ AES_BLOCK_SIZE);
+ }
+
+ crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, req,
+ err);
+
+ return err;
+}
+
+static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct device *dev = hace_dev->dev;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE);
+ }
+
+ return aspeed_sk_complete(hace_dev, 0);
+}
+
+static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *out_sg;
+ int nbytes = 0;
+ int rc = 0;
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ out_sg = req->dst;
+
+ /* Copy output buffer to dst scatter-gather lists */
+ nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents,
+ crypto_engine->cipher_addr, req->cryptlen);
+ if (!nbytes) {
+ dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+ "nbytes", nbytes, "cryptlen", req->cryptlen);
+ rc = -EINVAL;
+ }
+
+ CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+ "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+ "nb_out_sg", rctx->dst_nents,
+ "cipher addr", crypto_engine->cipher_addr);
+
+ return aspeed_sk_complete(hace_dev, rc);
+}
+
+static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *in_sg;
+ int nbytes;
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ in_sg = req->src;
+
+ nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents,
+ crypto_engine->cipher_addr, req->cryptlen);
+
+ CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+ "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+ "nb_in_sg", rctx->src_nents,
+ "cipher addr", crypto_engine->cipher_addr);
+
+ if (!nbytes) {
+ dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+ "nbytes", nbytes, "cryptlen", req->cryptlen);
+ return -EINVAL;
+ }
+
+ crypto_engine->resume = aspeed_sk_transfer;
+
+ /* Trigger engines */
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_DEST);
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+ return -EINPROGRESS;
+}
+
+static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_sg_list *src_list, *dst_list;
+ dma_addr_t src_dma_addr, dst_dma_addr;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *s;
+ int src_sg_len;
+ int dst_sg_len;
+ int total, i;
+ int rc;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL |
+ HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN;
+
+ /* BIDIRECTIONAL */
+ if (req->dst == req->src) {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+ rctx->src_nents, DMA_BIDIRECTIONAL);
+ dst_sg_len = src_sg_len;
+ if (!src_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ return -EINVAL;
+ }
+
+ } else {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+ rctx->src_nents, DMA_TO_DEVICE);
+ if (!src_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ return -EINVAL;
+ }
+
+ dst_sg_len = dma_map_sg(hace_dev->dev, req->dst,
+ rctx->dst_nents, DMA_FROM_DEVICE);
+ if (!dst_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() dst error\n");
+ rc = -EINVAL;
+ goto free_req_src;
+ }
+ }
+
+ src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr;
+ src_dma_addr = crypto_engine->cipher_dma_addr;
+ total = req->cryptlen;
+
+ for_each_sg(req->src, s, src_sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (total > len)
+ total -= len;
+ else {
+ /* last sg list */
+ len = total;
+ len |= BIT(31);
+ total = 0;
+ }
+
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
+ }
+
+ if (total != 0) {
+ rc = -EINVAL;
+ goto free_req;
+ }
+
+ if (req->dst == req->src) {
+ dst_list = src_list;
+ dst_dma_addr = src_dma_addr;
+
+ } else {
+ dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr;
+ dst_dma_addr = crypto_engine->dst_sg_dma_addr;
+ total = req->cryptlen;
+
+ for_each_sg(req->dst, s, dst_sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (total > len)
+ total -= len;
+ else {
+ /* last sg list */
+ len = total;
+ len |= BIT(31);
+ total = 0;
+ }
+
+ dst_list[i].phy_addr = cpu_to_le32(phy_addr);
+ dst_list[i].len = cpu_to_le32(len);
+
+ }
+
+ dst_list[dst_sg_len].phy_addr = 0;
+ dst_list[dst_sg_len].len = 0;
+ }
+
+ if (total != 0) {
+ rc = -EINVAL;
+ goto free_req;
+ }
+
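Both descriptor loops above tag the entry that completes req->cryptlen by setting bit 31 of its length word; any leftover total afterwards means the scatterlist could not cover the request. The same marking logic as a standalone sketch (struct and macro names are illustrative, not the driver's):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    struct sg_desc { uint32_t addr; uint32_t len; };

    #define LAST_DESC 0x80000000u   /* BIT(31): final-entry flag */

    /* Tag the descriptor that completes 'total' bytes, truncating its length. */
    static int mark_last(struct sg_desc *d, int n, uint32_t total)
    {
        for (int i = 0; i < n; i++) {
            if (total > d[i].len) {
                total -= d[i].len;
            } else {
                d[i].len = total | LAST_DESC;
                return 0;
            }
        }
        return -1;  /* scatterlist shorter than the request */
    }

    int main(void)
    {
        struct sg_desc d[2] = { { 0x1000, 64 }, { 0x2000, 64 } };

        mark_last(d, 2, 100);   /* second entry becomes 36 | LAST_DESC */
        printf("%08" PRIx32 "\n", d[1].len);
        return 0;
    }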
+ crypto_engine->resume = aspeed_sk_transfer_sg;
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+
+ /* Trigger engines */
+ ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST);
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+ return -EINPROGRESS;
+
+free_req:
+ if (req->dst == req->src) {
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ DMA_BIDIRECTIONAL);
+
+ } else {
+ dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ DMA_TO_DEVICE);
+ }
+
+ return rc;
+
+free_req_src:
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct crypto_skcipher *cipher;
+ struct aspeed_cipher_ctx *ctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ cipher = crypto_skcipher_reqtfm(req);
+ ctx = crypto_skcipher_ctx(cipher);
+
+ /* enable interrupt */
+ rctx->enc_cmd |= HACE_CMD_ISR_EN;
+
+ rctx->dst_nents = sg_nents(req->dst);
+ rctx->src_nents = sg_nents(req->src);
+
+ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma,
+ ASPEED_HACE_CONTEXT);
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+ memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE,
+ req->iv, DES_BLOCK_SIZE);
+ else
+ memcpy(crypto_engine->cipher_ctx, req->iv,
+ AES_BLOCK_SIZE);
+ }
+
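+	/*
+	 * The AST2600 engine takes a scatter-gather list and the raw key;
+	 * earlier versions use the single-buffer path and always copy
+	 * AES_MAX_KEYLENGTH bytes of key material.
+	 */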
+ if (hace_dev->version == AST2600_VERSION) {
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len);
+
+ return aspeed_sk_start_sg(hace_dev);
+ }
+
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH);
+
+ return aspeed_sk_start(hace_dev);
+}
+
+static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
+ return -EINVAL;
+ }
+
+ rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+ HACE_CMD_DES | HACE_CMD_CONTEXT_LOAD_ENABLE |
+ HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+ return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ int rc;
+
+	CIPHER_DBG(hace_dev, "keylen: %d bytes\n", keylen);
+
+ if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+		dev_warn(hace_dev->dev, "invalid keylen: %d bytes\n", keylen);
+ return -EINVAL;
+ }
+
+ if (keylen == DES_KEY_SIZE) {
+ rc = crypto_des_verify_key(tfm, key);
+ if (rc)
+ return rc;
+
+ } else if (keylen == DES3_EDE_KEY_SIZE) {
+ rc = crypto_des3_ede_verify_key(tfm, key);
+ if (rc)
+ return rc;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_des_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+ if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+ }
+
+ CIPHER_DBG(hace_dev, "%s\n",
+ (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt");
+
+ cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+ HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ cmd |= HACE_CMD_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ cmd |= HACE_CMD_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ cmd |= HACE_CMD_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rctx->enc_cmd = cmd;
+
+ return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ struct crypto_aes_ctx gen_aes_key;
+
+ CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8));
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
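+	/*
+	 * For the AST2500, expand the AES key in software and keep the
+	 * whole key schedule; newer versions store the raw key unchanged.
+	 */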
+ if (ctx->hace_dev->version == AST2500_VERSION) {
+ aes_expandkey(&gen_aes_key, key, keylen);
+ memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH);
+
+ } else {
+ memcpy(ctx->key, key, keylen);
+ }
+
+ ctx->key_len = keylen;
+
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_aes_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct aspeed_hace_alg *crypto_alg;
+
+ crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
+ ctx->hace_dev = crypto_alg->hace_dev;
+ ctx->start = aspeed_hace_skcipher_trigger;
+
+ CIPHER_DBG(ctx->hace_dev, "%s\n", name);
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm));
+
+ ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+
+ CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base));
+ crypto_free_skcipher(ctx->fallback_tfm);
+}
+
+static struct aspeed_hace_alg aspeed_crypto_algs[] = {
+ {
+ .alg.skcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ecb_encrypt,
+ .decrypt = aspeed_aes_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "aspeed-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_cbc_encrypt,
+ .decrypt = aspeed_aes_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "aspeed-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_cfb_encrypt,
+ .decrypt = aspeed_aes_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "aspeed-cfb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ofb_encrypt,
+ .decrypt = aspeed_aes_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "aspeed-ofb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ecb_encrypt,
+ .decrypt = aspeed_des_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "aspeed-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_cbc_encrypt,
+ .decrypt = aspeed_des_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "aspeed-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_cfb_encrypt,
+ .decrypt = aspeed_des_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(des)",
+ .cra_driver_name = "aspeed-cfb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ofb_encrypt,
+ .decrypt = aspeed_des_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(des)",
+ .cra_driver_name = "aspeed-ofb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ecb_encrypt,
+ .decrypt = aspeed_tdes_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "aspeed-ecb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_cbc_encrypt,
+ .decrypt = aspeed_tdes_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "aspeed-cbc-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_cfb_encrypt,
+ .decrypt = aspeed_tdes_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(des3_ede)",
+ .cra_driver_name = "aspeed-cfb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ofb_encrypt,
+ .decrypt = aspeed_tdes_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(des3_ede)",
+ .cra_driver_name = "aspeed-ofb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ctr_encrypt,
+ .decrypt = aspeed_aes_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "aspeed-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ctr_encrypt,
+ .decrypt = aspeed_des_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(des)",
+ .cra_driver_name = "aspeed-ctr-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ctr_encrypt,
+ .decrypt = aspeed_tdes_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(des3_ede)",
+ .cra_driver_name = "aspeed-ctr-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
+ crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
+ crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+}
+
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int rc, i;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
+ aspeed_crypto_algs[i].hace_dev = hace_dev;
+ rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+ if (rc) {
+ CIPHER_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
+ }
+ }
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
+ aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
+ rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ if (rc) {
+ CIPHER_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
+ }
+ }
+}
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
new file mode 100644
index 000000000000..935135229ebd
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -0,0 +1,1391 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define AHASH_DBG(h, fmt, ...) \
+ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define AHASH_DBG(h, fmt, ...) \
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* Initialization Vectors for SHA-family */
+static const __be32 sha1_iv[8] = {
+ cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+ cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+ cpu_to_be32(SHA1_H4), 0, 0, 0
+};
+
+static const __be32 sha224_iv[8] = {
+ cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+ cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+ cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+ cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 sha256_iv[8] = {
+ cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+ cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+ cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+ cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
+static const __be64 sha384_iv[8] = {
+ cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
+ cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
+ cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
+ cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
+};
+
+static const __be64 sha512_iv[8] = {
+ cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
+ cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
+ cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
+ cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
+};
+
+static const __be32 sha512_224_iv[16] = {
+ cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
+ cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
+ cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
+ cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
+ cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
+ cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
+ cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
+ cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
+};
+
+static const __be32 sha512_256_iv[16] = {
+ cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
+ cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
+ cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
+ cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
+ cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
+ cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
+ cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
+ cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
+};
+
+/* The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message, followed by
+ * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or
+ * 128-bit block (SHA384/SHA512) containing the message length in bits
+ * is appended.
+ *
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows:
+ * - if message length < 56 bytes then padlen = 56 - message length
+ * - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as follows:
+ * - if message length < 112 bytes then padlen = 112 - message length
+ * - else padlen = 128 + 112 - message length
+ *
+ * For example, a 20-byte SHA256 message gets padlen = 56 - 20 = 36,
+ * so the padded block is 20 + 36 + 8 = 64 bytes long.
+ */
+static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx)
+{
+ unsigned int index, padlen;
+ __be64 bits[2];
+
+ AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
+
+ switch (rctx->flags & SHA_FLAGS_MASK) {
+ case SHA_FLAGS_SHA1:
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
+ index = rctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ *(rctx->buffer + rctx->bufcnt) = 0x80;
+ memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+ memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
+ rctx->bufcnt += padlen + 8;
+ break;
+ default:
+ bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
+ bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
+ rctx->digcnt[0] >> 61);
+ index = rctx->bufcnt & 0x7f;
+ padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
+ *(rctx->buffer + rctx->bufcnt) = 0x80;
+ memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+ memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
+ rctx->bufcnt += padlen + 16;
+ break;
+ }
+}
+
+/*
+ * Prepare DMA buffer before hardware engine
+ * processing.
+ */
+static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ int length, remain;
+
+ length = rctx->total + rctx->bufcnt;
+ remain = length % rctx->block_size;
+
+ AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
+
+ if (rctx->bufcnt)
+ memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
+
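+	/* Buffered and new data together must fit in the DMA buffer */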
+ if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
+ rctx->bufcnt, rctx->src_sg,
+ rctx->offset, rctx->total - remain, 0);
+ rctx->offset += rctx->total - remain;
+
+ } else {
+ dev_warn(hace_dev->dev, "Hash data length is too large\n");
+ return -EINVAL;
+ }
+
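+	/* Stash the trailing partial block for the next update/final */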
+ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
+ rctx->offset, remain, 0);
+
+ rctx->bufcnt = remain;
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ return -ENOMEM;
+ }
+
+ hash_engine->src_length = length - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+}
+
+/*
+ * Prepare the DMA buffer as a hardware scatter-gather list before
+ * the engine starts processing.
+ */
+static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct aspeed_sg_list *src_list;
+ struct scatterlist *s;
+ int length, remain, sg_len, i;
+ int rc = 0;
+
+ remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
+ length = rctx->total + rctx->bufcnt - remain;
+
+ AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
+ "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+ "length", length, "remain", remain);
+
+ sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+ if (!sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto free_src_sg;
+ }
+
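+	/* Data carried over in rctx->buffer becomes the first list entry */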
+ if (rctx->bufcnt != 0) {
+ u32 phy_addr;
+ u32 len;
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ phy_addr = rctx->buffer_dma_addr;
+ len = rctx->bufcnt;
+ length -= len;
+
+ /* Last sg list */
+ if (length == 0)
+ len |= HASH_SG_LAST_LIST;
+
+ src_list[0].phy_addr = cpu_to_le32(phy_addr);
+ src_list[0].len = cpu_to_le32(len);
+ src_list++;
+ }
+
+ if (length != 0) {
+ for_each_sg(rctx->src_sg, s, sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+			if (length > len) {
+				length -= len;
+			} else {
+ /* Last sg list */
+ len = length;
+ len |= HASH_SG_LAST_LIST;
+ length = 0;
+ }
+
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
+ }
+ }
+
+ if (length != 0) {
+ rc = -EINVAL;
+ goto free_rctx_buffer;
+ }
+
+ rctx->offset = rctx->total - remain;
+ hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+
+free_rctx_buffer:
+ if (rctx->bufcnt != 0)
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+free_src_sg:
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+end:
+ return rc;
+}
+
+static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
+
+ return 0;
+}
+
+/*
+ * Copy digest to the corresponding request result.
+ * This function will be called at final() stage.
+ */
+static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+
+ memcpy(req->result, rctx->digest, rctx->digsize);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+/*
+ * Trigger hardware engines to do the math.
+ */
+static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
+ aspeed_hace_fn_t resume)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
+ &hash_engine->src_dma, &hash_engine->digest_dma,
+ hash_engine->src_length);
+
+ rctx->cmd |= HASH_CMD_INT_ENABLE;
+ hash_engine->resume = resume;
+
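+	/* The digest buffer address is also programmed as the key buffer */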
+ ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
+ ast_hace_write(hace_dev, hash_engine->digest_dma,
+ ASPEED_HACE_HASH_DIGEST_BUFF);
+ ast_hace_write(hace_dev, hash_engine->digest_dma,
+ ASPEED_HACE_HASH_KEY_BUFF);
+ ast_hace_write(hace_dev, hash_engine->src_length,
+ ASPEED_HACE_HASH_DATA_LEN);
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+
+ ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
+
+ return -EINPROGRESS;
+}
+
+/*
+ * HMAC resume performs the second hash pass, which produces the
+ * final HMAC code from the outer key pad and the inner hash result.
+ */
+static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+ int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+
+ /* o key pad + hash sum 1 */
+ memcpy(rctx->buffer, bctx->opad, rctx->block_size);
+ memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
+
+ rctx->bufcnt = rctx->block_size + rctx->digsize;
+ rctx->digcnt[0] = rctx->block_size + rctx->digsize;
+
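+	/* Pad the outer block and restart the hash from the standard IV */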
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+ memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
+
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ hash_engine->src_dma = rctx->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+ return rc;
+}
+
+static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ hash_engine->src_dma = rctx->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ if (rctx->flags & SHA_FLAGS_HMAC)
+ return aspeed_hace_ahash_trigger(hace_dev,
+ aspeed_ahash_hmac_resume);
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+ return rc;
+}
+
+static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+
+ if (rctx->bufcnt != 0)
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
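+	/* Buffer the unprocessed tail for the next update or final round */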
+ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
+ rctx->total - rctx->offset, 0);
+
+ rctx->bufcnt = rctx->total - rctx->offset;
+ rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ aspeed_hace_fn_t resume;
+ int ret;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ if (hace_dev->version == AST2600_VERSION) {
+ rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
+ resume = aspeed_ahash_update_resume_sg;
+
+ } else {
+ resume = aspeed_ahash_update_resume;
+ }
+
+ ret = hash_engine->dma_prepare(hace_dev);
+ if (ret)
+ return ret;
+
+ return aspeed_hace_ahash_trigger(hace_dev, resume);
+}
+
+static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
+ struct ahash_request *req)
+{
+ return crypto_transfer_hash_request_to_engine(
+ hace_dev->crypt_engine_hash, req);
+}
+
+static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_engine_hash *hash_engine;
+ int ret = 0;
+
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+ if (rctx->op == SHA_OP_UPDATE)
+ ret = aspeed_ahash_req_update(hace_dev);
+ else if (rctx->op == SHA_OP_FINAL)
+ ret = aspeed_ahash_req_final(hace_dev);
+
+ if (ret != -EINPROGRESS)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_engine_hash *hash_engine;
+
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->req = req;
+
+ if (hace_dev->version == AST2600_VERSION)
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
+ else
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
+
+ return 0;
+}
+
+static int aspeed_sham_update(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+ rctx->total = req->nbytes;
+ rctx->src_sg = req->src;
+ rctx->offset = 0;
+ rctx->src_nents = sg_nents(req->src);
+ rctx->op = SHA_OP_UPDATE;
+
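+	/* Maintain a 128-bit message length; carry into digcnt[1] */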
+ rctx->digcnt[0] += rctx->total;
+ if (rctx->digcnt[0] < rctx->total)
+ rctx->digcnt[1]++;
+
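+	/* Less than one full block so far: just buffer the data */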
+ if (rctx->bufcnt + rctx->total < rctx->block_size) {
+ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
+ rctx->src_sg, rctx->offset,
+ rctx->total, 0);
+ rctx->bufcnt += rctx->total;
+
+ return 0;
+ }
+
+ return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ return crypto_shash_digest(shash, data, len, out);
+}
+
+static int aspeed_sham_final(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
+ req->nbytes, rctx->total);
+ rctx->op = SHA_OP_FINAL;
+
+ return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_finup(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ int rc1, rc2;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+ rctx->flags |= SHA_FLAGS_FINUP;
+
+ rc1 = aspeed_sham_update(req);
+ if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
+ return rc1;
+
+	/*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, except when it returned
+	 * -EINPROGRESS or -EBUSY.
+	 */
+ rc2 = aspeed_sham_final(req);
+
+ return rc1 ? : rc2;
+}
+
+static int aspeed_sham_init(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ AHASH_DBG(hace_dev, "%s: digest size:%d\n",
+ crypto_tfm_alg_name(&tfm->base),
+ crypto_ahash_digestsize(tfm));
+
+ rctx->cmd = HASH_CMD_ACC_MODE;
+ rctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA1;
+ rctx->digsize = SHA1_DIGEST_SIZE;
+ rctx->block_size = SHA1_BLOCK_SIZE;
+ rctx->sha_iv = sha1_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha1_iv, rctx->ivsize);
+ break;
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA224;
+ rctx->digsize = SHA224_DIGEST_SIZE;
+ rctx->block_size = SHA224_BLOCK_SIZE;
+ rctx->sha_iv = sha224_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA256;
+ rctx->digsize = SHA256_DIGEST_SIZE;
+ rctx->block_size = SHA256_BLOCK_SIZE;
+ rctx->sha_iv = sha256_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha256_iv, rctx->ivsize);
+ break;
+ case SHA384_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA384;
+ rctx->digsize = SHA384_DIGEST_SIZE;
+ rctx->block_size = SHA384_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha384_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha384_iv, rctx->ivsize);
+ break;
+ case SHA512_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512;
+ rctx->digsize = SHA512_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha512_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_iv, rctx->ivsize);
+ break;
+ default:
+		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+ crypto_ahash_digestsize(tfm));
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+ memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+ return 0;
+}
+
+static int aspeed_sha512s_init(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
+
+ rctx->cmd = HASH_CMD_ACC_MODE;
+ rctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512_224;
+ rctx->digsize = SHA224_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = sha512_224_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512_256;
+ rctx->digsize = SHA256_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = sha512_256_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
+ break;
+ default:
+		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+ crypto_ahash_digestsize(tfm));
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+ memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+ return 0;
+}
+
+static int aspeed_sham_digest(struct ahash_request *req)
+{
+ return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
+}
+
+static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+ int ds = crypto_shash_digestsize(bctx->shash);
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int err = 0;
+ int i;
+
+ AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
+ keylen);
+
+ if (keylen > bs) {
+ err = aspeed_sham_shash_digest(bctx->shash,
+ crypto_shash_get_flags(bctx->shash),
+ key, keylen, bctx->ipad);
+ if (err)
+ return err;
+ keylen = ds;
+
+ } else {
+ memcpy(bctx->ipad, key, keylen);
+ }
+
+ memset(bctx->ipad + keylen, 0, bs - keylen);
+ memcpy(bctx->opad, bctx->ipad, bs);
+
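+	/* Derive the inner and outer key pads as specified by RFC 2104 */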
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= HMAC_IPAD_VALUE;
+ bctx->opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ return err;
+}
+
+static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
+{
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct aspeed_hace_alg *ast_alg;
+
+ ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
+ tctx->hace_dev = ast_alg->hace_dev;
+ tctx->flags = 0;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct aspeed_sham_reqctx));
+
+ if (ast_alg->alg_base) {
+ /* hmac related */
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ tctx->flags |= SHA_FLAGS_HMAC;
+ bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(bctx->shash)) {
+ dev_warn(ast_alg->hace_dev->dev,
+ "base driver '%s' could not be loaded.\n",
+ ast_alg->alg_base);
+ return PTR_ERR(bctx->shash);
+ }
+ }
+
+ tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
+ tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
+ tctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
+{
+ struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
+
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ crypto_free_shash(bctx->shash);
+ }
+}
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static struct aspeed_hace_alg aspeed_ahash_algs[] = {
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "aspeed-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "aspeed-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "aspeed-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha1",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "aspeed-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha224",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "aspeed-hmac-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha256",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "aspeed-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+};
+
+static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "aspeed-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "aspeed-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512_224",
+ .cra_driver_name = "aspeed-sha512_224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512_256",
+ .cra_driver_name = "aspeed-sha512_256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha384",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "aspeed-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "aspeed-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512_224",
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512_224)",
+ .cra_driver_name = "aspeed-hmac-sha512_224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512_256",
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512_256)",
+ .cra_driver_name = "aspeed-hmac-sha512_256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+};
+
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
+ crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
+ crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+}
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int rc, i;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
+ aspeed_ahash_algs[i].hace_dev = hace_dev;
+ rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
+ if (rc) {
+ AHASH_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
+ }
+ }
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
+ aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
+ rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+ if (rc) {
+ AHASH_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
+ }
+ }
+}
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
new file mode 100644
index 000000000000..656cb92c8bb6
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define HACE_DBG(d, fmt, ...) \
+ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define HACE_DBG(d, fmt, ...) \
+ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* HACE interrupt service routine */
+static irqreturn_t aspeed_hace_irq(int irq, void *dev)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev;
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ u32 sts;
+
+ sts = ast_hace_read(hace_dev, ASPEED_HACE_STS);
+ ast_hace_write(hace_dev, sts, ASPEED_HACE_STS);
+
+ HACE_DBG(hace_dev, "irq status: 0x%x\n", sts);
+
+ if (sts & HACE_HASH_ISR) {
+ if (hash_engine->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&hash_engine->done_task);
+ else
+			dev_warn(hace_dev->dev, "HASH: no active requests\n");
+ }
+
+ if (sts & HACE_CRYPTO_ISR) {
+ if (crypto_engine->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&crypto_engine->done_task);
+ else
+			dev_warn(hace_dev->dev, "CRYPTO: no active requests\n");
+ }
+
+ return IRQ_HANDLED;
+}
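
The handler latches the status register, acknowledges it by writing the same value back (the status bits are write-one-to-clear), and only then dispatches work to the engines whose bits were set. A minimal standalone sketch of that flow, with stand-in accessors in place of ast_hace_read()/ast_hace_write() and a made-up status value:

    #include <stdint.h>
    #include <stdio.h>

    #define HACE_HASH_ISR   (1u << 9)     /* same bits as the driver header */
    #define HACE_CRYPTO_ISR (1u << 12)

    static uint32_t sts_reg = 1u << 9;    /* pretend the hash engine fired */

    static uint32_t read_sts(void)    { return sts_reg; }
    static void write_sts(uint32_t v) { sts_reg &= ~v; } /* write-1-to-clear */

    int main(void)
    {
            uint32_t sts = read_sts();

            write_sts(sts);               /* ack everything just observed */

            if (sts & HACE_HASH_ISR)
                    printf("schedule hash done_task\n");
            if (sts & HACE_CRYPTO_ISR)
                    printf("schedule crypto done_task\n");
            return 0;
    }
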
+
+static void aspeed_hace_crypto_done_task(unsigned long data)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+
+ crypto_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_hash_done_task(unsigned long data)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ hash_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ aspeed_register_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ aspeed_register_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ aspeed_unregister_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ aspeed_unregister_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static const struct of_device_id aspeed_hace_of_matches[] = {
+ { .compatible = "aspeed,ast2500-hace", .data = (void *)5, },
+ { .compatible = "aspeed,ast2600-hace", .data = (void *)6, },
+ {},
+};
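
The match table smuggles the SoC generation through the pointer-sized .data member; probe recovers it with a cast to unsigned long and later compares it against the aspeed_version enum (AST2500_VERSION = 5, AST2600_VERSION = 6). A standalone sketch of that integer-in-pointer idiom:

    #include <stdio.h>

    struct match_entry {
            const char *compatible;
            const void *data;            /* opaque, pointer-sized payload */
    };

    static const struct match_entry matches[] = {
            { "aspeed,ast2500-hace", (void *)5 },
            { "aspeed,ast2600-hace", (void *)6 },
    };

    int main(void)
    {
            /* recover the integer exactly as the probe routine does */
            unsigned long version = (unsigned long)matches[1].data;

            printf("matched generation: %lu\n", version);  /* prints 6 */
            return 0;
    }
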
+
+static int aspeed_hace_probe(struct platform_device *pdev)
+{
+ struct aspeed_engine_crypto *crypto_engine;
+ const struct of_device_id *hace_dev_id;
+ struct aspeed_engine_hash *hash_engine;
+ struct aspeed_hace_dev *hace_dev;
+ struct resource *res;
+ int rc;
+
+ hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
+ GFP_KERNEL);
+ if (!hace_dev)
+ return -ENOMEM;
+
+ hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev);
+ if (!hace_dev_id) {
+ dev_err(&pdev->dev, "Failed to match hace dev id\n");
+ return -EINVAL;
+ }
+
+ hace_dev->dev = &pdev->dev;
+ hace_dev->version = (unsigned long)hace_dev_id->data;
+ hash_engine = &hace_dev->hash_engine;
+ crypto_engine = &hace_dev->crypto_engine;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ platform_set_drvdata(pdev, hace_dev);
+
+ hace_dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hace_dev->regs))
+ return PTR_ERR(hace_dev->regs);
+
+ /* Get irq number and register it */
+ hace_dev->irq = platform_get_irq(pdev, 0);
+ if (hace_dev->irq < 0)
+ return -ENXIO;
+
+ rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0,
+ dev_name(&pdev->dev), hace_dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to request interrupt\n");
+ return rc;
+ }
+
+ /* Get clk and enable it */
+ hace_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hace_dev->clk)) {
+ dev_err(&pdev->dev, "Failed to get clk\n");
+ return -ENODEV;
+ }
+
+ rc = clk_prepare_enable(hace_dev->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to enable clock 0x%x\n", rc);
+ return rc;
+ }
+
+ /* Initialize crypto hardware engine structure for hash */
+ hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
+ true);
+ if (!hace_dev->crypt_engine_hash) {
+ rc = -ENOMEM;
+ goto clk_exit;
+ }
+
+ rc = crypto_engine_start(hace_dev->crypt_engine_hash);
+ if (rc)
+ goto err_engine_hash_start;
+
+ tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
+ (unsigned long)hace_dev);
+
+ /* Initialize crypto hardware engine structure for crypto */
+ hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
+ true);
+ if (!hace_dev->crypt_engine_crypto) {
+ rc = -ENOMEM;
+ goto err_engine_hash_start;
+ }
+
+ rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
+ if (rc)
+ goto err_engine_crypto_start;
+
+ tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
+ (unsigned long)hace_dev);
+
+	/* Allocate DMA buffer for hash engine input */
+ hash_engine->ahash_src_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_HASH_SRC_DMA_BUF_LEN,
+ &hash_engine->ahash_src_dma_addr,
+ GFP_KERNEL);
+ if (!hash_engine->ahash_src_addr) {
+ dev_err(&pdev->dev, "Failed to allocate dma buffer\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+	/* Allocate DMA buffer for crypto engine context */
+ crypto_engine->cipher_ctx =
+ dmam_alloc_coherent(&pdev->dev,
+ PAGE_SIZE,
+ &crypto_engine->cipher_ctx_dma,
+ GFP_KERNEL);
+ if (!crypto_engine->cipher_ctx) {
+ dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+	/* Allocate DMA buffer for crypto engine input */
+ crypto_engine->cipher_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
+ &crypto_engine->cipher_dma_addr,
+ GFP_KERNEL);
+ if (!crypto_engine->cipher_addr) {
+ dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+	/* Allocate DMA buffer for crypto engine output */
+ if (hace_dev->version == AST2600_VERSION) {
+ crypto_engine->dst_sg_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_CRYPTO_DST_DMA_BUF_LEN,
+ &crypto_engine->dst_sg_dma_addr,
+ GFP_KERNEL);
+ if (!crypto_engine->dst_sg_addr) {
+ dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+ }
+
+ aspeed_hace_register(hace_dev);
+
+ dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n");
+
+ return 0;
+
+err_engine_crypto_start:
+ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+err_engine_hash_start:
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+clk_exit:
+ clk_disable_unprepare(hace_dev->clk);
+
+ return rc;
+}
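
Since every allocation above uses devm_/dmam_ helpers, the error ladder only has to unwind the resources the device core does not manage: the two crypto engines and the clock, in reverse order of acquisition. A compressed, standalone sketch of the goto-unwind idiom (the resource names are placeholders):

    #include <stdio.h>

    static int acquire(const char *what, int ok)
    {
            printf("acquire %s: %s\n", what, ok ? "ok" : "failed");
            return ok ? 0 : -1;
    }

    static void release(const char *what) { printf("release %s\n", what); }

    static int probe_sketch(void)
    {
            int rc;

            rc = acquire("clk", 1);
            if (rc)
                    return rc;
            rc = acquire("hash engine", 1);
            if (rc)
                    goto clk_exit;
            rc = acquire("crypto engine", 0);  /* simulate a failure */
            if (rc)
                    goto err_engine_hash;

            return 0;

    err_engine_hash:
            release("hash engine");
    clk_exit:
            release("clk");
            return rc;
    }

    int main(void) { return probe_sketch() ? 1 : 0; }
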
+
+static int aspeed_hace_remove(struct platform_device *pdev)
+{
+ struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev);
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ aspeed_hace_unregister(hace_dev);
+
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+
+ tasklet_kill(&hash_engine->done_task);
+ tasklet_kill(&crypto_engine->done_task);
+
+ clk_disable_unprepare(hace_dev->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
+
+static struct platform_driver aspeed_hace_driver = {
+ .probe = aspeed_hace_probe,
+ .remove = aspeed_hace_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_hace_of_matches,
+ },
+};
+
+module_platform_driver(aspeed_hace_driver);
+
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_DESCRIPTION("Aspeed HACE Crypto Accelerator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
new file mode 100644
index 000000000000..f2cde23b56ae
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __ASPEED_HACE_H__
+#define __ASPEED_HACE_H__
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fips.h>
+#include <linux/dma-mapping.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+/*****************************
+ * *
+ * HACE register definitions *
+ * *
+ *****************************/
+#define ASPEED_HACE_SRC 0x00 /* Crypto Data Source Base Address Register */
+#define ASPEED_HACE_DEST 0x04 /* Crypto Data Destination Base Address Register */
+#define ASPEED_HACE_CONTEXT 0x08 /* Crypto Context Buffer Base Address Register */
+#define ASPEED_HACE_DATA_LEN 0x0C /* Crypto Data Length Register */
+#define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */
+
+/* G5 */
+#define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */
+/* G6 */
+#define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length Register */
+#define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */
+
+#define ASPEED_HACE_STS 0x1C /* HACE Status Register */
+
+#define ASPEED_HACE_HASH_SRC 0x20 /* Hash Data Source Base Address Register */
+#define ASPEED_HACE_HASH_DIGEST_BUFF 0x24 /* Hash Digest Write Buffer Base Address Register */
+#define ASPEED_HACE_HASH_KEY_BUFF 0x28 /* Hash HMAC Key Buffer Base Address Register */
+#define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */
+#define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */
+
+/* crypto cmd */
+#define HACE_CMD_SINGLE_DES 0
+#define HACE_CMD_TRIPLE_DES BIT(17)
+#define HACE_CMD_AES_SELECT 0
+#define HACE_CMD_DES_SELECT BIT(16)
+#define HACE_CMD_ISR_EN BIT(12)
+#define HACE_CMD_CONTEXT_SAVE_ENABLE (0)
+#define HACE_CMD_CONTEXT_SAVE_DISABLE BIT(9)
+#define HACE_CMD_AES (0)
+#define HACE_CMD_DES (0)
+#define HACE_CMD_RC4 BIT(8)
+#define HACE_CMD_DECRYPT (0)
+#define HACE_CMD_ENCRYPT BIT(7)
+
+#define HACE_CMD_ECB (0x0 << 4)
+#define HACE_CMD_CBC (0x1 << 4)
+#define HACE_CMD_CFB (0x2 << 4)
+#define HACE_CMD_OFB (0x3 << 4)
+#define HACE_CMD_CTR (0x4 << 4)
+#define HACE_CMD_OP_MODE_MASK (0x7 << 4)
+
+#define HACE_CMD_AES128 (0x0 << 2)
+#define HACE_CMD_AES192 (0x1 << 2)
+#define HACE_CMD_AES256 (0x2 << 2)
+#define HACE_CMD_OP_CASCADE (0x3)
+#define HACE_CMD_OP_INDEPENDENT (0x1)
+
+/* G5 */
+#define HACE_CMD_RI_WO_DATA_ENABLE (0)
+#define HACE_CMD_RI_WO_DATA_DISABLE BIT(11)
+#define HACE_CMD_CONTEXT_LOAD_ENABLE (0)
+#define HACE_CMD_CONTEXT_LOAD_DISABLE BIT(10)
+/* G6 */
+#define HACE_CMD_AES_KEY_FROM_OTP BIT(24)
+#define HACE_CMD_GHASH_TAG_XOR_EN BIT(23)
+#define HACE_CMD_GHASH_PAD_LEN_INV BIT(22)
+#define HACE_CMD_GCM_TAG_ADDR_SEL BIT(21)
+#define HACE_CMD_MBUS_REQ_SYNC_EN BIT(20)
+#define HACE_CMD_DES_SG_CTRL BIT(19)
+#define HACE_CMD_SRC_SG_CTRL BIT(18)
+#define HACE_CMD_CTR_IV_AES_96 (0x1 << 14)
+#define HACE_CMD_CTR_IV_DES_32 (0x1 << 14)
+#define HACE_CMD_CTR_IV_AES_64 (0x2 << 14)
+#define HACE_CMD_CTR_IV_AES_32 (0x3 << 14)
+#define HACE_CMD_AES_KEY_HW_EXP BIT(13)
+#define HACE_CMD_GCM (0x5 << 4)
+
+/* interrupt status reg */
+#define HACE_CRYPTO_ISR BIT(12)
+#define HACE_HASH_ISR BIT(9)
+#define HACE_HASH_BUSY BIT(0)
+
+/* hash cmd reg */
+#define HASH_CMD_MBUS_REQ_SYNC_EN BIT(20)
+#define HASH_CMD_HASH_SRC_SG_CTRL BIT(18)
+#define HASH_CMD_SHA512_224 (0x3 << 10)
+#define HASH_CMD_SHA512_256 (0x2 << 10)
+#define HASH_CMD_SHA384 (0x1 << 10)
+#define HASH_CMD_SHA512 (0)
+#define HASH_CMD_INT_ENABLE BIT(9)
+#define HASH_CMD_HMAC (0x1 << 7)
+#define HASH_CMD_ACC_MODE (0x2 << 7)
+#define HASH_CMD_HMAC_KEY (0x3 << 7)
+#define HASH_CMD_SHA1 (0x2 << 4)
+#define HASH_CMD_SHA224 (0x4 << 4)
+#define HASH_CMD_SHA256 (0x5 << 4)
+#define HASH_CMD_SHA512_SER (0x6 << 4)
+#define HASH_CMD_SHA_SWAP (0x2 << 2)
+
+#define HASH_SG_LAST_LIST BIT(31)
+
+#define CRYPTO_FLAGS_BUSY BIT(1)
+
+#define SHA_OP_UPDATE 1
+#define SHA_OP_FINAL 2
+
+#define SHA_FLAGS_SHA1 BIT(0)
+#define SHA_FLAGS_SHA224 BIT(1)
+#define SHA_FLAGS_SHA256 BIT(2)
+#define SHA_FLAGS_SHA384 BIT(3)
+#define SHA_FLAGS_SHA512 BIT(4)
+#define SHA_FLAGS_SHA512_224 BIT(5)
+#define SHA_FLAGS_SHA512_256 BIT(6)
+#define SHA_FLAGS_HMAC BIT(8)
+#define SHA_FLAGS_FINUP BIT(9)
+#define SHA_FLAGS_MASK (0xff)
+
+#define ASPEED_CRYPTO_SRC_DMA_BUF_LEN 0xa000
+#define ASPEED_CRYPTO_DST_DMA_BUF_LEN 0xa000
+#define ASPEED_CRYPTO_GCM_TAG_OFFSET 0x9ff0
+#define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000
+#define ASPEED_HASH_QUEUE_LENGTH 50
+
+#define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \
+ HACE_CMD_OFB | HACE_CMD_CTR)
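
The block mode lives in bits [6:4], so a mode test masks with HACE_CMD_OP_MODE_MASK before comparing, and HACE_CMD_IV_REQUIRE is just the OR of the four IV-consuming modes. A small sketch of both checks (constants copied from the defines above; the command word is made up):

    #include <stdio.h>

    #define CMD_CBC          (0x1 << 4)
    #define CMD_OP_MODE_MASK (0x7 << 4)
    #define CMD_IV_REQUIRE   0x70   /* CBC | CFB | OFB | CTR == 0x70 */

    int main(void)
    {
            unsigned int cmd = CMD_CBC | 0x3;  /* mode plus other control bits */

            /* extract the mode field before comparing */
            if ((cmd & CMD_OP_MODE_MASK) == CMD_CBC)
                    printf("CBC mode\n");

            /* a nonzero mode field here is one of the IV-consuming modes */
            if (cmd & CMD_IV_REQUIRE)
                    printf("IV required\n");
            return 0;
    }
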
+
+struct aspeed_hace_dev;
+
+typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
+
+struct aspeed_sg_list {
+ __le32 len;
+ __le32 phy_addr;
+};
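
Each hardware scatter-gather descriptor is two little-endian words, and the final entry is tagged by setting HASH_SG_LAST_LIST (bit 31) in its length word. A standalone sketch of filling a two-entry list (addresses and lengths are made up; cpu_to_le32() is modeled as an identity for a little-endian host):

    #include <stdint.h>
    #include <stdio.h>

    #define HASH_SG_LAST_LIST (1u << 31)   /* matches the define above */

    struct sg_entry {                      /* mirrors struct aspeed_sg_list */
            uint32_t len;                  /* __le32 in the driver */
            uint32_t phy_addr;
    };

    /* identity on little-endian hosts; shown only to mark the conversion */
    static uint32_t cpu_to_le32_sketch(uint32_t v) { return v; }

    int main(void)
    {
            struct sg_entry sg[2];

            sg[0].len      = cpu_to_le32_sketch(0x1000);
            sg[0].phy_addr = cpu_to_le32_sketch(0x80000000u);
            /* the last descriptor carries the LAST flag in its length word */
            sg[1].len      = cpu_to_le32_sketch(0x200 | HASH_SG_LAST_LIST);
            sg[1].phy_addr = cpu_to_le32_sketch(0x80001000u);

            printf("last len word: 0x%08x\n", (unsigned)sg[1].len);
            return 0;
    }
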
+
+struct aspeed_engine_hash {
+ struct tasklet_struct done_task;
+ unsigned long flags;
+ struct ahash_request *req;
+
+ /* input buffer */
+ void *ahash_src_addr;
+ dma_addr_t ahash_src_dma_addr;
+
+ dma_addr_t src_dma;
+ dma_addr_t digest_dma;
+
+ size_t src_length;
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
+ aspeed_hace_fn_t dma_prepare;
+};
+
+struct aspeed_sha_hmac_ctx {
+ struct crypto_shash *shash;
+ u8 ipad[SHA512_BLOCK_SIZE];
+ u8 opad[SHA512_BLOCK_SIZE];
+};
+
+struct aspeed_sham_ctx {
+ struct crypto_engine_ctx enginectx;
+
+ struct aspeed_hace_dev *hace_dev;
+ unsigned long flags; /* hmac flag */
+
+	struct aspeed_sha_hmac_ctx base[];
+};
+
+struct aspeed_sham_reqctx {
+	unsigned long flags; /* final or update flag; should not be reused */
+ unsigned long op; /* final or update */
+ u32 cmd; /* trigger cmd */
+
+ /* walk state */
+ struct scatterlist *src_sg;
+ int src_nents;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* per update length */
+
+ size_t digsize;
+ size_t block_size;
+ size_t ivsize;
+ const __be32 *sha_iv;
+
+ /* remain data buffer */
+ u8 buffer[SHA512_BLOCK_SIZE * 2];
+ dma_addr_t buffer_dma_addr;
+	size_t bufcnt; /* number of bytes currently buffered */
+
+ /* output buffer */
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+ dma_addr_t digest_dma_addr;
+ u64 digcnt[2];
+};
+
+struct aspeed_engine_crypto {
+ struct tasklet_struct done_task;
+ unsigned long flags;
+ struct skcipher_request *req;
+
+ /* context buffer */
+ void *cipher_ctx;
+ dma_addr_t cipher_ctx_dma;
+
+ /* input buffer, could be single/scatter-gather lists */
+ void *cipher_addr;
+ dma_addr_t cipher_dma_addr;
+
+ /* output buffer, only used in scatter-gather lists */
+ void *dst_sg_addr;
+ dma_addr_t dst_sg_dma_addr;
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
+};
+
+struct aspeed_cipher_ctx {
+ struct crypto_engine_ctx enginectx;
+
+ struct aspeed_hace_dev *hace_dev;
+ int key_len;
+ u8 key[AES_MAX_KEYLENGTH];
+
+ /* callback func */
+ aspeed_hace_fn_t start;
+
+ struct crypto_skcipher *fallback_tfm;
+};
+
+struct aspeed_cipher_reqctx {
+ int enc_cmd;
+ int src_nents;
+ int dst_nents;
+
+ struct skcipher_request fallback_req; /* keep at the end */
+};
+
+struct aspeed_hace_dev {
+ void __iomem *regs;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ unsigned long version;
+
+ struct crypto_engine *crypt_engine_hash;
+ struct crypto_engine *crypt_engine_crypto;
+
+ struct aspeed_engine_hash hash_engine;
+ struct aspeed_engine_crypto crypto_engine;
+};
+
+struct aspeed_hace_alg {
+ struct aspeed_hace_dev *hace_dev;
+
+ const char *alg_base;
+
+ union {
+ struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
+ } alg;
+};
+
+enum aspeed_version {
+ AST2500_VERSION = 5,
+ AST2600_VERSION
+};
+
+#define ast_hace_write(hace, val, offset) \
+ writel((val), (hace)->regs + (offset))
+#define ast_hace_read(hace, offset) \
+ readl((hace)->regs + (offset))
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+
+#endif
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 9ad188cffd0d..51c66afbe677 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1712,7 +1712,7 @@ static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
cipher_len = regk_crypto_key_256;
break;
default:
- pr_err("%s: Invalid key length %d!\n",
+ pr_err("%s: Invalid key length %zu!\n",
MODULE_NAME, ctx->key_length);
return -EINVAL;
}
@@ -2091,7 +2091,7 @@ static void artpec6_crypto_task(unsigned long data)
return;
}
- spin_lock_bh(&ac->queue_lock);
+ spin_lock(&ac->queue_lock);
list_for_each_entry_safe(req, n, &ac->pending, list) {
struct artpec6_crypto_dma_descriptors *dma = req->dma;
@@ -2128,7 +2128,7 @@ static void artpec6_crypto_task(unsigned long data)
artpec6_crypto_process_queue(ac, &complete_in_progress);
- spin_unlock_bh(&ac->queue_lock);
+ spin_unlock(&ac->queue_lock);
/* Perform the completion callbacks without holding the queue lock
* to allow new request submissions from the callbacks.
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 053315e260c2..c8c799428fe0 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1928,7 +1928,7 @@ static int ahash_enqueue(struct ahash_request *req)
/* SPU2 hardware does not compute hash of zero length data */
if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
(iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
- alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+ alg_name = crypto_ahash_alg_name(tfm);
flow_log("Doing %sfinal %s zero-len hash request in software\n",
rctx->is_final ? "" : "non-", alg_name);
err = do_shash((unsigned char *)alg_name, req->result,
@@ -2029,7 +2029,7 @@ static int ahash_init(struct ahash_request *req)
* supported by the hardware, we need to handle it in software
* by calling synchronous hash functions.
*/
- alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+ alg_name = crypto_ahash_alg_name(tfm);
hash = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(hash)) {
ret = PTR_ERR(hash);
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 71281a3bdbdc..d6d87332140a 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -231,7 +231,7 @@ struct iproc_ctx_s {
/*
* shash descriptor - needed to perform incremental hashing in
- * in software, when hw doesn't support it.
+ * software, when hw doesn't support it.
*/
struct shash_desc *shash;
diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h
index 8ec6edc69f3f..ae4791a8ec4a 100644
--- a/drivers/crypto/cavium/cpt/cpt_hw_types.h
+++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h
@@ -396,7 +396,7 @@ union cptx_vqx_misc_ena_w1s {
* Word0
* reserved_20_63:44 [63:20] Reserved.
* dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
- * to the CPT instruction doorbell count. Readback value is the the
+ * to the CPT instruction doorbell count. Readback value is the
* current number of pending doorbell requests. If counter overflows
* CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
* zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 8c32d0eb8fcf..6872ac344001 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -253,6 +253,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
const struct firmware *fw_entry;
struct device *dev = &cpt->pdev->dev;
struct ucode_header *ucode;
+ unsigned int code_length;
struct microcode *mcode;
int j, ret = 0;
@@ -263,11 +264,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ucode = (struct ucode_header *)fw_entry->data;
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
- mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size) {
+ code_length = ntohl(ucode->code_length);
+ if (code_length == 0 || code_length >= INT_MAX / 2) {
ret = -EINVAL;
goto fw_release;
}
+ mcode->code_size = code_length * 2;
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
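
The point of the hunk: code_length is attacker-controllable (it comes from the firmware image), so the bound must be checked before the multiplication; a product that wraps would still pass the old !mcode->code_size test. A standalone sketch of the pattern:

    #include <limits.h>
    #include <stdio.h>

    /* Returns 0 on success, -1 if the untrusted length would overflow. */
    static int set_code_size(unsigned int code_length, unsigned int *code_size)
    {
            if (code_length == 0 || code_length >= INT_MAX / 2)
                    return -1;

            *code_size = code_length * 2;  /* now provably cannot wrap */
            return 0;
    }

    int main(void)
    {
            unsigned int size;

            printf("%d\n", set_code_size(0x80000000u, &size)); /* -1: rejected */
            printf("%d\n", set_code_size(0x1000u, &size));     /* 0: size = 0x2000 */
            return 0;
    }
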
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 7df71fcebe8f..1046a746d36f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -198,22 +198,16 @@ static int zip_decompress(const u8 *src, unsigned int slen,
/* Legacy Compress framework start */
int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_ctx_init(zip_ctx, 0);
-
- return ret;
+ return zip_ctx_init(zip_ctx, 0);
}
int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_ctx_init(zip_ctx, 1);
-
- return ret;
+ return zip_ctx_init(zip_ctx, 1);
}
void zip_free_comp_ctx(struct crypto_tfm *tfm)
@@ -227,24 +221,18 @@ int zip_comp_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_comp_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* Legacy compress framework end */
/* SCOMP framework start */
@@ -298,22 +286,16 @@ int zip_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
- ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_scomp_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
- ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* SCOMP framework end */
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index ec97daf0fcb7..278636ed251a 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -64,7 +64,6 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
- int ret;
if (!ctx->u.des3.key_len)
return -EINVAL;
@@ -100,9 +99,7 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
rctx->cmd.u.des3.src_len = req->cryptlen;
rctx->cmd.u.des3.dst = req->dst;
- ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
-
- return ret;
+ return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_des3_encrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 7d4b4ad1db1f..9f753cb4f5f1 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -641,6 +641,10 @@ static void ccp_dma_release(struct ccp_device *ccp)
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
@@ -766,8 +770,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
- dma_async_device_unregister(dma_dev);
ccp_dma_release(ccp);
+ dma_async_device_unregister(dma_dev);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 9f588c9728f8..06fc7156c04f 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -211,18 +211,24 @@ static int sev_read_init_ex_file(void)
if (IS_ERR(fp)) {
int ret = PTR_ERR(fp);
- dev_err(sev->dev,
- "SEV: could not open %s for read, error %d\n",
- init_ex_path, ret);
+ if (ret == -ENOENT) {
+ dev_info(sev->dev,
+ "SEV: %s does not exist and will be created later.\n",
+ init_ex_path);
+ ret = 0;
+ } else {
+ dev_err(sev->dev,
+ "SEV: could not open %s for read, error %d\n",
+ init_ex_path, ret);
+ }
return ret;
}
nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
if (nread != NV_LENGTH) {
- dev_err(sev->dev,
- "SEV: failed to read %u bytes to non volatile memory area, ret %ld\n",
+ dev_info(sev->dev,
+			"SEV: could not read %u bytes from non volatile memory area, ret %ld\n",
NV_LENGTH, nread);
- return -EIO;
}
dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
@@ -231,7 +237,7 @@ static int sev_read_init_ex_file(void)
return 0;
}
-static void sev_write_init_ex_file(void)
+static int sev_write_init_ex_file(void)
{
struct sev_device *sev = psp_master->sev_data;
struct file *fp;
@@ -241,14 +247,16 @@ static void sev_write_init_ex_file(void)
lockdep_assert_held(&sev_cmd_mutex);
if (!sev_init_ex_buffer)
- return;
+ return 0;
fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
if (IS_ERR(fp)) {
+ int ret = PTR_ERR(fp);
+
dev_err(sev->dev,
- "SEV: could not open file for write, error %ld\n",
- PTR_ERR(fp));
- return;
+ "SEV: could not open file for write, error %d\n",
+ ret);
+ return ret;
}
nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
@@ -259,18 +267,20 @@ static void sev_write_init_ex_file(void)
dev_err(sev->dev,
"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nwrite);
- return;
+ return -EIO;
}
dev_dbg(sev->dev, "SEV: write successful to NV file\n");
+
+ return 0;
}
-static void sev_write_init_ex_file_if_required(int cmd_id)
+static int sev_write_init_ex_file_if_required(int cmd_id)
{
lockdep_assert_held(&sev_cmd_mutex);
if (!sev_init_ex_buffer)
- return;
+ return 0;
/*
* Only a few platform commands modify the SPI/NV area, but none of the
@@ -285,10 +295,10 @@ static void sev_write_init_ex_file_if_required(int cmd_id)
case SEV_CMD_PEK_GEN:
break;
default:
- return;
+ return 0;
}
- sev_write_init_ex_file();
+ return sev_write_init_ex_file();
}
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
@@ -361,7 +371,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
cmd, reg & PSP_CMDRESP_ERR_MASK);
ret = -EIO;
} else {
- sev_write_init_ex_file_if_required(cmd);
+ ret = sev_write_init_ex_file_if_required(cmd);
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
@@ -410,17 +420,12 @@ static int __sev_init_locked(int *error)
static int __sev_init_ex_locked(int *error)
{
struct sev_data_init_ex data;
- int ret;
memset(&data, 0, sizeof(data));
data.length = sizeof(data);
data.nv_address = __psp_pa(sev_init_ex_buffer);
data.nv_len = NV_LENGTH;
- ret = sev_read_init_ex_file();
- if (ret)
- return ret;
-
if (sev_es_tmr) {
/*
* Do not include the encryption mask on the physical
@@ -439,7 +444,7 @@ static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc, psp_ret = -1;
+ int rc = 0, psp_ret = -1;
int (*init_function)(int *error);
if (!psp || !psp->sev_data)
@@ -450,8 +455,15 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
- init_function = sev_init_ex_buffer ? __sev_init_ex_locked :
- __sev_init_locked;
+ if (sev_init_ex_buffer) {
+ init_function = __sev_init_ex_locked;
+ rc = sev_read_init_ex_file();
+ if (rc)
+ return rc;
+ } else {
+ init_function = __sev_init_locked;
+ }
+
rc = init_function(&psp_ret);
if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
/*
@@ -744,6 +756,11 @@ static int sev_update_firmware(struct device *dev)
struct page *p;
u64 data_size;
+ if (!sev_version_greater_or_equal(0, 15)) {
+ dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
+ return -1;
+ }
+
if (sev_get_firmware(dev, &firmware) == -ENOENT) {
dev_dbg(dev, "No SEV firmware file present\n");
return -1;
@@ -776,6 +793,14 @@ static int sev_update_firmware(struct device *dev)
data->len = firmware->size;
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
+ /*
+ * A quirk for fixing the committed TCB version, when upgrading from
+	 * A quirk to fix the committed TCB version when upgrading from a
+	 * firmware version earlier than 1.50.
+ if (!ret && !sev_version_greater_or_equal(1, 50))
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
else
@@ -1285,8 +1310,7 @@ void sev_pci_init(void)
if (sev_get_api_version())
goto err;
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(sev->dev) == 0)
+ if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
/* If an init_ex_path is provided rely on INIT_EX for PSP initialization
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 6140e4927322..9efd88f871d1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -274,7 +274,7 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
}
ret = dma_map_sg(dev, sg, *nents, direction);
- if (dma_mapping_error(dev, ret)) {
+ if (!ret) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
return -ENOMEM;
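
The underlying API detail: dma_map_sg() returns the number of entries actually mapped and signals failure with 0, so dma_mapping_error(), which expects a dma_addr_t, was the wrong predicate. A sketch of the calling convention with a stand-in mapper (not the real DMA API):

    #include <stdio.h>

    /* stand-in: returns number of mapped entries, 0 on failure */
    static int map_sg_sketch(int nents, int fail)
    {
            return fail ? 0 : nents;
    }

    int main(void)
    {
            int nents = 4;
            int ret = map_sg_sketch(nents, /*fail=*/1);

            if (!ret) {                 /* correct check: zero means failure */
                    nents = 0;
                    printf("mapping failed\n");
                    return 1;           /* the driver returns -ENOMEM here */
            }
            printf("mapped %d entries\n", ret);
            return 0;
    }
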
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9a0558ed82f9..9f0b94c8e03d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -22,7 +22,8 @@ enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
HPRE_CLUSTER2,
- HPRE_CLUSTER3
+ HPRE_CLUSTER3,
+ HPRE_CLUSTERS_NUM_MAX
};
enum hpre_ctrl_dbgfs_file {
@@ -42,9 +43,6 @@ enum hpre_dfx_dbgfs_file {
HPRE_DFX_FILE_NUM
};
-#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1)
-#define HPRE_CLUSTERS_NUM_V3 1
-#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
struct hpre_debugfs_file {
@@ -105,5 +103,5 @@ struct hpre_sqe {
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
-
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg);
#endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 3ba6f15deafc..ef02dadd6217 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -51,6 +51,12 @@ struct hpre_ctx;
#define HPRE_ECC_HW256_KSZ_B 32
#define HPRE_ECC_HW384_KSZ_B 48
+/* capability register mask of driver */
+#define HPRE_DRV_RSA_MASK_CAP BIT(0)
+#define HPRE_DRV_DH_MASK_CAP BIT(1)
+#define HPRE_DRV_ECDH_MASK_CAP BIT(2)
+#define HPRE_DRV_X25519_MASK_CAP BIT(5)
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -147,7 +153,7 @@ static int hpre_alloc_req_id(struct hpre_ctx *ctx)
int id;
spin_lock_irqsave(&ctx->req_lock, flags);
- id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
+ id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_irqrestore(&ctx->req_lock, flags);
return id;
@@ -488,7 +494,7 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
- ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
if (ret)
hpre_stop_qp_and_put(qp);
@@ -2002,55 +2008,53 @@ static struct kpp_alg dh = {
},
};
-static struct kpp_alg ecdh_nist_p192 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p192_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p192",
- .cra_driver_name = "hpre-ecdh-nist-p192",
- .cra_module = THIS_MODULE,
- },
-};
-
-static struct kpp_alg ecdh_nist_p256 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p256_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p256",
- .cra_driver_name = "hpre-ecdh-nist-p256",
- .cra_module = THIS_MODULE,
- },
-};
-
-static struct kpp_alg ecdh_nist_p384 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p384_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p384",
- .cra_driver_name = "hpre-ecdh-nist-p384",
- .cra_module = THIS_MODULE,
- },
+static struct kpp_alg ecdh_curves[] = {
+ {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p192_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p192",
+ .cra_driver_name = "hpre-ecdh-nist-p192",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p256_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "hpre-ecdh-nist-p256",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p384_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "hpre-ecdh-nist-p384",
+ .cra_module = THIS_MODULE,
+ },
+ }
};
static struct kpp_alg curve25519_alg = {
@@ -2070,78 +2074,144 @@ static struct kpp_alg curve25519_alg = {
},
};
-
-static int hpre_register_ecdh(void)
+static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
- ret = crypto_register_kpp(&ecdh_nist_p192);
- if (ret)
- return ret;
+ if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+ return 0;
- ret = crypto_register_kpp(&ecdh_nist_p256);
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
if (ret)
- goto unregister_ecdh_p192;
+ dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);
- ret = crypto_register_kpp(&ecdh_nist_p384);
+ return ret;
+}
+
+static void hpre_unregister_rsa(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+ return;
+
+ crypto_unregister_akcipher(&rsa);
+}
+
+static int hpre_register_dh(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+ return 0;
+
+ ret = crypto_register_kpp(&dh);
if (ret)
- goto unregister_ecdh_p256;
+ dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hpre_unregister_dh(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+ return;
+
+ crypto_unregister_kpp(&dh);
+}
+
+static int hpre_register_ecdh(struct hisi_qm *qm)
+{
+ int ret, i;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
+ ret = crypto_register_kpp(&ecdh_curves[i]);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
+ ecdh_curves[i].base.cra_name, ret);
+ goto unreg_kpp;
+ }
+ }
return 0;
-unregister_ecdh_p256:
- crypto_unregister_kpp(&ecdh_nist_p256);
-unregister_ecdh_p192:
- crypto_unregister_kpp(&ecdh_nist_p192);
+unreg_kpp:
+ for (--i; i >= 0; --i)
+ crypto_unregister_kpp(&ecdh_curves[i]);
+
return ret;
}
-static void hpre_unregister_ecdh(void)
+static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
- crypto_unregister_kpp(&ecdh_nist_p384);
- crypto_unregister_kpp(&ecdh_nist_p256);
- crypto_unregister_kpp(&ecdh_nist_p192);
+ int i;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+ return;
+
+ for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
+ crypto_unregister_kpp(&ecdh_curves[i]);
+}
+
+static int hpre_register_x25519(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+ return 0;
+
+ ret = crypto_register_kpp(&curve25519_alg);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hpre_unregister_x25519(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+ return;
+
+ crypto_unregister_kpp(&curve25519_alg);
}
int hpre_algs_register(struct hisi_qm *qm)
{
int ret;
- rsa.base.cra_flags = 0;
- ret = crypto_register_akcipher(&rsa);
+ ret = hpre_register_rsa(qm);
if (ret)
return ret;
- ret = crypto_register_kpp(&dh);
+ ret = hpre_register_dh(qm);
if (ret)
goto unreg_rsa;
- if (qm->ver >= QM_HW_V3) {
- ret = hpre_register_ecdh();
- if (ret)
- goto unreg_dh;
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- goto unreg_ecdh;
- }
- return 0;
+ ret = hpre_register_ecdh(qm);
+ if (ret)
+ goto unreg_dh;
+
+ ret = hpre_register_x25519(qm);
+ if (ret)
+ goto unreg_ecdh;
+
+ return ret;
unreg_ecdh:
- hpre_unregister_ecdh();
+ hpre_unregister_ecdh(qm);
unreg_dh:
- crypto_unregister_kpp(&dh);
+ hpre_unregister_dh(qm);
unreg_rsa:
- crypto_unregister_akcipher(&rsa);
+ hpre_unregister_rsa(qm);
return ret;
}
void hpre_algs_unregister(struct hisi_qm *qm)
{
- if (qm->ver >= QM_HW_V3) {
- crypto_unregister_kpp(&curve25519_alg);
- hpre_unregister_ecdh();
- }
-
- crypto_unregister_kpp(&dh);
- crypto_unregister_akcipher(&rsa);
+ hpre_unregister_x25519(qm);
+ hpre_unregister_ecdh(qm);
+ hpre_unregister_dh(qm);
+ hpre_unregister_rsa(qm);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 9d529df0eab9..471e5ca720f5 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -53,9 +53,7 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB 0x301414
-#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE 0
@@ -79,8 +77,6 @@
#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
#define HPRE_BD_USR_MASK GENMASK(1, 0)
-#define HPRE_CLUSTER_CORE_MASK_V2 GENMASK(3, 0)
-#define HPRE_CLUSTER_CORE_MASK_V3 GENMASK(7, 0)
#define HPRE_PREFETCH_CFG 0x301130
#define HPRE_SVA_PREFTCH_DFX 0x30115C
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
@@ -122,6 +118,8 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43
+#define HPRE_DEV_ALG_MAX_LEN 256
+
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -137,6 +135,38 @@ struct hpre_hw_error {
const char *msg;
};
+struct hpre_dev_alg {
+ u32 alg_msk;
+ const char *alg;
+};
+
+static const struct hpre_dev_alg hpre_dev_algs[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = "rsa\n"
+ }, {
+ .alg_msk = BIT(1),
+ .alg = "dh\n"
+ }, {
+ .alg_msk = BIT(2),
+ .alg = "ecdh\n"
+ }, {
+ .alg_msk = BIT(3),
+ .alg = "ecdsa\n"
+ }, {
+ .alg_msk = BIT(4),
+ .alg = "sm2\n"
+ }, {
+ .alg_msk = BIT(5),
+ .alg = "x25519\n"
+ }, {
+ .alg_msk = BIT(6),
+ .alg = "x448\n"
+ }, {
+ /* sentinel */
+ }
+};
+
static struct hisi_qm_list hpre_devices = {
.register_to_crypto = hpre_algs_register,
.unregister_from_crypto = hpre_algs_unregister,
@@ -147,6 +177,62 @@ static const char * const hpre_debug_file_name[] = {
[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
+enum hpre_cap_type {
+ HPRE_QM_NFE_MASK_CAP,
+ HPRE_QM_RESET_MASK_CAP,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
+ HPRE_QM_CE_MASK_CAP,
+ HPRE_NFE_MASK_CAP,
+ HPRE_RESET_MASK_CAP,
+ HPRE_OOO_SHUTDOWN_MASK_CAP,
+ HPRE_CE_MASK_CAP,
+ HPRE_CLUSTER_NUM_CAP,
+ HPRE_CORE_TYPE_NUM_CAP,
+ HPRE_CORE_NUM_CAP,
+ HPRE_CLUSTER_CORE_NUM_CAP,
+ HPRE_CORE_ENABLE_BITMAP_CAP,
+ HPRE_DRV_ALG_BITMAP_CAP,
+ HPRE_DEV_ALG_BITMAP_CAP,
+ HPRE_CORE1_ALG_BITMAP_CAP,
+ HPRE_CORE2_ALG_BITMAP_CAP,
+ HPRE_CORE3_ALG_BITMAP_CAP,
+ HPRE_CORE4_ALG_BITMAP_CAP,
+ HPRE_CORE5_ALG_BITMAP_CAP,
+ HPRE_CORE6_ALG_BITMAP_CAP,
+ HPRE_CORE7_ALG_BITMAP_CAP,
+ HPRE_CORE8_ALG_BITMAP_CAP,
+ HPRE_CORE9_ALG_BITMAP_CAP,
+ HPRE_CORE10_ALG_BITMAP_CAP
+};
+
+static const struct hisi_qm_cap_info hpre_basic_info[] = {
+ {HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
+ {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
+ {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
+ {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
+ {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
+ {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
+ {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
+ {HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
+ {HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
+ {HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
+ {HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
+ {HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
+ {HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
+ {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
+};
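
Each row packs {capability id, register offset, bit shift, field mask, then fallback values for three hardware versions}. The decode lives in hisi_qm_get_hw_info(), which is not part of this hunk; a plausible sketch, assuming it shifts and masks the register value:

    #include <stdint.h>
    #include <stdio.h>

    struct cap_info {
            uint32_t offset;          /* register offset      */
            uint32_t shift;           /* field position       */
            uint32_t mask;            /* field width          */
            uint32_t v1, v2, v3;      /* per-version defaults */
    };

    /* hypothetical decode: shift and mask the field out of the register */
    static uint32_t get_hw_info(uint32_t regval, const struct cap_info *cap)
    {
            return (regval >> cap->shift) & cap->mask;
    }

    int main(void)
    {
            /* HPRE_CLUSTER_NUM_CAP row: offset 0x313c, shift 20, mask 0xf */
            struct cap_info cluster_num = { 0x313c, 20, 0xf, 0x0, 0x4, 0x1 };
            uint32_t regval = 0x00400000; /* pretend 4 clusters reported */

            printf("clusters: %u\n", get_hw_info(regval, &cluster_num));
            return 0;
    }
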
+
static const struct hpre_hw_error hpre_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -262,6 +348,46 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
+{
+ u32 cap_val;
+
+ cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
+ if (alg & cap_val)
+ return true;
+
+ return false;
+}
+
+static int hpre_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u32 alg_msk;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+	algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN, GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);
+
+ for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
+ if (alg_msk & hpre_dev_algs[i].alg_msk)
+ strcat(algs, hpre_dev_algs[i].alg);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
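
The builder appends every supported name with its trailing newline and then truncates only the last separator via strrchr(). A standalone sketch:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *names[] = { "rsa\n", "dh\n", "ecdh\n" };
            char algs[64] = "";
            char *p;
            unsigned int i;

            for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                    strcat(algs, names[i]);

            p = strrchr(algs, '\n');  /* drop only the final separator */
            if (p)
                    *p = '\0';

            /* prints rsa, dh, ecdh on separate lines, no trailing newline */
            printf("%s", algs);
            return 0;
    }
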
+
static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -330,14 +456,12 @@ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static inline int hpre_cluster_num(struct hisi_qm *qm)
{
- return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 :
- HPRE_CLUSTERS_NUM_V2;
+ return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
}
static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
{
- return (qm->ver >= QM_HW_V3) ?
- HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2;
+ return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
}
struct hisi_qp *hpre_create_qp(u8 type)
@@ -457,7 +581,7 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -478,7 +602,7 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
@@ -630,7 +754,8 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
if (enable) {
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- val2 = HPRE_HAC_RAS_NFE_ENABLE;
+ val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -644,21 +769,30 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- /* disable hpre hw error interrupts */
- writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+ u32 ce, nfe;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ /* disable hpre hw error interrupts */
+ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
+ u32 ce, nfe;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+
/* clear HPRE hw error source if having */
- writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
/* configure error type */
- writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
- writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
@@ -708,7 +842,7 @@ static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}
-static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
+static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
int cluster_index = file->index - HPRE_CLUSTER_CTRL;
@@ -716,8 +850,6 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
HPRE_CLSTR_ADDR_INTRVL;
writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
-
- return 0;
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -792,9 +924,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
goto err_input;
break;
case HPRE_CLUSTER_CTRL:
- ret = hpre_cluster_inqry_write(file, val);
- if (ret)
- goto err_input;
+ hpre_cluster_inqry_write(file, val);
break;
default:
ret = -EINVAL;
@@ -1006,15 +1136,13 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
if (pdev->revision == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
- if (pdev->revision >= QM_HW_V3)
- qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
- else
- qm->algs = "rsa\ndh";
qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
@@ -1030,7 +1158,19 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qm_list = &hpre_devices;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+		pci_err(pdev, "Failed to init hpre qm configuration!\n");
+ return ret;
+ }
+
+ ret = hpre_set_qm_algs(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to set hpre algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static int hpre_show_last_regs_init(struct hisi_qm *qm)
@@ -1129,7 +1269,11 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,14 +1291,20 @@ static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
- err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR;
- err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+ err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
+ err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+ err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
err_info->acpi_rst = "HRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
}
static const struct hisi_qm_err_ini hpre_err_ini = {
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ad83c194d664..8b387de69d22 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -22,20 +22,17 @@
#define QM_VF_AEQ_INT_MASK 0x4
#define QM_VF_EQ_INT_SOURCE 0x8
#define QM_VF_EQ_INT_MASK 0xc
-#define QM_IRQ_NUM_V1 1
-#define QM_IRQ_NUM_PF_V2 4
-#define QM_IRQ_NUM_VF_V2 2
-#define QM_IRQ_NUM_VF_V3 3
-#define QM_EQ_EVENT_IRQ_VECTOR 0
-#define QM_AEQ_EVENT_IRQ_VECTOR 1
-#define QM_CMD_EVENT_IRQ_VECTOR 2
-#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
+#define QM_IRQ_VECTOR_MASK GENMASK(15, 0)
+#define QM_IRQ_TYPE_MASK GENMASK(15, 0)
+#define QM_IRQ_TYPE_SHIFT 16
+#define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0)
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
#define QM_MB_CMD_DATA_SHIFT 32
#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
+#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT 0
@@ -77,6 +74,9 @@
#define QM_EQ_OVERFLOW 1
#define QM_CQE_ERROR 2
+#define QM_XQ_DEPTH_SHIFT 16
+#define QM_XQ_DEPTH_MASK GENMASK(15, 0)
+
#define QM_DOORBELL_CMD_SQ 0
#define QM_DOORBELL_CMD_CQ 1
#define QM_DOORBELL_CMD_EQ 2
@@ -86,11 +86,7 @@
#define QM_DB_CMD_SHIFT_V1 16
#define QM_DB_INDEX_SHIFT_V1 32
#define QM_DB_PRIORITY_SHIFT_V1 48
-#define QM_QUE_ISO_CFG_V 0x0030
#define QM_PAGE_SIZE 0x0034
-#define QM_QUE_ISO_EN 0x100154
-#define QM_CAPBILITY 0x100158
-#define QM_QP_NUN_MASK GENMASK(10, 0)
#define QM_QP_DB_INTERVAL 0x10000
#define QM_MEM_START_INIT 0x100040
@@ -126,7 +122,6 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
#define QM_ABNORMAL_INT_STATUS 0x100008
@@ -144,8 +139,10 @@
#define QM_RAS_NFE_ENABLE 0x1000f4
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
-#define QM_RAS_MSI_INT_SEL 0x1040f4
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
+#define QM_ECC_MBIT BIT(2)
+#define QM_DB_TIMEOUT BIT(10)
+#define QM_OF_FIFO_OF BIT(11)
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -205,6 +202,8 @@
#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208
+#define QM_FUNC_CAPS_REG 0x3100
+#define QM_CAPBILITY_VERSION GENMASK(7, 0)
#define PCI_BAR_2 2
#define PCI_BAR_4 4
@@ -221,7 +220,6 @@
#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
-#define QM_EQ_DEPTH (1024 * 2)
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
@@ -270,8 +268,8 @@
((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
-#define QM_MK_CQC_DW3_V2(cqe_sz) \
- ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
+#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
+ ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
#define QM_MK_SQC_W13(priority, orders, alg_type) \
(((priority) << QM_SQ_PRIORITY_SHIFT) | \
@@ -284,8 +282,8 @@
((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
-#define QM_MK_SQC_DW3_V2(sqe_sz) \
- ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
+#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
+ ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
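+/*
+ * e.g. QM_MK_SQC_DW3_V2(128, 1024) packs (1024 - 1) into the depth field
+ * and ilog2(128) = 7 into the SQE-size field.
+ */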
#define INIT_QC_COMMON(qc, base, pasid) do { \
(qc)->head = 0; \
@@ -329,6 +327,48 @@ enum qm_mb_cmd {
QM_VF_GET_QOS,
};
+enum qm_basic_type {
+ QM_TOTAL_QP_NUM_CAP = 0x0,
+ QM_FUNC_MAX_QP_CAP,
+ QM_XEQ_DEPTH_CAP,
+ QM_QP_DEPTH_CAP,
+ QM_EQ_IRQ_TYPE_CAP,
+ QM_AEQ_IRQ_TYPE_CAP,
+ QM_ABN_IRQ_TYPE_CAP,
+ QM_PF2VF_IRQ_TYPE_CAP,
+ QM_PF_IRQ_NUM_CAP,
+ QM_VF_IRQ_NUM_CAP,
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
+ {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
+ {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+};
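+/*
+ * e.g. the QM_SUPPORT_MB_COMMAND row tests bit 11 of register 0x3100;
+ * its trailing defaults (0x0, 0x0, 0x1) mark the feature absent on v1/v2
+ * and present from v3 on when the capability version is unset.
+ */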
+
+static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
+ {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
+ {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
+};
+
+static const struct hisi_qm_cap_info qm_basic_info[] = {
+ {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
+ {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
+ {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
+ {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
+ {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
+ {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
+};
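+/*
+ * Each row is {type, register offset, shift, mask, v1 default, v2 default,
+ * v3 default}: pre-v3 hardware takes the per-version default, while v3
+ * decodes (readl(offset) >> shift) & mask - see hisi_qm_get_hw_info().
+ */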
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -421,15 +461,11 @@ struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
u8 cmd, u16 index, u8 priority);
- u32 (*get_irq_num)(struct hisi_qm *qm);
int (*debug_init)(struct hisi_qm *qm);
- void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
+ void (*hw_error_init)(struct hisi_qm *qm);
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
- int (*stop_qp)(struct hisi_qp *qp);
int (*set_msi)(struct hisi_qm *qm, bool set);
- int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
- int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
};
struct qm_dfx_item {
@@ -533,6 +569,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
{50100, 100000, 19}
};
+static void qm_irqs_unregister(struct hisi_qm *qm);
+
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
enum qm_state curr = atomic_read(&qm->status.flags);
@@ -623,22 +661,17 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
}
/* Check if the error causes the master ooo block */
-static int qm_check_dev_error(struct hisi_qm *qm)
+static bool qm_check_dev_error(struct hisi_qm *qm)
{
u32 val, dev_val;
if (qm->fun_type == QM_HW_VF)
- return 0;
+ return false;
- val = qm_get_hw_error_status(qm);
- dev_val = qm_get_dev_err_status(qm);
+ val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
+ dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
- if (qm->ver < QM_HW_V3)
- return (val & QM_ECC_MBIT) ||
- (dev_val & qm->err_info.ecc_2bits_mask);
-
- return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
- (dev_val & (~qm->err_info.dev_ce_mask));
+ return val || dev_val;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -728,8 +761,12 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
+ int ret;
+ u32 val;
+
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
+ ret = -EBUSY;
goto mb_busy;
}
@@ -737,6 +774,14 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
+ ret = -ETIMEDOUT;
+ goto mb_busy;
+ }
+
+ val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
+ if (val & QM_MB_STATUS_MASK) {
+ dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
+ ret = -EIO;
goto mb_busy;
}
@@ -744,7 +789,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
mb_busy:
atomic64_inc(&qm->debug.dfx.mb_err_cnt);
- return -EBUSY;
+ return ret;
}
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
@@ -828,25 +873,52 @@ static int qm_dev_mem_reset(struct hisi_qm *qm)
POLL_TIMEOUT);
}
-static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
+/**
+ * hisi_qm_get_hw_info() - Get device information.
+ * @qm: The qm to query.
+ * @info_table: Array describing the device capability registers.
+ * @index: Index in @info_table.
+ * @is_read: Whether to read the live value from the hardware register;
+ *           if false, the per-version default from @info_table is returned.
+ *
+ * This function returns the device information the caller needs.
+ */
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read)
{
- return QM_IRQ_NUM_V1;
+ u32 val;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ return info_table[index].v1_val;
+ case QM_HW_V2:
+ return info_table[index].v2_val;
+ default:
+ if (!is_read)
+ return info_table[index].v3_val;
+
+ val = readl(qm->io_base + info_table[index].offset);
+ return (val >> info_table[index].shift) & info_table[index].mask;
+ }
}
+EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
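+/*
+ * For instance, hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP,
+ * qm->cap_ver) yields the v2 default of 4 PF vectors on v2 hardware and
+ * decodes bits 31:16 of register 0x311c on v3.
+ */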
-static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
+static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+ u16 *high_bits, enum qm_basic_type type)
{
- if (qm->fun_type == QM_HW_PF)
- return QM_IRQ_NUM_PF_V2;
- else
- return QM_IRQ_NUM_VF_V2;
+ u32 depth;
+
+ depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
+ *high_bits = depth & QM_XQ_DEPTH_MASK;
+ *low_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}
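+/*
+ * Example: the v3 default for QM_XEQ_DEPTH_CAP is 0x4000800, so
+ * qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP)
+ * yields an EQ depth of 0x800 (2048) from the low half and an AEQ depth
+ * of 0x400 (1024) from the high half.
+ */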
-static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
+static u32 qm_get_irq_num(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
- return QM_IRQ_NUM_PF_V2;
+ return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
- return QM_IRQ_NUM_VF_V3;
+ return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}
static int qm_pm_get_sync(struct hisi_qm *qm)
@@ -854,7 +926,7 @@ static int qm_pm_get_sync(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return 0;
ret = pm_runtime_resume_and_get(dev);
@@ -870,7 +942,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_mark_last_busy(dev);
@@ -879,7 +951,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
static void qm_cq_head_update(struct hisi_qp *qp)
{
- if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
+ if (qp->qp_status.cq_head == qp->cq_depth - 1) {
qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
qp->qp_status.cq_head = 0;
} else {
@@ -911,6 +983,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
struct hisi_qm *qm = poll_data->qm;
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+ u16 eq_depth = qm->eq_depth;
int eqe_num = 0;
u16 cqn;
@@ -919,7 +992,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
poll_data->qp_finish_id[eqe_num] = cqn;
eqe_num++;
- if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
+ if (qm->status.eq_head == eq_depth - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
eqe = qm->eqe;
qm->status.eq_head = 0;
@@ -928,7 +1001,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
qm->status.eq_head++;
}
- if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
+ if (eqe_num == (eq_depth >> 1) - 1)
break;
}
@@ -1068,6 +1141,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+ u16 aeq_depth = qm->aeq_depth;
u32 type, qp_id;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
@@ -1092,7 +1166,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
break;
}
- if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
+ if (qm->status.aeq_head == aeq_depth - 1) {
qm->status.aeqc_phase = !qm->status.aeqc_phase;
aeqe = qm->aeqe;
qm->status.aeq_head = 0;
@@ -1118,24 +1192,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-static void qm_irq_unregister(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
-
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
-
- if (qm->ver > QM_HW_V1) {
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
- }
-
- if (qm->ver > QM_HW_V2)
- free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
-}
-
static void qm_init_qp_status(struct hisi_qp *qp)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1151,7 +1207,7 @@ static void qm_init_prefetch(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
u32 page_type = 0x0;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
switch (PAGE_SIZE) {
@@ -1270,7 +1326,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
}
break;
case SHAPER_VFT:
- if (qm->ver >= QM_HW_V3) {
+ if (factor) {
tmp = factor->cir_b |
(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
@@ -1288,10 +1344,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
u32 fun_num, u32 base, u32 number)
{
- struct qm_shaper_factor *factor = &qm->factor[fun_num];
+ struct qm_shaper_factor *factor = NULL;
unsigned int val;
int ret;
+ if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ factor = &qm->factor[fun_num];
+
ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
val & BIT(0), POLL_PERIOD,
POLL_TIMEOUT);
@@ -1349,7 +1408,7 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
}
/* init default shaper qos val */
- if (qm->ver >= QM_HW_V3) {
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
ret = qm_shaper_init_vft(qm, fun_num);
if (ret)
goto back_sqc_cqc;
@@ -1357,11 +1416,9 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return 0;
back_sqc_cqc:
- for (i = SQC_VFT; i <= CQC_VFT; i++) {
- ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
- if (ret)
- return ret;
- }
+ for (i = SQC_VFT; i <= CQC_VFT; i++)
+ qm_set_vft_common(qm, i, fun_num, 0, 0);
+
return ret;
}
@@ -1857,39 +1914,19 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
kfree(ctx_addr);
}
-static int dump_show(struct hisi_qm *qm, void *info,
+static void dump_show(struct hisi_qm *qm, void *info,
unsigned int info_size, char *info_name)
{
struct device *dev = &qm->pdev->dev;
- u8 *info_buf, *info_curr = info;
+ u8 *info_curr = info;
u32 i;
#define BYTE_PER_DW 4
- info_buf = kzalloc(info_size, GFP_KERNEL);
- if (!info_buf)
- return -ENOMEM;
-
- for (i = 0; i < info_size; i++, info_curr++) {
- if (i % BYTE_PER_DW == 0)
- info_buf[i + 3UL] = *info_curr;
- else if (i % BYTE_PER_DW == 1)
- info_buf[i + 1UL] = *info_curr;
- else if (i % BYTE_PER_DW == 2)
- info_buf[i - 1] = *info_curr;
- else if (i % BYTE_PER_DW == 3)
- info_buf[i - 3] = *info_curr;
- }
-
dev_info(dev, "%s DUMP\n", info_name);
- for (i = 0; i < info_size; i += BYTE_PER_DW) {
+ for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
- info_buf[i], info_buf[i + 1UL],
- info_buf[i + 2UL], info_buf[i + 3UL]);
+ *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
}
-
- kfree(info_buf);
-
- return 0;
}
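+/*
+ * Example: the little-endian bytes 01 02 03 04 are printed as
+ * "DW0: 0403 0201", i.e. each 32-bit word most significant byte first.
+ */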
static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
@@ -1929,23 +1966,18 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
if (qm->sqc) {
sqc_curr = qm->sqc + qp_id;
- ret = dump_show(qm, sqc_curr, sizeof(*sqc),
- "SOFT SQC");
- if (ret)
- dev_info(dev, "Show soft sqc failed!\n");
+ dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
}
up_read(&qm->qps_lock);
- goto err_free_ctx;
+ goto free_ctx;
}
- ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
- if (ret)
- dev_info(dev, "Show hw sqc failed!\n");
+ dump_show(qm, sqc, sizeof(*sqc), "SQC");
-err_free_ctx:
+free_ctx:
qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
- return ret;
+ return 0;
}
static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
@@ -1975,23 +2007,18 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
if (qm->cqc) {
cqc_curr = qm->cqc + qp_id;
- ret = dump_show(qm, cqc_curr, sizeof(*cqc),
- "SOFT CQC");
- if (ret)
- dev_info(dev, "Show soft cqc failed!\n");
+ dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
}
up_read(&qm->qps_lock);
- goto err_free_ctx;
+ goto free_ctx;
}
- ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
- if (ret)
- dev_info(dev, "Show hw cqc failed!\n");
+ dump_show(qm, cqc, sizeof(*cqc), "CQC");
-err_free_ctx:
+free_ctx:
qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
- return ret;
+ return 0;
}
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
@@ -2015,9 +2042,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
if (ret)
goto err_free_ctx;
- ret = dump_show(qm, xeqc, size, name);
- if (ret)
- dev_info(dev, "Show hw %s failed!\n", name);
+ dump_show(qm, xeqc, size, name);
err_free_ctx:
qm_ctx_free(qm, size, xeqc, &xeqc_dma);
@@ -2025,7 +2050,7 @@ err_free_ctx:
}
static int q_dump_param_parse(struct hisi_qm *qm, char *s,
- u32 *e_id, u32 *q_id)
+ u32 *e_id, u32 *q_id, u16 q_depth)
{
struct device *dev = &qm->pdev->dev;
unsigned int qp_num = qm->qp_num;
@@ -2051,8 +2076,8 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
}
ret = kstrtou32(presult, 0, e_id);
- if (ret || *e_id >= QM_Q_DEPTH) {
- dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
+ if (ret || *e_id >= q_depth) {
+ dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
return -EINVAL;
}
@@ -2066,54 +2091,49 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
static int qm_sq_dump(struct hisi_qm *qm, char *s)
{
- struct device *dev = &qm->pdev->dev;
+ u16 sq_depth = qm->qp_array->sq_depth;
void *sqe, *sqe_curr;
struct hisi_qp *qp;
u32 qp_id, sqe_id;
int ret;
- ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
if (ret)
return ret;
- sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+ sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
if (!sqe)
return -ENOMEM;
qp = &qm->qp_array[qp_id];
- memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+ memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
qm->debug.sqe_mask_len);
- ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
- if (ret)
- dev_info(dev, "Show sqe failed!\n");
+ dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
kfree(sqe);
- return ret;
+ return 0;
}
static int qm_cq_dump(struct hisi_qm *qm, char *s)
{
- struct device *dev = &qm->pdev->dev;
struct qm_cqe *cqe_curr;
struct hisi_qp *qp;
u32 qp_id, cqe_id;
int ret;
- ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
if (ret)
return ret;
qp = &qm->qp_array[qp_id];
cqe_curr = qp->cqe + cqe_id;
- ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
- if (ret)
- dev_info(dev, "Show cqe failed!\n");
+ dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
- return ret;
+ return 0;
}
static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
@@ -2131,11 +2151,11 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
if (ret)
return -EINVAL;
- if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
- dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
+ if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
+ dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
return -EINVAL;
- } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
- dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
+ } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
+ dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
return -EINVAL;
}
@@ -2150,9 +2170,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
goto err_unlock;
}
- ret = dump_show(qm, xeqe, size, name);
- if (ret)
- dev_info(dev, "Show %s failed!\n", name);
+ dump_show(qm, xeqe, size, name);
err_unlock:
up_read(&qm->qps_lock);
@@ -2245,8 +2263,10 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
return ret;
/* Judge if the instance is being reset. */
- if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
- return 0;
+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+ ret = 0;
+ goto put_dfx_access;
+ }
if (count > QM_DBG_WRITE_LEN) {
ret = -ENOSPC;
@@ -2300,58 +2320,65 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
file->debug = &qm->debug;
}
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_cfg(struct hisi_qm *qm)
{
- qm->error_mask = ce | nfe | fe;
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
/* clear QM hw residual error source */
- writel(QM_ABNORMAL_INT_SOURCE_CLR,
- qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
- writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
- writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
+ writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
+ u32 irq_unmask;
- qm_hw_error_cfg(qm, ce, nfe, fe);
+ qm_hw_error_cfg(qm);
+ irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
- writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+ u32 irq_mask = qm->error_mask;
+
+ irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
+ u32 irq_unmask;
- qm_hw_error_cfg(qm, ce, nfe, fe);
+ qm_hw_error_cfg(qm);
/* enable close master ooo when hardware error happened */
- writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
- writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+ u32 irq_mask = qm->error_mask;
+
+ irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
/* disable close master ooo when hardware error happened */
writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
@@ -2396,7 +2423,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp, val;
+ u32 error_status, tmp;
/* read err sts */
tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -2407,17 +2434,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
- /* ce error does not need to be reset */
- if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
- writel(error_status, qm->io_base +
- QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_info.nfe,
- qm->io_base + QM_RAS_NFE_ENABLE);
- return ACC_ERR_RECOVERED;
- }
+ if (error_status & qm->err_info.qm_reset_mask)
+ return ACC_ERR_NEED_RESET;
- return ACC_ERR_NEED_RESET;
+ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -2493,7 +2514,7 @@ static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
u64 val;
u32 i;
- if (!qm->vfs_num || qm->ver < QM_HW_V3)
+ if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return 0;
while (true) {
@@ -2756,7 +2777,6 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
- .get_irq_num = qm_get_irq_num_v1,
.hw_error_init = qm_hw_error_init_v1,
.set_msi = qm_set_msi,
};
@@ -2764,7 +2784,6 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v2,
.hw_error_init = qm_hw_error_init_v2,
.hw_error_uninit = qm_hw_error_uninit_v2,
.hw_error_handle = qm_hw_error_handle_v2,
@@ -2774,14 +2793,10 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v3,
.hw_error_init = qm_hw_error_init_v3,
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
- .stop_qp = qm_stop_qp,
.set_msi = qm_set_msi_v3,
- .ping_all_vfs = qm_ping_all_vfs,
- .ping_pf = qm_ping_pf,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2789,7 +2804,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
+ if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
return NULL;
return qp->sqe + sq_tail * qp->qm->sqe_size;
@@ -2830,7 +2845,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp = &qm->qp_array[qp_id];
hisi_qm_unset_hw_reset(qp);
- memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
qp->event_cb = NULL;
qp->req_cb = NULL;
@@ -2911,9 +2926,9 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
- sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+ sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
} else {
- sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
sqc->w8 = 0; /* rand_qc */
}
sqc->cq_num = cpu_to_le16(qp_id);
@@ -2954,9 +2969,9 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
if (ver == QM_HW_V1) {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
QM_QC_CQE_SIZE));
- cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+ cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
} else {
- cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
cqc->w8 = 0; /* rand_qc */
}
cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
@@ -3043,13 +3058,14 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
{
int qp_used = atomic_read(&qp->qp_status.used);
u16 cur_tail = qp->qp_status.sq_tail;
- u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+ u16 sq_depth = qp->sq_depth;
+ u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
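+ /* e.g. depth 1024, tail 5, 10 requests in flight: head = (5 + 1024 - 10) % 1024 = 1019 */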
struct hisi_qm *qm = qp->qm;
u16 pos;
int i;
for (i = 0; i < qp_used; i++) {
- pos = (i + cur_head) % QM_Q_DEPTH;
+ pos = (i + cur_head) % sq_depth;
qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
atomic_dec(&qp->qp_status.used);
}
@@ -3078,8 +3094,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
return 0;
/* Kunpeng930 supports drain qp by device */
- if (qm->ops->stop_qp) {
- ret = qm->ops->stop_qp(qp);
+ if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+ ret = qm_stop_qp(qp);
if (ret)
dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
return ret;
@@ -3197,7 +3213,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
+ u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
void *sqe = qm_get_avail_sqe(qp);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
@@ -3286,7 +3302,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
{
struct hisi_qp *qp = q->priv;
- hisi_qm_cache_wb(qp->qm);
hisi_qm_release_qp(qp);
}
@@ -3310,7 +3325,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
if (qm->ver == QM_HW_V1) {
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
- } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
+ } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
@@ -3387,6 +3402,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
unsigned long arg)
{
struct hisi_qp *qp = q->priv;
+ struct hisi_qp_info qp_info;
struct hisi_qp_ctx qp_ctx;
if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
@@ -3403,11 +3419,25 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
if (copy_to_user((void __user *)arg, &qp_ctx,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- } else {
- return -EINVAL;
+
+ return 0;
+ } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
+ if (copy_from_user(&qp_info, (void __user *)arg,
+ sizeof(struct hisi_qp_info)))
+ return -EFAULT;
+
+ qp_info.sqe_size = qp->qm->sqe_size;
+ qp_info.sq_depth = qp->sq_depth;
+ qp_info.cq_depth = qp->cq_depth;
+
+ if (copy_to_user((void __user *)arg, &qp_info,
+ sizeof(struct hisi_qp_info)))
+ return -EFAULT;
+
+ return 0;
}
- return 0;
+ return -EINVAL;
}
static const struct uacce_ops uacce_qm_ops = {
@@ -3427,6 +3457,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
struct uacce_device *uacce;
unsigned long mmio_page_nr;
unsigned long dus_page_nr;
+ u16 sq_depth, cq_depth;
struct uacce_interface interface = {
.flags = UACCE_DEV_SVA,
.ops = &uacce_qm_ops,
@@ -3453,7 +3484,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->is_vf = pdev->is_virtfn;
uacce->priv = qm;
- uacce->algs = qm->algs;
if (qm->ver == QM_HW_V1)
uacce->api_ver = HISI_QM_API_VER_BASE;
@@ -3464,15 +3494,17 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1)
mmio_page_nr = QM_DOORBELL_PAGE_NR;
- else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
+ else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
mmio_page_nr = QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
else
mmio_page_nr = qm->db_interval / PAGE_SIZE;
+ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
/* Add one more page for device or qp status */
- dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
+ dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
+ sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
PAGE_SHIFT;
uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
@@ -3577,10 +3609,11 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
kfree(qm->qp_array);
}
-static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
+ u16 sq_depth, u16 cq_depth)
{
struct device *dev = &qm->pdev->dev;
- size_t off = qm->sqe_size * QM_Q_DEPTH;
+ size_t off = qm->sqe_size * sq_depth;
struct hisi_qp *qp;
int ret = -ENOMEM;
@@ -3600,6 +3633,8 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
qp->cqe = qp->qdma.va + off;
qp->cqe_dma = qp->qdma.dma + off;
qp->qdma.size = dma_size;
+ qp->sq_depth = sq_depth;
+ qp->cq_depth = cq_depth;
qp->qm = qm;
qp->qp_id = id;
@@ -3626,7 +3661,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
qm->misc_ctl = false;
- if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+ if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
}
@@ -3636,7 +3671,7 @@ static void qm_cmd_uninit(struct hisi_qm *qm)
{
u32 val;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return;
val = readl(qm->io_base + QM_IFC_INT_MASK);
@@ -3648,7 +3683,7 @@ static void qm_cmd_init(struct hisi_qm *qm)
{
u32 val;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return;
/* Clear communication interrupt source */
@@ -3664,7 +3699,7 @@ static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
- if (qm->use_db_isolation)
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
iounmap(qm->db_io_base);
iounmap(qm->io_base);
@@ -3714,7 +3749,9 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
}
idr_destroy(&qm->qp_idr);
- kfree(qm->factor);
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ kfree(qm->factor);
}
/**
@@ -3740,7 +3777,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
- qm_irq_unregister(qm);
+ qm_irqs_unregister(qm);
hisi_qm_pci_uninit(qm);
if (qm->use_sva) {
uacce_remove(qm->uacce);
@@ -3841,7 +3878,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
- eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
DMA_TO_DEVICE);
@@ -3870,7 +3907,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
- aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
DMA_TO_DEVICE);
@@ -4136,14 +4173,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
static void qm_hw_error_init(struct hisi_qm *qm)
{
- struct hisi_qm_err_info *err_info = &qm->err_info;
-
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
- qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
+ qm->ops->hw_error_init(qm);
}
static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -4497,12 +4532,10 @@ static int qm_vf_read_qos(struct hisi_qm *qm)
qm->mb_qos = 0;
/* vf ping pf to get function qos */
- if (qm->ops->ping_pf) {
- ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
- if (ret) {
- pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
- return ret;
- }
+ ret = qm_ping_pf(qm, QM_VF_GET_QOS);
+ if (ret) {
+ pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
+ return ret;
}
while (true) {
@@ -4674,14 +4707,14 @@ static const struct file_operations qm_algqos_fops = {
* hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
* @qm: The qm for which we want to add debugfs files.
*
- * Create function qos debugfs files.
+ * Create function qos debugfs files; a VF pings the PF to get its function qos.
*/
static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
qm, &qm_algqos_fops);
- else
+ else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
qm, &qm_algqos_fops);
}
@@ -4729,7 +4762,7 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
&qm_atomic64_ops);
}
- if (qm->ver >= QM_HW_V3)
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
hisi_qm_set_algqos_init(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
@@ -4768,6 +4801,14 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
+{
+ int i;
+
+ for (i = 1; i <= total_func; i++)
+ qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+}
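+/* Index 0 of qm->factor is the PF itself; indexes 1..total_func cover the VFs. */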
+
/**
* hisi_qm_sriov_enable() - enable virtual functions
* @pdev: the PCIe device
@@ -4794,7 +4835,17 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
- num_vfs = min_t(int, max_vfs, total_vfs);
+ if (max_vfs > total_vfs) {
+ pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
+ ret = -ERANGE;
+ goto err_put_sync;
+ }
+
+ num_vfs = max_vfs;
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ hisi_qm_init_vf_qos(qm, num_vfs);
+
ret = qm_vf_q_assign(qm, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
@@ -4830,7 +4881,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
int ret;
if (pci_vfs_assigned(pdev)) {
@@ -4845,8 +4895,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
- /* clear vf function shaper configure array */
- memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+
ret = qm_clear_vft_config(qm);
if (ret)
return ret;
@@ -4891,17 +4940,11 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
if (qm->err_ini->log_dev_hw_err)
qm->err_ini->log_dev_hw_err(qm, err_sts);
- /* ce error does not need to be reset */
- if ((err_sts | qm->err_info.dev_ce_mask) ==
- qm->err_info.dev_ce_mask) {
- if (qm->err_ini->clear_dev_hw_err_status)
- qm->err_ini->clear_dev_hw_err_status(qm,
- err_sts);
+ if (err_sts & qm->err_info.dev_reset_mask)
+ return ACC_ERR_NEED_RESET;
- return ACC_ERR_RECOVERED;
- }
-
- return ACC_ERR_NEED_RESET;
+ if (qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
}
return ACC_ERR_RECOVERED;
@@ -5070,8 +5113,8 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
return 0;
/* Kunpeng930 supports to notify VFs to stop before PF reset */
- if (qm->ops->ping_all_vfs) {
- ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+ ret = qm_ping_all_vfs(qm, cmd);
if (ret)
pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
} else {
@@ -5262,8 +5305,8 @@ static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
}
/* Kunpeng930 supports to notify VFs to start after PF reset. */
- if (qm->ops->ping_all_vfs) {
- ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+ ret = qm_ping_all_vfs(qm, cmd);
if (ret)
pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
} else {
@@ -5466,8 +5509,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
if (pdev->is_virtfn)
return PCI_ERS_RESULT_RECOVERED;
- pci_aer_clear_nonfatal_status(pdev);
-
/* reset pcie device controller */
ret = qm_controller_reset(qm);
if (ret) {
@@ -5599,51 +5640,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int qm_irq_register(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, 0, qm->dev_name, qm);
- if (ret)
- return ret;
-
- if (qm->ver > QM_HW_V1) {
- ret = request_threaded_irq(pci_irq_vector(pdev,
- QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, qm_aeq_thread,
- 0, qm->dev_name, qm);
- if (ret)
- goto err_aeq_irq;
-
- if (qm->fun_type == QM_HW_PF) {
- ret = request_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, 0, qm->dev_name, qm);
- if (ret)
- goto err_abonormal_irq;
- }
- }
-
- if (qm->ver > QM_HW_V2) {
- ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
- qm_mb_cmd_irq, 0, qm->dev_name, qm);
- if (ret)
- goto err_mb_cmd_irq;
- }
-
- return 0;
-
-err_mb_cmd_irq:
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
-err_abonormal_irq:
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- return ret;
-}
/**
* hisi_qm_dev_shutdown() - Shutdown device.
@@ -5711,7 +5707,7 @@ err_prepare:
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
pci_save_state(pdev);
- ret = qm->ops->ping_pf(qm, cmd);
+ ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
}
@@ -5729,7 +5725,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
cmd = QM_VF_START_FAIL;
}
- ret = qm->ops->ping_pf(qm, cmd);
+ ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
@@ -5924,21 +5920,193 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
+static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_abnormal_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+
+ return ret;
+}
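+/*
+ * Example: QM_ABN_IRQ_TYPE_CAP defaults to 0x10003 on v2/v3, i.e. an
+ * abnormal-irq type of 0x1 in bits 31:16 and MSI vector 3 in bits 15:0;
+ * the v1 default of 0 skips registration entirely.
+ */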
+
+static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_aeq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_aeq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
+ qm_aeq_thread, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_eq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_eq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_irqs_unregister(struct hisi_qm *qm)
+{
+ qm_unregister_mb_cmd_irq(qm);
+ qm_unregister_abnormal_irq(qm);
+ qm_unregister_aeq_irq(qm);
+ qm_unregister_eq_irq(qm);
+}
+
+static int qm_irqs_register(struct hisi_qm *qm)
+{
+ int ret;
+
+ ret = qm_register_eq_irq(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_register_aeq_irq(qm);
+ if (ret)
+ goto free_eq_irq;
+
+ ret = qm_register_abnormal_irq(qm);
+ if (ret)
+ goto free_aeq_irq;
+
+ ret = qm_register_mb_cmd_irq(qm);
+ if (ret)
+ goto free_abnormal_irq;
+
+ return 0;
+
+free_abnormal_irq:
+ qm_unregister_abnormal_irq(qm);
+free_aeq_irq:
+ qm_unregister_aeq_irq(qm);
+free_eq_irq:
+ qm_unregister_eq_irq(qm);
+ return ret;
+}
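+/* Registration order (EQ, AEQ, abnormal, mailbox) is the reverse of the teardown order above. */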
+
static int qm_get_qp_num(struct hisi_qm *qm)
{
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = QM_QNUM_V1;
- else if (qm->ver == QM_HW_V2)
- qm->ctrl_qp_num = QM_QNUM_V2;
- else
- qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
- QM_QP_NUN_MASK;
+ bool is_db_isolation;
- if (qm->use_db_isolation)
- qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
- QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
- else
- qm->max_qp_num = qm->ctrl_qp_num;
+ /* The VF's qp_num is assigned by the PF; from v2 on, the VF can read it from the vft. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver != QM_HW_V1)
+ /* v2 starts to support getting the vft by mailbox */
+ return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+
+ return 0;
+ }
+
+ is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+ qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
+ qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ QM_FUNC_MAX_QP_CAP, is_db_isolation);
/* check if qp number is valid */
if (qm->qp_num > qm->max_qp_num) {
@@ -5950,6 +6118,39 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}
+static void qm_get_hw_caps(struct hisi_qm *qm)
+{
+ const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
+ qm_cap_info_pf : qm_cap_info_vf;
+ u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
+ ARRAY_SIZE(qm_cap_info_vf);
+ u32 val, i;
+
+ /* Doorbell isolate register is an independent register. */
+ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
+ if (val)
+ set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+
+ if (qm->ver >= QM_HW_V3) {
+ val = readl(qm->io_base + QM_FUNC_CAPS_REG);
+ qm->cap_ver = val & QM_CAPBILITY_VERSION;
+ }
+
+ /* Get the capabilities common to PF and VF */
+ for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
+ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
+ if (val)
+ set_bit(qm_cap_info_comm[i].type, &qm->caps);
+ }
+
+ /* Get the PF- or VF-specific capabilities */
+ for (i = 0; i < size; i++) {
+ val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
+ if (val)
+ set_bit(cap_info[i].type, &qm->caps);
+ }
+}
+
static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5969,16 +6170,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
- if (qm->ver > QM_HW_V2) {
- if (qm->fun_type == QM_HW_PF)
- qm->use_db_isolation = readl(qm->io_base +
- QM_QUE_ISO_EN) & BIT(0);
- else
- qm->use_db_isolation = readl(qm->io_base +
- QM_QUE_ISO_CFG_V) & BIT(0);
- }
-
- if (qm->use_db_isolation) {
+ qm_get_hw_caps(qm);
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
qm->db_interval = QM_QP_DB_INTERVAL;
qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
qm->db_io_base = ioremap(qm->db_phys_base,
@@ -5993,16 +6186,14 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
- if (qm->fun_type == QM_HW_PF) {
- ret = qm_get_qp_num(qm);
- if (ret)
- goto err_db_ioremap;
- }
+ ret = qm_get_qp_num(qm);
+ if (ret)
+ goto err_db_ioremap;
return 0;
err_db_ioremap:
- if (qm->use_db_isolation)
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
iounmap(qm->db_io_base);
err_ioremap:
iounmap(qm->io_base);
@@ -6033,11 +6224,7 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
goto err_get_pci_res;
pci_set_master(pdev);
- if (!qm->ops->get_irq_num) {
- ret = -EOPNOTSUPP;
- goto err_get_pci_res;
- }
- num_vec = qm->ops->get_irq_num(qm);
+ num_vec = qm_get_irq_num(qm);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -6080,6 +6267,7 @@ static int hisi_qm_init_work(struct hisi_qm *qm)
static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
+ u16 sq_depth, cq_depth;
size_t qp_dma_size;
int i, ret;
@@ -6093,13 +6281,14 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
return -ENOMEM;
}
+ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
/* one more page for device or qp statuses */
- qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
for (i = 0; i < qm->qp_num; i++) {
qm->poll_data[i].qm = qm;
- ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
if (ret)
goto err_init_qp_mem;
@@ -6116,15 +6305,18 @@ err_init_qp_mem:
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- int ret, total_func, i;
+ int ret, total_func;
size_t off = 0;
- total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
- qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
- if (!qm->factor)
- return -ENOMEM;
- for (i = 0; i < total_func; i++)
- qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
+ total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
+ qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+ if (!qm->factor)
+ return -ENOMEM;
+
+ /* Only the PF value needs to be initialized */
+ qm->factor[0].func_qos = QM_QOS_MAX_VAL;
+ }
#define QM_INIT_BUF(qm, type, num) do { \
(qm)->type = ((qm)->qdma.va + (off)); \
@@ -6133,20 +6325,21 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
} while (0)
idr_init(&qm->qp_idr);
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
GFP_ATOMIC);
dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
if (!qm->qdma.va) {
- ret = -ENOMEM;
- goto err_alloc_qdma;
+ ret = -ENOMEM;
+ goto err_destroy_idr;
}
- QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, eqe, qm->eq_depth);
+ QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
@@ -6158,8 +6351,10 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
err_alloc_qp_array:
dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
-err_alloc_qdma:
- kfree(qm->factor);
+err_destroy_idr:
+ idr_destroy(&qm->qp_idr);
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ kfree(qm->factor);
return ret;
}
@@ -6202,17 +6397,10 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
return ret;
- ret = qm_irq_register(qm);
+ ret = qm_irqs_register(qm);
if (ret)
goto err_pci_init;
- if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_irq_register;
- }
-
if (qm->fun_type == QM_HW_PF) {
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
@@ -6251,7 +6439,7 @@ err_alloc_uacce:
qm->uacce = NULL;
}
err_irq_register:
- qm_irq_unregister(qm);
+ qm_irqs_unregister(qm);
err_pci_init:
hisi_qm_pci_uninit(qm);
return ret;
@@ -6302,7 +6490,7 @@ void hisi_qm_pm_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
@@ -6321,7 +6509,7 @@ void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_get_noresume(dev);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index d2a0bc93e752..3e57fc04b377 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -17,6 +17,7 @@ struct sec_alg_res {
dma_addr_t a_ivin_dma;
u8 *out_mac;
dma_addr_t out_mac_dma;
+ u16 depth;
};
/* Cipher request of SEC private */
@@ -115,9 +116,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req *req_list[QM_Q_DEPTH];
+ struct sec_req **req_list;
struct idr req_idr;
- struct sec_alg_res res[QM_Q_DEPTH];
+ struct sec_alg_res *res;
struct sec_ctx *ctx;
spinlock_t req_lock;
struct list_head backlog;
@@ -191,8 +192,37 @@ struct sec_dev {
bool iommu_used;
};
+enum sec_cap_type {
+ SEC_QM_NFE_MASK_CAP = 0x0,
+ SEC_QM_RESET_MASK_CAP,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP,
+ SEC_QM_CE_MASK_CAP,
+ SEC_NFE_MASK_CAP,
+ SEC_RESET_MASK_CAP,
+ SEC_OOO_SHUTDOWN_MASK_CAP,
+ SEC_CE_MASK_CAP,
+ SEC_CLUSTER_NUM_CAP,
+ SEC_CORE_TYPE_NUM_CAP,
+ SEC_CORE_NUM_CAP,
+ SEC_CORES_PER_CLUSTER_NUM_CAP,
+ SEC_CORE_ENABLE_BITMAP,
+ SEC_DRV_ALG_BITMAP_LOW,
+ SEC_DRV_ALG_BITMAP_HIGH,
+ SEC_DEV_ALG_BITMAP_LOW,
+ SEC_DEV_ALG_BITMAP_HIGH,
+ SEC_CORE1_ALG_BITMAP_LOW,
+ SEC_CORE1_ALG_BITMAP_HIGH,
+ SEC_CORE2_ALG_BITMAP_LOW,
+ SEC_CORE2_ALG_BITMAP_HIGH,
+ SEC_CORE3_ALG_BITMAP_LOW,
+ SEC_CORE3_ALG_BITMAP_HIGH,
+ SEC_CORE4_ALG_BITMAP_LOW,
+ SEC_CORE4_ALG_BITMAP_HIGH,
+};
+
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 77c9f13cf69a..84ae8ddd1a13 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -59,14 +59,14 @@
#define SEC_ICV_MASK 0x000E
#define SEC_SQE_LEN_RATE_MASK 0x3
-#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
#define SEC_MAX_AAD_LEN 65535
#define SEC_MAX_CCM_AAD_LEN 65279
-#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
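+/* e.g. a queue depth of 1024 gives 64 KiB of MAC space; allocation doubles it since each slot strides SEC_MAX_MAC_LEN << 1. */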
#define SEC_PBUF_SZ 512
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
@@ -74,11 +74,11 @@
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
-#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
-#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
- SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
-#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
- SEC_PBUF_LEFT_SZ)
+#define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \
+ SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
+ SEC_PBUF_LEFT_SZ(depth))
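+/*
+ * e.g. if a 4 KiB page fits 6 pbuf packages, a depth of 1024 needs 170
+ * full pages plus a 4-package tail accounted by SEC_PBUF_LEFT_SZ().
+ */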
#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
@@ -104,6 +104,16 @@
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+struct sec_skcipher {
+ u64 alg_msk;
+ struct skcipher_alg alg;
+};
+
+struct sec_aead {
+ u64 alg_msk;
+ struct aead_alg alg;
+};
+
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -128,9 +138,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
int req_id;
spin_lock_bh(&qp_ctx->req_lock);
-
- req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
- 0, QM_Q_DEPTH, GFP_ATOMIC);
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
dev_err(req->ctx->dev, "alloc req id fail!\n");
@@ -148,7 +156,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+ if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
dev_err(req->ctx->dev, "free request id invalid!\n");
return;
}
@@ -300,14 +308,15 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
&res->c_ivin_dma, GFP_KERNEL);
if (!res->c_ivin)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
}
@@ -318,20 +327,21 @@ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->c_ivin)
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
res->c_ivin, res->c_ivin_dma);
}
static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
&res->a_ivin_dma, GFP_KERNEL);
if (!res->a_ivin)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
}
@@ -342,20 +352,21 @@ static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->a_ivin)
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
res->a_ivin, res->a_ivin_dma);
}
static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
&res->out_mac_dma, GFP_KERNEL);
if (!res->out_mac)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].out_mac_dma = res->out_mac_dma +
i * (SEC_MAX_MAC_LEN << 1);
res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
@@ -367,14 +378,14 @@ static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->out_mac)
- dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
res->out_mac, res->out_mac_dma);
}
static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->pbuf)
- dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
res->pbuf, res->pbuf_dma);
}
@@ -384,10 +395,12 @@ static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
*/
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
+ int size = SEC_PBUF_PAGE_NUM(q_depth);
int pbuf_page_offset;
int i, j, k;
- res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
&res->pbuf_dma, GFP_KERNEL);
if (!res->pbuf)
return -ENOMEM;
@@ -400,11 +413,11 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
* So we need SEC_PBUF_PAGE_NUM numbers of PAGE
* for the SEC_TOTAL_PBUF_SZ
*/
- for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ for (i = 0; i <= size; i++) {
pbuf_page_offset = PAGE_SIZE * i;
for (j = 0; j < SEC_PBUF_NUM; j++) {
k = i * SEC_PBUF_NUM + j;
- if (k == QM_Q_DEPTH)
+ if (k == q_depth)
break;
res[k].pbuf = res->pbuf +
j * SEC_PBUF_PKG + pbuf_page_offset;
@@ -470,36 +483,29 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
sec_free_mac_resource(dev, qp_ctx->res);
}
-static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
- int qp_ctx_id, int alg_type)
+static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
+ u16 q_depth = qp_ctx->qp->sq_depth;
struct device *dev = ctx->dev;
- struct sec_qp_ctx *qp_ctx;
- struct hisi_qp *qp;
int ret = -ENOMEM;
- qp_ctx = &ctx->qp_ctx[qp_ctx_id];
- qp = ctx->qps[qp_ctx_id];
- qp->req_type = 0;
- qp->qp_ctx = qp_ctx;
- qp_ctx->qp = qp;
- qp_ctx->ctx = ctx;
-
- qp->req_cb = sec_req_cb;
+ qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
+ if (!qp_ctx->req_list)
+ return ret;
- spin_lock_init(&qp_ctx->req_lock);
- idr_init(&qp_ctx->req_idr);
- INIT_LIST_HEAD(&qp_ctx->backlog);
+ qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
+ if (!qp_ctx->res)
+ goto err_free_req_list;
+ qp_ctx->res->depth = q_depth;
- qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- SEC_SGL_SGE_NR);
+ qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_destroy_idr;
+ goto err_free_res;
}
- qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- SEC_SGL_SGE_NR);
+ qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_out_pool)) {
dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
@@ -509,34 +515,72 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
if (ret)
goto err_free_c_out_pool;
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0)
- goto err_queue_free;
-
return 0;
-err_queue_free:
- sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_destroy_idr:
- idr_destroy(&qp_ctx->req_idr);
+err_free_res:
+ kfree(qp_ctx->res);
+err_free_req_list:
+ kfree(qp_ctx->req_list);
return ret;
}
-static void sec_release_qp_ctx(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
+static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
struct device *dev = ctx->dev;
- hisi_qm_stop_qp(qp_ctx->qp);
sec_alg_resource_free(ctx, qp_ctx);
-
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+ kfree(qp_ctx->res);
+ kfree(qp_ctx->req_list);
+}
+
+static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
+ int qp_ctx_id, int alg_type)
+{
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret;
+ qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
+ qp->req_type = 0;
+ qp->qp_ctx = qp_ctx;
+ qp_ctx->qp = qp;
+ qp_ctx->ctx = ctx;
+
+ qp->req_cb = sec_req_cb;
+
+ spin_lock_init(&qp_ctx->req_lock);
+ idr_init(&qp_ctx->req_idr);
+ INIT_LIST_HEAD(&qp_ctx->backlog);
+
+ ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
+ if (ret)
+ goto err_destroy_idr;
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0)
+ goto err_resource_free;
+
+ return 0;
+
+err_resource_free:
+ sec_free_qp_ctx_resource(ctx, qp_ctx);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
+ return ret;
+}
+
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ hisi_qm_stop_qp(qp_ctx->qp);
+ sec_free_qp_ctx_resource(ctx, qp_ctx);
idr_destroy(&qp_ctx->req_idr);
}
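Splitting sec_create_qp_ctx() into sec_alloc_qp_ctx_resource()/sec_free_qp_ctx_resource() keeps one invariant easy to audit: each error label frees exactly what was acquired before it, in reverse order, and the normal release path reuses the same free helper. A userspace sketch of that goto-ladder idiom (names and element sizes hypothetical):

#include <stdlib.h>

struct qp_res {
        void *req_list;
        void *res;
        void *in_pool;
        void *out_pool;
};

/* A failure at step N jumps to the label that frees steps N-1..1. */
static int alloc_qp_res(struct qp_res *r, size_t depth)
{
        r->req_list = calloc(depth, sizeof(void *));
        if (!r->req_list)
                return -1;

        r->res = calloc(depth, 64);        /* stand-in for sec_alg_res[] */
        if (!r->res)
                goto err_free_req_list;

        r->in_pool = malloc(depth * 16);   /* stand-in for the SGL pools */
        if (!r->in_pool)
                goto err_free_res;

        r->out_pool = malloc(depth * 16);
        if (!r->out_pool)
                goto err_free_in_pool;

        return 0;

err_free_in_pool:
        free(r->in_pool);
err_free_res:
        free(r->res);
err_free_req_list:
        free(r->req_list);
        return -1;
}

int main(void)
{
        struct qp_res r;

        if (alloc_qp_res(&r, 256))
                return 1;
        /* Release in reverse allocation order, as the free helper does. */
        free(r.out_pool);
        free(r.in_pool);
        free(r.res);
        free(r.req_list);
        return 0;
}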
@@ -559,7 +603,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->pbuf_supported = ctx->sec->iommu_used;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx) {
@@ -1679,7 +1723,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
aead_req->out_mac,
authsize, a_req->cryptlen +
a_req->assoclen);
-
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1966,7 +2009,6 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
-
static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
struct sec_req *sreq)
{
@@ -2126,67 +2168,80 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.min_keysize = sec_min_key_size,\
.max_keysize = sec_max_key_size,\
.ivsize = iv_size,\
-},
+}
#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_skciphers[] = {
- SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, 0)
-
- SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
- DES3_EDE_BLOCK_SIZE, 0)
-
- SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
- DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-};
-
-static struct skcipher_alg sec_skciphers_v3[] = {
- SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+static struct sec_skcipher sec_skciphers[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
+ },
+ {
+ .alg_msk = BIT(1),
+ .alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(2),
+ .alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(3),
+ .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
+ SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(4),
+ .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(5),
+ .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(12),
+ .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(13),
+ .alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(14),
+ .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
+ SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(15),
+ .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(16),
+ .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(23),
+ .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
+ },
+ {
+ .alg_msk = BIT(24),
+ .alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
+ DES3_EDE_BLOCK_SIZE),
+ },
};
static int aead_iv_demension_check(struct aead_request *aead_req)
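Each table entry above now pairs an skcipher definition with one bit of the 64-bit driver capability bitmap (bits 0-5 for the AES modes, 12-16 for SM4, 23-24 for 3DES), so registration can be filtered per device. A small sketch of the selection, with a hypothetical capability word standing in for what the driver reads from the SEC_DRV_ALG_BITMAP_{HIGH,LOW} registers:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct alg_entry {
        uint64_t msk;
        const char *name;
};

/* A few masks copied from the .alg_msk fields above. */
static const struct alg_entry algs[] = {
        { 1ULL << 0,  "ecb(aes)"      },
        { 1ULL << 1,  "cbc(aes)"      },
        { 1ULL << 3,  "xts(aes)"      },
        { 1ULL << 12, "cbc(sm4)"      },
        { 1ULL << 23, "ecb(des3_ede)" },
};

int main(void)
{
        /* Hypothetical capability word. */
        uint64_t cap = (1ULL << 0) | (1ULL << 3) | (1ULL << 23);

        for (size_t i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
                if (algs[i].msk & cap)
                        printf("register %s\n", algs[i].name);
        return 0;
}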
@@ -2380,90 +2435,135 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.maxauthsize = max_authsize,\
}
-static struct aead_alg sec_aeads[] = {
- SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
- sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+static struct sec_aead sec_aeads[] = {
+ {
+ .alg_msk = BIT(6),
+ .alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(7),
+ .alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(17),
+ .alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(18),
+ .alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(43),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
+ sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+ },
+ {
+ .alg_msk = BIT(44),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
+ sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+ },
+ {
+ .alg_msk = BIT(45),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
+ sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ },
+};
- SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
- sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+static void sec_unregister_skcipher(u64 alg_mask, int end)
+{
+ int i;
- SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
- sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ for (i = 0; i < end; i++)
+ if (sec_skciphers[i].alg_msk & alg_mask)
+ crypto_unregister_skcipher(&sec_skciphers[i].alg);
+}
- SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+static int sec_register_skcipher(u64 alg_mask)
+{
+ int i, ret, count;
- SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+ count = ARRAY_SIZE(sec_skciphers);
-static struct aead_alg sec_aeads_v3[] = {
- SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ for (i = 0; i < count; i++) {
+ if (!(sec_skciphers[i].alg_msk & alg_mask))
+ continue;
- SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+ ret = crypto_register_skcipher(&sec_skciphers[i].alg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ sec_unregister_skcipher(alg_mask, i);
+
+ return ret;
+}
+
+static void sec_unregister_aead(u64 alg_mask, int end)
+{
+ int i;
+
+ for (i = 0; i < end; i++)
+ if (sec_aeads[i].alg_msk & alg_mask)
+ crypto_unregister_aead(&sec_aeads[i].alg);
+}
+
+static int sec_register_aead(u64 alg_mask)
+{
+ int i, ret, count;
+
+ count = ARRAY_SIZE(sec_aeads);
+
+ for (i = 0; i < count; i++) {
+ if (!(sec_aeads[i].alg_msk & alg_mask))
+ continue;
+
+ ret = crypto_register_aead(&sec_aeads[i].alg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ sec_unregister_aead(alg_mask, i);
+
+ return ret;
+}
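Note the `end` argument on the unregister helpers: on a mid-loop failure, sec_register_skcipher()/sec_register_aead() unwind only indices [0, i), re-applying the same mask filter, so nothing registered stays behind and nothing never registered gets unregistered. A runnable sketch of the idiom with stub register hooks (all names hypothetical):

#include <stdint.h>
#include <stdio.h>

#define N 4

static const uint64_t msks[N] = { 1ULL << 0, 1ULL << 1, 1ULL << 3, 1ULL << 43 };
static int registered[N];

static int reg(int i)
{
        if (i == 2)
                return -1;      /* simulate a registration failure */
        registered[i] = 1;
        return 0;
}

static void unreg(int i)
{
        registered[i] = 0;
}

/* Mirror of sec_unregister_skcipher(alg_mask, end): undo only the
 * first `end` slots, re-applying the capability filter. */
static void unregister_until(uint64_t cap, int end)
{
        for (int i = 0; i < end; i++)
                if (msks[i] & cap)
                        unreg(i);
}

static int register_filtered(uint64_t cap)
{
        int i, ret;

        for (i = 0; i < N; i++) {
                if (!(msks[i] & cap))
                        continue;
                ret = reg(i);
                if (ret) {
                        unregister_until(cap, i);  /* roll back [0, i) */
                        return ret;
                }
        }
        return 0;
}

int main(void)
{
        int ret = register_filtered(~0ULL);

        printf("ret=%d regs=%d%d%d%d\n", ret,
               registered[0], registered[1], registered[2], registered[3]);
        return 0;
}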
int sec_register_to_crypto(struct hisi_qm *qm)
{
+ u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
int ret;
- /* To avoid repeat register */
- ret = crypto_register_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ ret = sec_register_skcipher(alg_mask);
if (ret)
return ret;
- if (qm->ver > QM_HW_V2) {
- ret = crypto_register_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
- if (ret)
- goto reg_skcipher_fail;
- }
-
- ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ ret = sec_register_aead(alg_mask);
if (ret)
- goto reg_aead_fail;
- if (qm->ver > QM_HW_V2) {
- ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
- if (ret)
- goto reg_aead_v3_fail;
- }
- return ret;
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
-reg_aead_v3_fail:
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
-reg_aead_fail:
- if (qm->ver > QM_HW_V2)
- crypto_unregister_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
-reg_skcipher_fail:
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(struct hisi_qm *qm)
{
- if (qm->ver > QM_HW_V2)
- crypto_unregister_aeads(sec_aeads_v3,
- ARRAY_SIZE(sec_aeads_v3));
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
- if (qm->ver > QM_HW_V2)
- crypto_unregister_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2c0be91c0b09..3705412bac5f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -27,7 +27,6 @@
#define SEC_BD_ERR_CHK_EN3 0xffffbfff
#define SEC_SQE_SIZE 128
-#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 256
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 2
@@ -42,16 +41,11 @@
#define SEC_ECC_NUM 16
#define SEC_ECC_MASH 0xFF
#define SEC_CORE_INT_DISABLE 0x0
-#define SEC_CORE_INT_ENABLE 0x7c1ff
-#define SEC_CORE_INT_CLEAR 0x7c1ff
-#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
#define SEC_RAS_NFE_REG 0x301058
-#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
-#define SEC_RAS_NFE_ENB_MSK 0x7c177
#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x301100
@@ -119,6 +113,16 @@
#define SEC_DFX_COMMON1_LEN 0x45
#define SEC_DFX_COMMON2_LEN 0xBA
+#define SEC_ALG_BITMAP_SHIFT 32
+
+#define SEC_CIPHER_BITMAP (GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \
+ GENMASK(24, 21))
+#define SEC_DIGEST_BITMAP (GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \
+ GENMASK_ULL(42, 25))
+#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
+ GENMASK_ULL(45, 43))
+#define SEC_DEV_ALG_MAX_LEN 256
+
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -129,6 +133,11 @@ struct sec_dfx_item {
u32 offset;
};
+struct sec_dev_alg {
+ u64 alg_msk;
+ const char *algs;
+};
+
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
@@ -137,6 +146,46 @@ static struct hisi_qm_list sec_devices = {
.unregister_from_crypto = sec_unregister_from_crypto,
};
+static const struct hisi_qm_cap_info sec_basic_info[] = {
+ {SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77},
+ {SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77},
+ {SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+ {SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177},
+ {SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
+ {SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
+ {SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
+ {SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ {SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
+ {SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
+ {SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+};
+
+static const struct sec_dev_alg sec_dev_algs[] = { {
+ .alg_msk = SEC_CIPHER_BITMAP,
+ .algs = "cipher\n",
+ }, {
+ .alg_msk = SEC_DIGEST_BITMAP,
+ .algs = "digest\n",
+ }, {
+ .alg_msk = SEC_AEAD_BITMAP,
+ .algs = "aead\n",
+ },
+};
+
static const struct sec_hw_error sec_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -339,6 +388,16 @@ struct hisi_qp **sec_create_qps(void)
return NULL;
}
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
+{
+ u32 cap_val_h, cap_val_l;
+
+ cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
+ cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
+
+ return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
+}
+
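sec_get_alg_bitmap() splices two 32-bit capability registers into one 64-bit mask; the cast before the shift is what keeps the high word from being shifted in 32-bit arithmetic. A minimal check using default values from the capability table above:

#include <stdint.h>
#include <stdio.h>

static uint64_t combine(uint32_t high, uint32_t low)
{
        /* Widen before shifting: a plain `high << 32` on a 32-bit
         * operand would be undefined behaviour. */
        return ((uint64_t)high << 32) | (uint64_t)low;
}

int main(void)
{
        printf("0x%llx\n",
               (unsigned long long)combine(0x395C, 0x18050CB));
        return 0;
}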
static const struct kernel_param_ops sec_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
@@ -415,7 +474,7 @@ static void sec_open_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -435,7 +494,7 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
@@ -506,7 +565,8 @@ static int sec_engine_init(struct hisi_qm *qm)
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
+ reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
+ writel(reg, qm->io_base + SEC_SAA_EN_REG);
if (qm->ver < QM_HW_V3) {
/* HW V2 enable sm4 extra mode, as ctr/ecb */
@@ -576,7 +636,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + SEC_CONTROL_REG);
if (enable) {
val1 |= SEC_AXI_SHUTDOWN_ENABLE;
- val2 = SEC_RAS_NFE_ENB_MSK;
+ val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= SEC_AXI_SHUTDOWN_DISABLE;
val2 = 0x0;
@@ -590,25 +651,30 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void sec_hw_error_enable(struct hisi_qm *qm)
{
+ u32 ce, nfe;
+
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
+ ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+
/* clear SEC hw error source if having */
- writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable RAS int */
- writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(ce, qm->io_base + SEC_RAS_CE_REG);
writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
- writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
+ writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
sec_master_ooo_ctrl(qm, true);
/* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -939,7 +1005,11 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+ nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
}
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -955,14 +1025,20 @@ static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
+ err_info->fe = SEC_RAS_FE_ENB_MSK;
+ err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = BIT(0);
err_info->acpi_rst = "SRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT;
}
static const struct hisi_qm_err_ini sec_err_ini = {
@@ -1001,11 +1077,41 @@ static int sec_pf_probe_init(struct sec_dev *sec)
return ret;
}
+static int sec_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u64 alg_mask;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
+
+ for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
+ if (alg_mask & sec_dev_algs[i].alg_msk)
+ strcat(algs, sec_dev_algs[i].algs);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
+
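sec_set_qm_algs() replaces the hard-coded uacce string with one assembled from the device bitmap: each supported class appends "name\n", and the final strrchr() nulls the trailing newline. The same assembly in plain C, with placeholder class masks standing in for SEC_CIPHER/DIGEST/AEAD_BITMAP:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct { uint64_t msk; const char *name; } classes[] = {
                { 0x03F, "cipher\n" },
                { 0xF00, "digest\n" },
                { 0x0C0, "aead\n"   },
        };
        char algs[256] = "";
        uint64_t cap = 0x03F | 0x0C0;   /* cipher + aead, no digest */
        char *nl;

        for (size_t i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
                if (cap & classes[i].msk)
                        strcat(algs, classes[i].name);

        nl = strrchr(algs, '\n');       /* trim the trailing newline */
        if (nl)
                *nl = '\0';
        puts(algs);                     /* prints "cipher" then "aead" */
        return 0;
}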
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "cipher\ndigest\naead";
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -1028,7 +1134,19 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to init sec qm configures!\n");
+ return ret;
+ }
+
+ ret = sec_set_qm_algs(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set sec algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static void sec_qm_uninit(struct hisi_qm *qm)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 3dfd3bac5a33..f2e6da3240ae 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,7 +81,8 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
-int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index ad35434a3fdb..6608971d10cd 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -39,6 +39,9 @@
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
+#define HZIP_ALG_ZLIB GENMASK(1, 0)
+#define HZIP_ALG_GZIP GENMASK(3, 2)
+
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
@@ -123,19 +126,19 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
return -EINVAL;
- return param_set_int(val, kp);
+ return param_set_ushort(val, kp);
}
static const struct kernel_param_ops sgl_sge_nr_ops = {
.set = sgl_sge_nr_set,
- .get = param_get_int,
+ .get = param_get_ushort,
};
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
-static u16 get_extra_field_size(const u8 *start)
+static u32 get_extra_field_size(const u8 *start)
{
return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
}
@@ -167,7 +170,7 @@ static u32 __get_gzip_head_size(const u8 *src)
return size;
}
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
{
char buf[HZIP_GZIP_HEAD_BUF];
@@ -183,7 +186,7 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
int ret;
ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size) {
+ if (unlikely(ret != head_size)) {
pr_err("the head size of buffer is wrong (%d)!\n", ret);
return -ENOMEM;
}
@@ -193,11 +196,11 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
{
- if (!acomp_req->src || !acomp_req->slen)
+ if (unlikely(!acomp_req->src || !acomp_req->slen))
return -EINVAL;
- if (req_type == HZIP_ALG_TYPE_GZIP &&
- acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+ if (unlikely(req_type == HZIP_ALG_TYPE_GZIP &&
+ acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
return -EINVAL;
switch (req_type) {
@@ -230,6 +233,8 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
}
set_bit(req_id, req_q->req_bitmap);
+ write_unlock(&req_q->req_lock);
+
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
@@ -242,8 +247,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache->dskip = 0;
}
- write_unlock(&req_q->req_lock);
-
return req_cache;
}
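The hunk above shrinks the write-side critical section in hisi_zip_create_req(): once set_bit() reserves the request id, the slot belongs to this caller, so the per-slot fields can be filled after dropping req_lock (and the matching memset in the remove path becomes unnecessary). A pthread sketch of the reserve-then-init pattern:

#include <pthread.h>
#include <stdint.h>

#define SLOTS 64

static pthread_rwlock_t req_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint64_t req_bitmap;     /* one bit per request slot */
static int slot_state[SLOTS];   /* per-slot data, private once reserved */

static int reserve_slot(void)
{
        int id = -1;

        pthread_rwlock_wrlock(&req_lock);
        for (int i = 0; i < SLOTS; i++) {
                if (!(req_bitmap & (1ULL << i))) {
                        req_bitmap |= 1ULL << i;  /* slot is now ours */
                        id = i;
                        break;
                }
        }
        pthread_rwlock_unlock(&req_lock);  /* drop the lock early... */

        if (id >= 0)
                slot_state[id] = 42;  /* ...slot init needs no lock */
        return id;
}

int main(void)
{
        return reserve_slot() < 0;
}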
@@ -254,7 +257,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
- memset(req, 0, sizeof(struct hisi_zip_req));
write_unlock(&req_q->req_lock);
}
@@ -339,7 +341,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_sqe zip_sqe;
int ret;
- if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+ if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
return -EINVAL;
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
@@ -365,7 +367,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
ret = hisi_qp_send(qp, &zip_sqe);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
dev_dbg_ratelimited(dev, "failed to send request!\n");
@@ -417,7 +419,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
atomic64_inc(&dfx->recv_cnt);
status = ops->get_status(sqe);
- if (status != 0 && status != HZIP_NC_ERR) {
+ if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
sqe->produced);
@@ -450,7 +452,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
/* let's output compression head now */
head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0) {
+ if (unlikely(head_size < 0)) {
dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
head_size);
return head_size;
@@ -461,7 +463,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
+ if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
hisi_zip_remove_req(qp_ctx, req);
}
@@ -478,7 +480,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
int head_size, ret;
head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
- if (head_size < 0) {
+ if (unlikely(head_size < 0)) {
dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
head_size);
return head_size;
@@ -489,7 +491,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
+ if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
ret);
hisi_zip_remove_req(qp_ctx, req);
@@ -498,7 +500,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return ret;
}
-static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
int alg_type, int req_type)
{
struct device *dev = &qp->qm->pdev->dev;
@@ -506,7 +508,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
qp->req_type = req_type;
qp->alg_type = alg_type;
- qp->qp_ctx = ctx;
+ qp->qp_ctx = qp_ctx;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
@@ -514,15 +516,15 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
return ret;
}
- ctx->qp = qp;
+ qp_ctx->qp = qp;
return 0;
}
-static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
+static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
- hisi_qm_stop_qp(ctx->qp);
- hisi_qm_free_qps(&ctx->qp, 1);
+ hisi_qm_stop_qp(qp_ctx->qp);
+ hisi_qm_free_qps(&qp_ctx->qp, 1);
}
static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
@@ -594,18 +596,19 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
int i;
- for (i = 1; i >= 0; i--)
+ for (i = 0; i < HZIP_CTX_Q_NUM; i++)
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
+ u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
struct hisi_zip_req_q *req_q;
int i, ret;
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
req_q = &ctx->qp_ctx[i].req_q;
- req_q->size = QM_Q_DEPTH;
+ req_q->size = q_depth;
req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
if (!req_q->req_bitmap) {
@@ -613,7 +616,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
if (i == 0)
return ret;
- goto err_free_loop0;
+ goto err_free_comp_q;
}
rwlock_init(&req_q->req_lock);
@@ -622,19 +625,19 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
if (!req_q->q) {
ret = -ENOMEM;
if (i == 0)
- goto err_free_bitmap;
+ goto err_free_comp_bitmap;
else
- goto err_free_loop1;
+ goto err_free_decomp_bitmap;
}
}
return 0;
-err_free_loop1:
+err_free_decomp_bitmap:
bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
-err_free_loop0:
+err_free_comp_q:
kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
-err_free_bitmap:
+err_free_comp_bitmap:
bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
return ret;
}
@@ -651,6 +654,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
+ u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
struct hisi_zip_qp_ctx *tmp;
struct device *dev;
int i;
@@ -658,7 +662,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
tmp = &ctx->qp_ctx[i];
dev = &tmp->qp->qm->pdev->dev;
- tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1,
+ tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
sgl_sge_nr);
if (IS_ERR(tmp->sgl_pool)) {
if (i == 1)
@@ -755,6 +759,28 @@ static struct acomp_alg hisi_zip_acomp_zlib = {
}
};
+static int hisi_zip_register_zlib(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+ return 0;
+
+ ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hisi_zip_unregister_zlib(struct hisi_qm *qm)
+{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+ return;
+
+ crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
static struct acomp_alg hisi_zip_acomp_gzip = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
@@ -769,27 +795,45 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
}
};
-int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+static int hisi_zip_register_gzip(struct hisi_qm *qm)
{
int ret;
- ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
- if (ret) {
- pr_err("failed to register to zlib (%d)!\n", ret);
- return ret;
- }
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ return 0;
ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
- if (ret) {
- pr_err("failed to register to gzip (%d)!\n", ret);
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
- }
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret);
return ret;
}
-void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+static void hisi_zip_unregister_gzip(struct hisi_qm *qm)
{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ return;
+
crypto_unregister_acomp(&hisi_zip_acomp_gzip);
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+{
+ int ret = 0;
+
+ ret = hisi_zip_register_zlib(qm);
+ if (ret)
+ return ret;
+
+ ret = hisi_zip_register_gzip(qm);
+ if (ret)
+ hisi_zip_unregister_zlib(qm);
+
+ return ret;
+}
+
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+{
+ hisi_zip_unregister_zlib(qm);
+ hisi_zip_unregister_gzip(qm);
}
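The register/unregister pair is now composed from per-algorithm helpers that treat "not supported by this device" as success, which keeps the top-level flow free of hardware-version branches; only a real registration failure triggers the zlib rollback. A sketch of the convention (masks and names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical two-bit algorithm masks, shaped like HZIP_ALG_ZLIB/GZIP. */
#define ALG_ZLIB 0x3u
#define ALG_GZIP 0xCu

static unsigned int dev_caps = ALG_ZLIB;   /* device supports zlib only */

static bool alg_supported(unsigned int alg)
{
        return (alg & dev_caps) == alg;
}

/* "Not supported" counts as success, so the caller's error handling
 * stays uniform (mirrors hisi_zip_register_zlib). */
static int register_if_supported(unsigned int alg, const char *name)
{
        if (!alg_supported(alg))
                return 0;                  /* silently skipped */
        printf("registered %s\n", name);
        return 0;
}

int main(void)
{
        int ret;

        ret = register_if_supported(ALG_ZLIB, "zlib");
        if (ret)
                return ret;
        return register_if_supported(ALG_GZIP, "gzip");
}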
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c3303d99acac..c863435e8c75 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -20,18 +20,6 @@
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_CLOCK_GATE_CTRL 0x301004
-#define COMP0_ENABLE BIT(0)
-#define COMP1_ENABLE BIT(1)
-#define DECOMP0_ENABLE BIT(2)
-#define DECOMP1_ENABLE BIT(3)
-#define DECOMP2_ENABLE BIT(4)
-#define DECOMP3_ENABLE BIT(5)
-#define DECOMP4_ENABLE BIT(6)
-#define DECOMP5_ENABLE BIT(7)
-#define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
- DECOMP0_ENABLE | DECOMP1_ENABLE | \
- DECOMP2_ENABLE | DECOMP3_ENABLE | \
- DECOMP4_ENABLE | DECOMP5_ENABLE)
#define HZIP_DECOMP_CHECK_ENABLE BIT(16)
#define HZIP_FSM_MAX_CNT 0x301008
@@ -69,20 +57,14 @@
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
-#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
+#define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0
#define HZIP_OOO_SHUTDOWN_SEL 0x30120C
-#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
-#define HZIP_COMP_CORE_NUM 2
-#define HZIP_DECOMP_CORE_NUM 6
-#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
- HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE 128
-#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
@@ -92,6 +74,12 @@
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)
+#define HZIP_DEV_ALG_MAX_LEN 256
+#define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
+#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
+#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
+#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
+
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48
@@ -132,6 +120,26 @@ struct zip_dfx_item {
u32 offset;
};
+struct zip_dev_alg {
+ u32 alg_msk;
+ const char *algs;
+};
+
+static const struct zip_dev_alg zip_dev_algs[] = { {
+ .alg_msk = HZIP_ALG_ZLIB_BIT,
+ .algs = "zlib\n",
+ }, {
+ .alg_msk = HZIP_ALG_GZIP_BIT,
+ .algs = "gzip\n",
+ }, {
+ .alg_msk = HZIP_ALG_DEFLATE_BIT,
+ .algs = "deflate\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+ .algs = "lz77_zstd\n",
+ },
+};
+
static struct hisi_qm_list zip_devices = {
.register_to_crypto = hisi_zip_register_to_crypto,
.unregister_from_crypto = hisi_zip_unregister_from_crypto,
@@ -187,6 +195,58 @@ struct hisi_zip_ctrl {
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
+enum zip_cap_type {
+ ZIP_QM_NFE_MASK_CAP = 0x0,
+ ZIP_QM_RESET_MASK_CAP,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP,
+ ZIP_QM_CE_MASK_CAP,
+ ZIP_NFE_MASK_CAP,
+ ZIP_RESET_MASK_CAP,
+ ZIP_OOO_SHUTDOWN_MASK_CAP,
+ ZIP_CE_MASK_CAP,
+ ZIP_CLUSTER_NUM_CAP,
+ ZIP_CORE_TYPE_NUM_CAP,
+ ZIP_CORE_NUM_CAP,
+ ZIP_CLUSTER_COMP_NUM_CAP,
+ ZIP_CLUSTER_DECOMP_NUM_CAP,
+ ZIP_DECOMP_ENABLE_BITMAP,
+ ZIP_COMP_ENABLE_BITMAP,
+ ZIP_DRV_ALG_BITMAP,
+ ZIP_DEV_ALG_BITMAP,
+ ZIP_CORE1_ALG_BITMAP,
+ ZIP_CORE2_ALG_BITMAP,
+ ZIP_CORE3_ALG_BITMAP,
+ ZIP_CORE4_ALG_BITMAP,
+ ZIP_CORE5_ALG_BITMAP,
+ ZIP_CAP_MAX
+};
+
+static struct hisi_qm_cap_info zip_basic_cap_info[] = {
+ {ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77},
+ {ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77},
+ {ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+ {ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE},
+ {ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE},
+ {ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE},
+ {ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2},
+ {ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5},
+ {ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2},
+ {ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
+ {ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
+ {ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
+ {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF},
+ {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF},
+ {ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+ {ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+ {ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
+};
+
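Each row of the capability table carries a register offset, a field shift, a field mask, and three per-hardware-version fallback values; the shared QM helper hisi_qm_get_hw_info() picks between the register read and the fallback based on qm->cap_ver. A hedged userspace sketch of that decoding — the "registers exist from v3" cutoff here is an assumption, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

struct cap_info {
        uint32_t off, shift, mask;
        uint32_t v1, v2, v3;    /* per-hardware-version defaults */
};

/* Read the register, shift, mask -- or fall back to a per-version
 * default on older hardware.  Sketch only. */
static uint32_t get_cap(const struct cap_info *c, const uint32_t *mmio,
                        int cap_ver)
{
        if (cap_ver < 3)
                return cap_ver == 1 ? c->v1 : c->v2;
        return (mmio[c->off / 4] >> c->shift) & c->mask;
}

int main(void)
{
        static uint32_t regs[0x1000];
        struct cap_info core_num = { 0x313C, 16, 0xFF, 0x8, 0x8, 0x5 };

        regs[0x313C / 4] = 0x5 << 16;   /* pretend the device reports 5 */
        printf("cores: %u\n", get_cap(&core_num, regs, 3));
        return 0;
}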
enum {
HZIP_COMP_CORE0,
HZIP_COMP_CORE1,
@@ -343,12 +403,52 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
+{
+ u32 cap_val;
+
+ cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
+ if ((alg & cap_val) == alg)
+ return true;
+
+ return false;
+}
+
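hisi_zip_alg_support() tests containment, not intersection: each algorithm occupies two capability bits (HZIP_ALG_ZLIB is GENMASK(1, 0)), so a bare non-zero AND would accept a device reporting only half of the field. A quick demonstration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool alg_supported(uint32_t cap, uint32_t alg)
{
        /* Every bit of `alg` must be present in `cap`; a bare
         * (cap & alg) would accept a half-supported two-bit field. */
        return (alg & cap) == alg;
}

int main(void)
{
        uint32_t zlib = 0x3;            /* GENMASK(1, 0) */

        assert(alg_supported(0xF, zlib));   /* both bits set: ok */
        assert(!alg_supported(0x1, zlib));  /* one bit missing: no */
        puts("ok");
        return 0;
}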
+static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u32 alg_mask;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
+
+ for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
+ if (alg_mask & zip_dev_algs[i].alg_msk)
+ strcat(algs, zip_dev_algs[i].algs);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
+
static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -368,7 +468,7 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
@@ -401,6 +501,7 @@ static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
+ u32 dcomp_bm, comp_bm;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -438,8 +539,11 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
- base + HZIP_CLOCK_GATE_CTRL);
+ dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
+ comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
+ writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
@@ -458,7 +562,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (enable) {
val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
- val2 = HZIP_CORE_INT_RAS_NFE_ENABLE;
+ val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -472,6 +577,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
+ u32 nfe, ce;
+
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -479,17 +586,17 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
return;
}
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+
/* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
+ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(HZIP_CORE_INT_RAS_CE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
- writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- /* enable ZIP block master OOO when nfe occurs on Kunpeng930 */
hisi_zip_master_ooo_ctrl(qm, true);
/* enable ZIP hw error interrupts */
@@ -498,10 +605,13 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
+ u32 nfe, ce;
+
/* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
- /* disable ZIP block master OOO when nfe occurs on Kunpeng930 */
hisi_zip_master_ooo_ctrl(qm, false);
}
@@ -586,8 +696,9 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
return len;
tbuf[len] = '\0';
- if (kstrtoul(tbuf, 0, &val))
- return -EFAULT;
+ ret = kstrtoul(tbuf, 0, &val);
+ if (ret)
+ return ret;
ret = hisi_qm_get_dfx_access(qm);
if (ret)
@@ -651,18 +762,23 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
+ u32 zip_core_num, zip_comp_core_num;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
- for (i = 0; i < HZIP_CORE_NUM; i++) {
- if (i < HZIP_COMP_CORE_NUM)
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+ qm->cap_ver);
+
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "comp_core%d", i);
else
scnprintf(buf, sizeof(buf), "decomp_core%d",
- i - HZIP_COMP_CORE_NUM);
+ i - zip_comp_core_num);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -675,7 +791,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
debugfs_create_file("regs", 0444, tmp_d, regset,
- &hisi_zip_regs_fops);
+ &hisi_zip_regs_fops);
}
return 0;
@@ -795,10 +911,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 zip_core_num;
int i, j, idx;
- debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM +
- com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+
+ debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
+ sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
return -ENOMEM;
@@ -807,7 +926,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
debug->last_words[i] = readl_relaxed(io_base);
}
- for (i = 0; i < HZIP_CORE_NUM; i++) {
+ for (i = 0; i < zip_core_num; i++) {
io_base = qm->io_base + core_offsets[i];
for (j = 0; j < core_dfx_regs_num; j++) {
idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
@@ -834,6 +953,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
{
int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
+ u32 zip_core_num, zip_comp_core_num;
struct qm_debug *debug = &qm->debug;
char buf[HZIP_BUF_SIZE];
void __iomem *base;
@@ -847,15 +967,18 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
if (debug->last_words[i] != val)
pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
- hzip_com_dfx_regs[i].name, debug->last_words[i], val);
+ hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}
- for (i = 0; i < HZIP_CORE_NUM; i++) {
- if (i < HZIP_COMP_CORE_NUM)
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+ qm->cap_ver);
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
else
scnprintf(buf, sizeof(buf), "Decomp_core-%d",
- i - HZIP_COMP_CORE_NUM);
+ i - zip_comp_core_num);
base = qm->io_base + core_offsets[i];
pci_info(qm->pdev, "==>%s:\n", buf);
@@ -865,7 +988,8 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
if (debug->last_words[idx] != val)
pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
- hzip_dump_dfx_regs[j].name, debug->last_words[idx], val);
+ hzip_dump_dfx_regs[j].name,
+ debug->last_words[idx], val);
}
}
}
@@ -900,7 +1024,11 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -934,16 +1062,21 @@ static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
+ err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
- err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HZIP_WR_PORT;
err_info->acpi_rst = "ZRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
-
- if (qm->ver >= QM_HW_V3)
- err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
}
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
@@ -976,7 +1109,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
qm->err_ini = &hisi_zip_err_ini;
qm->err_ini->err_info_init(qm);
- hisi_zip_set_user_domain_and_cache(qm);
+ ret = hisi_zip_set_user_domain_and_cache(qm);
+ if (ret)
+ return ret;
+
hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -990,12 +1126,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
qm->pdev = pdev;
qm->ver = pdev->revision;
- if (pdev->revision >= QM_HW_V3)
- qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd";
- else
- qm->algs = "zlib\ngzip";
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1019,7 +1153,19 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to init zip qm configures!\n");
+ return ret;
+ }
+
+ ret = hisi_zip_set_qm_algs(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set zip algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static void hisi_zip_qm_uninit(struct hisi_qm *qm)
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d68ef16650d4..32a37e3850c5 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -63,7 +63,6 @@ struct safexcel_cipher_ctx {
u32 hash_alg;
u32 state_sz;
- struct crypto_cipher *hkaes;
struct crypto_aead *fback;
};
@@ -642,10 +641,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
safexcel_complete(priv, ring);
if (src == dst) {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_TO_DEVICE);
+ if (sreq->nr_dst > 0)
+ dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
/*
@@ -737,23 +742,29 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
max(totlen_src, totlen_dst));
return -EINVAL;
}
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_map_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
totlen_src);
return -EINVAL;
}
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+
+ if (sreq->nr_src > 0)
+ dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
totlen_dst);
- dma_unmap_sg(priv->dev, src, sreq->nr_src,
- DMA_TO_DEVICE);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unmap;
}
- dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+
+ if (sreq->nr_dst > 0)
+ dma_map_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
@@ -883,12 +894,18 @@ rdesc_rollback:
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-
+unmap:
if (src == dst) {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_TO_DEVICE);
+ if (sreq->nr_dst > 0)
+ dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
return ret;
@@ -2589,15 +2606,8 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ctx->key_len = len;
/* Compute hash key by encrypting zeroes with cipher key */
- crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- if (ret)
- return ret;
-
memset(hashkey, 0, AES_BLOCK_SIZE);
- crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey);
+ aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey);
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
@@ -2626,15 +2636,11 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->xcm = EIP197_XCM_MODE_GCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
- ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- return PTR_ERR_OR_ZERO(ctx->hkaes);
+ return 0;
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(ctx->hkaes);
safexcel_aead_cra_exit(tfm);
}
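
The setkey path above replaces the crypto_cipher fallback with the AES library, which needs no transform allocation and cannot fail at cra_init time. A minimal standalone sketch of deriving the GHASH key H = AES_K(0^128) the same way (illustrative, not the driver's exact code):

#include <crypto/aes.h>
#include <linux/string.h>

static int derive_ghash_key(const u8 *key, unsigned int keylen,
			    u8 hashkey[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes;
	int ret;

	ret = aes_expandkey(&aes, key, keylen);	/* also validates keylen */
	if (ret)
		return ret;

	memset(hashkey, 0, AES_BLOCK_SIZE);
	aes_encrypt(&aes, hashkey, hashkey);	/* H = AES_K(0^128) */

	memzero_explicit(&aes, sizeof(aes));	/* don't leak round keys */
	return 0;
}
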
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index bc60b5802256..103fc551d2af 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -30,7 +30,7 @@ struct safexcel_ahash_ctx {
bool fb_init_done;
bool fb_do_setkey;
- struct crypto_cipher *kaes;
+ struct crypto_aes_ctx *aes;
struct crypto_ahash *fback;
struct crypto_shash *shpre;
struct shash_desc *shdesc;
@@ -383,7 +383,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
u32 x;
x = ipad[i] ^ ipad[i + 4];
- cache[i] ^= swab(x);
+ cache[i] ^= swab32(x);
}
}
cache_len = AES_BLOCK_SIZE;
@@ -821,10 +821,10 @@ static int safexcel_ahash_final(struct ahash_request *areq)
u32 *result = (void *)areq->result;
/* K3 */
- result[i] = swab(ctx->base.ipad.word[i + 4]);
+ result[i] = swab32(ctx->base.ipad.word[i + 4]);
}
areq->result[0] ^= 0x80; // 10- padding
- crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
+ aes_encrypt(ctx->aes, areq->result, areq->result);
return 0;
} else if (unlikely(req->hmac &&
(req->len == req->block_sz) &&
@@ -2083,37 +2083,26 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct crypto_aes_ctx aes;
u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
int ret, i;
- ret = aes_expandkey(&aes, key, len);
+ ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
/* precompute the XCBC key material */
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes, key, len);
- if (ret)
- return ret;
-
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
- "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
- "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
- "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp,
+ "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE,
+ "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->base.ipad.word[i] = swab(key_tmp[i]);
-
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes,
- (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
- AES_MIN_KEY_SIZE);
+ ctx->base.ipad.word[i] = swab32(key_tmp[i]);
+
+ ret = aes_expandkey(ctx->aes,
+ (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ AES_MIN_KEY_SIZE);
if (ret)
return ret;
@@ -2121,7 +2110,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
ctx->cbcmac = false;
- memzero_explicit(&aes, sizeof(aes));
return 0;
}
@@ -2130,15 +2118,15 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_ahash_cra_init(tfm);
- ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
- return PTR_ERR_OR_ZERO(ctx->kaes);
+ ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
+ return ctx->aes ? 0 : -ENOMEM;
}
static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->kaes);
+ kfree(ctx->aes);
safexcel_ahash_cra_exit(tfm);
}
@@ -2178,31 +2166,23 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct crypto_aes_ctx aes;
__be64 consts[4];
u64 _const[2];
u8 msb_mask, gfmask;
int ret, i;
- ret = aes_expandkey(&aes, key, len);
+ /* precompute the CMAC key material */
+ ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
for (i = 0; i < len / sizeof(u32); i++)
- ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
-
- /* precompute the CMAC key material */
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes, key, len);
- if (ret)
- return ret;
+ ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]);
/* code below borrowed from crypto/cmac.c */
/* encrypt the zero block */
memset(consts, 0, AES_BLOCK_SIZE);
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
+ aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts);
gfmask = 0x87;
_const[0] = be64_to_cpu(consts[1]);
@@ -2234,7 +2214,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
}
ctx->cbcmac = false;
- memzero_explicit(&aes, sizeof(aes));
return 0;
}
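
The block "borrowed from crypto/cmac.c" derives the CMAC subkeys by doubling AES_K(0^128) in GF(2^128), folding the carry back in with the 0x87 reduction polynomial, which is what the gfmask/msb_mask arithmetic implements. The doubling step in isolation (a sketch; treats the block as two big-endian 64-bit halves, high half first):

static void gf128_mul_x(u64 *hi, u64 *lo)
{
	u64 carry = (*hi & (1ULL << 63)) ? 0x87 : 0;

	*hi = (*hi << 1) | (*lo >> 63);
	*lo = (*lo << 1) ^ carry;
}
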
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 7942b48dd55a..1cd62f9c3e3a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -42,7 +42,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
config CRYPTO_DEV_KEEMBAY_OCS_ECC
tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
select CRYPTO_ECDH
select CRYPTO_ENGINE
@@ -64,7 +64,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_HCU
select CRYPTO_ENGINE
depends on HAS_IOMEM
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
help
Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
Control Unit (HCU) hardware acceleration for use with Crypto API.
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
index b8bdb9f134f3..205eacac4a34 100644
--- a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -403,7 +403,7 @@ union otx_cptx_pf_exe_bist_status {
* big-endian format in memory.
* iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
* 0 = The hardware issues NCB transient load (LDT) towards the cache,
- * which if the line hits and is is dirty will cause the line to be
+ * which if the line hits and is dirty will cause the line to be
* written back before being replaced.
* 1 = The hardware issues NCB LDWB read-and-invalidate command towards
* the cache when fetching the last word of instructions; as a result the
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 40b482198ebc..df9c2b8747e6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -97,7 +97,7 @@ static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
static void set_ucode_filename(struct otx_cpt_ucode *ucode,
const char *filename)
{
- strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
+ strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
@@ -138,7 +138,7 @@ static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
u32 i, val = 0;
u8 nn;
- strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+ strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
@@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev,
struct tar_ucode_info_t *tar_info;
struct otx_cpt_ucode_hdr *ucode_hdr;
int ucode_type, ucode_size;
+ unsigned int code_length;
/*
* If size is less than microcode header size then don't report
@@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev,
if (get_ucode_type(ucode_hdr, &ucode_type))
return 0;
- ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Invalid code_length %u\n", code_length);
+ return -EINVAL;
+ }
+
+ ucode_size = code_length * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", filename);
@@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
{
struct otx_cpt_ucode_hdr *ucode_hdr;
const struct firmware *fw;
+ unsigned int code_length;
int ret;
set_ucode_filename(ucode, ucode_filename);
@@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
ucode->ver_num = ucode_hdr->ver_num;
- ucode->size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Ucode invalid code_length %u\n", code_length);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ ucode->size = code_length * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
@@ -1328,7 +1342,7 @@ static ssize_t ucode_load_store(struct device *dev,
eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
err_msg = "Invalid engine group format";
- strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
+ strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
start = tmp_buf;
has_se = has_ie = has_ae = false;
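
Both new checks guard the `code_length * 2` doubling: a crafted header with code_length at or above INT_MAX / 2 would overflow the signed int holding the microcode size. The same intent written with the overflow helpers (a sketch; the in-tree `>= INT_MAX / 2` form is equivalent for this range):

#include <linux/overflow.h>

	u32 code_length = ntohl(ucode_hdr->code_length);
	unsigned int ucode_size;

	if (check_mul_overflow(code_length, 2u, &ucode_size) ||
	    ucode_size > INT_MAX)	/* result is stored in a signed int */
		return -EINVAL;
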
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index 36d72e35ebeb..88a41d1ca5f6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -661,7 +661,7 @@ static ssize_t vf_type_show(struct device *dev,
msg = "Invalid";
}
- return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
+ return sysfs_emit(buf, "%s\n", msg);
}
static ssize_t vf_engine_group_show(struct device *dev,
@@ -670,7 +670,7 @@ static ssize_t vf_engine_group_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
+ return sysfs_emit(buf, "%d\n", cptvf->vfgrp);
}
static ssize_t vf_engine_group_store(struct device *dev,
@@ -706,7 +706,7 @@ static ssize_t vf_coalesc_time_wait_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_timewait(cptvf));
}
@@ -716,7 +716,7 @@ static ssize_t vf_coalesc_num_wait_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_numwait(cptvf));
}
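
sysfs_emit() is the preferred form for show() callbacks: it bakes in the PAGE_SIZE bound that every scnprintf() caller had to repeat and sanity-checks that it is really writing into a sysfs buffer. The conversion pattern in general form (value illustrative):

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	/* before: return scnprintf(buf, PAGE_SIZE, "%d\n", val); */
	return sysfs_emit(buf, "%d\n", 42);
}
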
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
index 5663787c7a62..90fdafb7c468 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
@@ -159,12 +159,10 @@ static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_READY;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -174,13 +172,11 @@ int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_QLEN;
mbx.data = cptvf->qsize;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -208,14 +204,12 @@ int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
/* Convey group of the VF */
mbx.data = cptvf->priority;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -224,12 +218,10 @@ int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VF_UP;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -238,10 +230,8 @@ int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VF_DOWN;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index f10050fead16..1577986677f6 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -68,7 +68,7 @@ static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
const char *filename)
{
- strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+ strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
@@ -126,7 +126,7 @@ static int get_ucode_type(struct device *dev,
int i, val = 0;
u8 nn;
- strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+ strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 02cb9e44afd8..75c403f2b1d9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -191,7 +191,6 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
struct pci_dev *pdev = cptvf->pdev;
struct mbox_msghdr *req;
- int ret;
req = (struct mbox_msghdr *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
@@ -204,7 +203,5 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
req->sig = OTX2_MBOX_REQ_SIG;
req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
- ret = otx2_cpt_send_mbox_msg(mbox, pdev);
-
- return ret;
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 3b0bf6fea491..31e24df18877 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1494,7 +1494,7 @@ static void n2_unregister_algs(void)
*
* So we have to back-translate, going through the 'intr' and 'ino'
* property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to to figure out which
+ * 'interrupts' property entries, in order to figure out which
* devino goes to which already-translated IRQ.
*/
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 3793885f928d..c843f4c6f684 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -134,7 +134,6 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
unsigned int cryptlen, u8 *b0)
{
unsigned int l, lp, m = authsize;
- int rc;
memcpy(b0, iv, 16);
@@ -148,9 +147,7 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
if (assoclen)
*b0 |= 64;
- rc = set_msg_len(b0 + 16 - l, cryptlen, l);
-
- return rc;
+ return set_msg_len(b0 + 16 - l, cryptlen, l);
}
static int generate_pat(u8 *iv,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index e61b3e13db3b..1931e5b37f2b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -251,13 +251,13 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
return -ENOMEM;
INIT_LIST_HEAD(&key_val->list);
- strlcpy(key_val->key, key, sizeof(key_val->key));
+ strscpy(key_val->key, key, sizeof(key_val->key));
if (type == ADF_DEC) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"%ld", (*((long *)val)));
} else if (type == ADF_STR) {
- strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+ strscpy(key_val->val, (char *)val, sizeof(key_val->val));
} else if (type == ADF_HEX) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"0x%lx", (unsigned long)val);
@@ -315,7 +315,7 @@ int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
if (!sec)
return -ENOMEM;
- strlcpy(sec->name, name, sizeof(sec->name));
+ strscpy(sec->name, name, sizeof(sec->name));
INIT_LIST_HEAD(&sec->param_head);
down_write(&cfg->lock);
list_add_tail(&sec->list, &cfg->sec_list);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e8ac932bbaab..82b69e1f725b 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -16,6 +16,9 @@
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
+#define ADF_CFG_MAX_SECTION 512
+#define ADF_CFG_MAX_KEY_VAL 256
+
#define DEVICE_NAME "qat_adf_ctl"
static DEFINE_MUTEX(adf_ctl_lock);
@@ -137,10 +140,11 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
struct adf_user_cfg_key_val key_val;
struct adf_user_cfg_key_val *params_head;
struct adf_user_cfg_section section, *section_head;
+ int i, j;
section_head = ctl_data->config_section;
- while (section_head) {
+ for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
if (copy_from_user(&section, (void __user *)section_head,
sizeof(*section_head))) {
dev_err(&GET_DEV(accel_dev),
@@ -156,7 +160,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
params_head = section.params;
- while (params_head) {
+ for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
if (copy_from_user(&key_val, (void __user *)params_head,
sizeof(key_val))) {
dev_err(&GET_DEV(accel_dev),
@@ -363,7 +367,7 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
dev_info.num_logical_accel = hw_data->num_logical_accel;
dev_info.banks_per_accel = hw_data->num_banks
/ hw_data->num_logical_accel;
- strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+ strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
dev_info.instance_id = hw_data->instance_id;
dev_info.type = hw_data->dev_class->type;
dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
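
The two bounded loops above cap how far the driver chases user-supplied `next` pointers, so a cyclic or unbounded list passed through the ioctl can no longer keep the kernel walking forever. The general shape of such a guarded walk (names hypothetical):

	struct user_node __user *unext = head;
	struct user_node node;
	int i;

	for (i = 0; unext && i < MAX_NODES; i++) {
		if (copy_from_user(&node, unext, sizeof(node)))
			return -EFAULT;
		/* ... consume node ... */
		unext = node.next;	/* untrusted; the loop bound is the guard */
	}
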
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 43b8f864806b..4fb4b3df5a18 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -107,7 +107,7 @@ do { \
* Timeout is in cycles. Clock speed may vary across products but this
* value should be a few milli-seconds.
*/
-#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
#define ADF_SSMWDTL_OFFSET 0x54
#define ADF_SSMWDTH_OFFSET 0x5C
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e69e5907f595..08bca1c506c0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -96,7 +96,7 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
if (!ring_debug)
return -ENOMEM;
- strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+ strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
snprintf(entry_name, sizeof(entry_name), "ring_%02d",
ring->ring_number);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index 4b36869bf460..69482abdb8b9 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -86,7 +86,8 @@
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
ICP_QAT_CSS_SIGNATURE_LEN(handle))
-#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index fb45fa83841c..cad9c58caab1 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -673,11 +673,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
dma_addr_t blpout = qat_req->buf.bloutp;
size_t sz = qat_req->buf.sz;
size_t sz_out = qat_req->buf.sz_out;
+ int bl_dma_dir;
int i;
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, DMA_BIDIRECTIONAL);
+ bl->bufers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
@@ -691,7 +694,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
for (i = bufless; i < blout->num_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -716,6 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n);
int node = dev_to_node(&GET_DEV(inst->accel_dev));
+ int bufl_dma_dir;
if (unlikely(!n))
return -EINVAL;
@@ -733,6 +737,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
qat_req->buf.sgl_src_valid = true;
}
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -744,7 +750,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
goto err_in;
@@ -787,7 +793,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
goto err_out;
bufers[y].len = sg->length;
@@ -817,7 +823,7 @@ err_out:
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (!qat_req->buf.sgl_dst_valid)
kfree(buflout);
@@ -831,7 +837,7 @@ err_in:
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
if (!qat_req->buf.sgl_src_valid)
kfree(bufl);
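
These qat_algs changes tighten the DMA directions: an in-place request (src == dst) still needs DMA_BIDIRECTIONAL, but split requests map the source TO_DEVICE and the destination FROM_DEVICE, and every unmap must repeat the direction chosen at map time. In outline:

	int dir = (sgl == sglout) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_addr_t addr;

	addr = dma_map_single(dev, sg_virt(sg), sg->length, dir);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	/* ... hardware processes the buffer ... */
	dma_unmap_single(dev, addr, sg->length, dir);	/* same direction */
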
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 095ed2a404d2..94a26702aeae 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,14 +332,14 @@ static int qat_dh_compute_value(struct kpp_request *req)
qat_req->in.dh.in_tab[n_input_params] = 0;
qat_req->out.dh.out_tab[1] = 0;
/* Mapping in.in.b or in.in_g2.xa is the same */
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
- sizeof(qat_req->in.dh.in.b),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
+ sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
- sizeof(qat_req->out.dh.r),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
+ sizeof(struct qat_dh_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -729,14 +729,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
- sizeof(qat_req->in.rsa.enc.m),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
- sizeof(qat_req->out.rsa.enc.c),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -875,14 +875,14 @@ static int qat_rsa_dec(struct akcipher_request *req)
else
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
- sizeof(qat_req->in.rsa.dec.c),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
- sizeof(qat_req->out.rsa.dec.m),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 0fe5a474aa45..b7f7869ef8b2 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -1367,6 +1367,48 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
}
}
+static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ unsigned int fw_type)
+{
+ char *fw_type_name = fw_type ? "MMP" : "AE";
+ unsigned int css_dword_size = sizeof(u32);
+
+ if (handle->chip_info->fw_auth) {
+ struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+ unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+
+ if ((css_hdr->header_len * css_dword_size) != header_len)
+ goto err;
+ if ((css_hdr->size * css_dword_size) != size)
+ goto err;
+ if (fw_type != css_hdr->fw_type)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ size -= header_len;
+ }
+
+ if (fw_type == CSS_AE_FIRMWARE) {
+ if (size < sizeof(struct icp_qat_simg_ae_mode *) +
+ ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
+ goto err;
+ if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
+ goto err;
+ } else if (fw_type == CSS_MMP_FIRMWARE) {
+ if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
+ goto err;
+ } else {
+ pr_err("QAT: Unsupported firmware type\n");
+ return -EINVAL;
+ }
+ return 0;
+
+err:
+ pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ return -EINVAL;
+}
+
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
char *image, unsigned int size,
struct icp_qat_fw_auth_desc **desc)
@@ -1379,7 +1421,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_simg_ae_mode *simg_ae_mode;
struct icp_firml_dram_desc img_desc;
- if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
+ if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
pr_err("QAT: error, input image size overflow %d\n", size);
return -EINVAL;
}
@@ -1547,6 +1589,11 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
{
struct icp_qat_fw_auth_desc *desc = NULL;
int status = 0;
+ int ret;
+
+ ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
+ if (ret)
+ return ret;
if (handle->chip_info->fw_auth) {
status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
@@ -2018,8 +2065,15 @@ static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
struct icp_qat_fw_auth_desc *desc = NULL;
struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+ int ret;
for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+ ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
+ simg_hdr[i].simg_len,
+ CSS_AE_FIRMWARE);
+ if (ret)
+ return ret;
+
if (qat_uclo_map_auth_fw(handle,
(char *)simg_hdr[i].simg_buf,
(unsigned int)
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 97a530171f07..6eb4d2e35629 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -450,8 +450,8 @@ qce_aead_async_req_handle(struct crypto_async_request *async_req)
if (ret)
return ret;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (dst_nents < 0) {
- ret = dst_nents;
+ if (!dst_nents) {
+ ret = -EIO;
goto error_free;
}
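
The qce fixes here and below all correct the same misread of the API: dma_map_sg() returns the number of entries actually mapped, with 0 on failure, and never a negative value, so the old `< 0` tests were dead code. The correct idiom:

	int nents;

	nents = dma_map_sg(dev, sgl, sg_count, DMA_TO_DEVICE);
	if (!nents)		/* 0 means the mapping failed */
		return -EIO;
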
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 59159f5e64e5..37bafd7aeb79 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -97,14 +97,16 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
}
ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
- if (ret < 0)
- return ret;
+ if (!ret)
+ return -EIO;
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
- if (ret < 0)
+ if (!ret) {
+ ret = -EIO;
goto error_unmap_src;
+ }
ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
&rctx->result_sg, 1, qce_ahash_done, async_req);
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 3d27cd5210ef..5b493fdc1e74 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -124,15 +124,15 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
rctx->dst_sg = rctx->dst_tbl.sgl;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (dst_nents < 0) {
- ret = dst_nents;
+ if (!dst_nents) {
+ ret = -EIO;
goto error_free;
}
if (diff_dst) {
src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- if (src_nents < 0) {
- ret = src_nents;
+ if (!src_nents) {
+ ret = -EIO;
goto error_unmap_dst;
}
rctx->src_sg = req->src;
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 031b5f701a0a..72dd1a4ebac4 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -9,6 +9,7 @@
#include <linux/crypto.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -201,15 +202,13 @@ static int qcom_rng_remove(struct platform_device *pdev)
return 0;
}
-#if IS_ENABLED(CONFIG_ACPI)
-static const struct acpi_device_id qcom_rng_acpi_match[] = {
+static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
{ .id = "QCOM8160", .driver_data = 1 },
{}
};
MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
-#endif
-static const struct of_device_id qcom_rng_of_match[] = {
+static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
{ .compatible = "qcom,prng", .data = (void *)0},
{ .compatible = "qcom,prng-ee", .data = (void *)1},
{}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 457084b344c1..7ab20fb95166 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -26,10 +26,10 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#define SHA_BUFFER_LEN PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -196,7 +196,7 @@ struct sahara_dev {
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
- struct mutex queue_mutex;
+ spinlock_t queue_spinlock;
struct task_struct *kthread;
struct completion dma_completion;
@@ -487,13 +487,13 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
- if (ret != dev->nb_in_sg) {
+ if (!ret) {
dev_err(dev->device, "couldn't map in sg\n");
goto unmap_in;
}
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
- if (ret != dev->nb_out_sg) {
+ if (!ret) {
dev_err(dev->device, "couldn't map out sg\n");
goto unmap_out;
}
@@ -642,9 +642,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
err = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1043,10 +1043,10 @@ static int sahara_queue_manage(void *data)
do {
__set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -1092,9 +1092,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
rctx->first = 1;
}
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
ret = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1449,7 +1449,7 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- mutex_init(&dev->queue_mutex);
+ spin_lock_init(&dev->queue_spinlock);
dev_ptr = dev;
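
sahara's queue mutex becomes a spinlock because requests can arrive from softirq context, where a mutex must not be taken; spin_lock_bh() both avoids sleeping and masks softirqs so the lock is safe on that path. The pattern, as used on both the enqueue and kthread dequeue sides:

	spin_lock_bh(&dev->queue_spinlock);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_bh(&dev->queue_spinlock);

	wake_up_process(dev->kthread);	/* the kthread drains the queue */
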
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 15d4a38b1351..cd4c410da5a5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -85,8 +85,6 @@ config WIREGUARD
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select ARM_CRYPTO if ARM
- select ARM64_CRYPTO if ARM64
select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index cb5414265a9b..58c0ab01771b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -83,7 +83,8 @@ static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
if (!wait || !max || likely(bytes_read) || fail_stats > 110)
break;
- msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
+ if (hwrng_msleep(rng, ath9k_rng_delay_get(++fail_stats)))
+ break;
}
if (wait && !bytes_read && max)
diff --git a/include/crypto/aria.h b/include/crypto/aria.h
index 4a86661788e8..254da46cc385 100644
--- a/include/crypto/aria.h
+++ b/include/crypto/aria.h
@@ -32,18 +32,10 @@
#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32))
struct aria_ctx {
- int key_length;
- int rounds;
u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
-};
-
-static const u32 key_rc[5][4] = {
- { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 },
- { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 },
- { 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e },
- { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0 },
- { 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 }
+ int rounds;
+ int key_length;
};
static const u32 s1[256] = {
@@ -458,4 +450,9 @@ static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)
((y[(q + 2) % 4]) << (32 - r));
}
+void aria_encrypt(void *ctx, u8 *out, const u8 *in);
+void aria_decrypt(void *ctx, u8 *out, const u8 *in);
+int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+
#endif
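
Exporting aria_set_key(), aria_encrypt() and aria_decrypt() lets an optimized module (such as the new AES-NI/AVX glue) reuse the generic C implementation as its fallback while sharing the struct aria_ctx layout. A sketch of the fallback call from such glue (surrounding glue elided):

	/* e.g. when the SIMD unit is unusable in this context */
	struct aria_ctx *ctx = crypto_tfm_ctx(tfm);

	aria_encrypt(ctx, dst, src);	/* generic one-block fallback */
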
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 27b7b0224ea6..d482017f3e20 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -114,31 +114,6 @@ static inline void aead_init_queue(struct aead_queue *queue,
crypto_init_queue(&queue->base, max_qlen);
}
-static inline int aead_enqueue_request(struct aead_queue *queue,
- struct aead_request *request)
-{
- return crypto_enqueue_request(&queue->base, &request->base);
-}
-
-static inline struct aead_request *aead_dequeue_request(
- struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_dequeue_request(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
-}
-
-static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_get_backlog(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
-}
-
static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
{
return alg->chunksize;
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 6407b4b61350..ccdb05f68a75 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -46,12 +46,6 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
walk->offset += nbytes;
}
-static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
- unsigned int alignmask)
-{
- return !(walk->offset & alignmask);
-}
-
static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 9ff4f6e4558c..06d568382c77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -52,5 +52,6 @@
#define ASPEED_RESET_I2C 7
#define ASPEED_RESET_AHB 8
#define ASPEED_RESET_CRT1 9
+#define ASPEED_RESET_HACE 10
#endif
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 62b9520a00fd..d8b0db2f7a7d 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -111,6 +111,7 @@
#define ASPEED_RESET_PCIE_RC_O 19
#define ASPEED_RESET_PCIE_RC_OEN 18
#define ASPEED_RESET_PCI_DP 5
+#define ASPEED_RESET_HACE 4
#define ASPEED_RESET_AHB 1
#define ASPEED_RESET_SDRAM 0
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 116e8bd68c99..e230c7c46110 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -87,29 +87,6 @@
#define PEH_AXUSER_CFG 0x401001
#define PEH_AXUSER_CFG_ENABLE 0xffffffff
-#define QM_AXI_RRESP BIT(0)
-#define QM_AXI_BRESP BIT(1)
-#define QM_ECC_MBIT BIT(2)
-#define QM_ECC_1BIT BIT(3)
-#define QM_ACC_GET_TASK_TIMEOUT BIT(4)
-#define QM_ACC_DO_TASK_TIMEOUT BIT(5)
-#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6)
-#define QM_SQ_CQ_VF_INVALID BIT(7)
-#define QM_CQ_VF_INVALID BIT(8)
-#define QM_SQ_VF_INVALID BIT(9)
-#define QM_DB_TIMEOUT BIT(10)
-#define QM_OF_FIFO_OF BIT(11)
-#define QM_DB_RANDOM_INVALID BIT(12)
-#define QM_MAILBOX_TIMEOUT BIT(13)
-#define QM_FLR_TIMEOUT BIT(14)
-
-#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
- QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
- QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
- QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
-#define QM_BASE_CE QM_ECC_1BIT
-
-#define QM_Q_DEPTH 1024
#define QM_MIN_QNUM 2
#define HISI_ACC_SGL_SGE_NR_MAX 255
#define QM_SHAPER_CFG 0x100164
@@ -168,6 +145,15 @@ enum qm_vf_state {
QM_NOT_READY,
};
+enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+ QM_SUPPORT_STOP_QP,
+ QM_SUPPORT_MB_COMMAND,
+ QM_SUPPORT_SVA_PREFETCH,
+ QM_SUPPORT_RPM,
+};
+
struct dfx_diff_registers {
u32 *regs;
u32 reg_offset;
@@ -232,7 +218,10 @@ struct hisi_qm_err_info {
char *acpi_rst;
u32 msi_wr_port;
u32 ecc_2bits_mask;
- u32 dev_ce_mask;
+ u32 qm_shutdown_mask;
+ u32 dev_shutdown_mask;
+ u32 qm_reset_mask;
+ u32 dev_reset_mask;
u32 ce;
u32 nfe;
u32 fe;
@@ -258,6 +247,18 @@ struct hisi_qm_err_ini {
void (*err_info_init)(struct hisi_qm *qm);
};
+struct hisi_qm_cap_info {
+ u32 type;
+ /* Register offset */
+ u32 offset;
+ /* Bit offset in register */
+ u32 shift;
+ u32 mask;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
@@ -278,6 +279,9 @@ struct hisi_qm {
struct pci_dev *pdev;
void __iomem *io_base;
void __iomem *db_io_base;
+
+ /* Capability version, 0: not supported */
+ u32 cap_ver;
u32 sqe_size;
u32 qp_base;
u32 qp_num;
@@ -286,6 +290,8 @@ struct hisi_qm {
u32 max_qp_num;
u32 vfs_num;
u32 db_interval;
+ u16 eq_depth;
+ u16 aeq_depth;
struct list_head list;
struct hisi_qm_list *qm_list;
@@ -304,6 +310,8 @@ struct hisi_qm {
struct hisi_qm_err_info err_info;
struct hisi_qm_err_status err_status;
unsigned long misc_ctl; /* driver removing and reset sched */
+ /* Device capability bit */
+ unsigned long caps;
struct rw_semaphore qps_lock;
struct idr qp_idr;
@@ -326,8 +334,6 @@ struct hisi_qm {
bool use_sva;
bool is_frozen;
- /* doorbell isolation enable */
- bool use_db_isolation;
resource_size_t phys_base;
resource_size_t db_phys_base;
struct uacce_device *uacce;
@@ -351,6 +357,8 @@ struct hisi_qp_ops {
struct hisi_qp {
u32 qp_id;
+ u16 sq_depth;
+ u16 cq_depth;
u8 alg_type;
u8 req_type;
@@ -501,6 +509,9 @@ void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read);
/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
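
Each hisi_qm_cap_info row describes one capability field: the register it lives in, its bit position and mask, plus per-hardware-version fallback values; hisi_qm_get_hw_info() then either decodes the register or returns the versioned default, selected by the is_read flag. An illustrative (made-up) table and lookup:

static const struct hisi_qm_cap_info example_caps[] = {
	/* type, reg offset, shift, mask,           v1,   v2,   v3 */
	{  0,    0x3100,     0,     GENMASK(15, 0), 1024, 1024, 2048 },
};

	u32 depth = hisi_qm_get_hw_info(qm, example_caps, 0,
					qm->cap_ver != 0 /* is_read */);
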
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index aa1d4da03538..77c2885c4c13 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -50,6 +50,7 @@ struct hwrng {
struct list_head list;
struct kref ref;
struct completion cleanup_done;
+ struct completion dying;
};
struct device;
@@ -61,4 +62,6 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
+extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
+
#endif /* LINUX_HWRANDOM_H_ */
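
hwrng_msleep() pairs with the new `dying` completion: a driver can back off politely, and a non-zero return means the sleep was cut short because the core is tearing the device down, so the read loop should give up rather than sleep again, exactly as the ath9k conversion above does. A sketch of a read callback using it (the hardware poll is hypothetical):

static int myrng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	size_t n = 0;

	do {
		n = myrng_poll_entropy(buf, max);	/* hypothetical */
		if (n || !wait)
			break;
	} while (!hwrng_msleep(rng, 10));	/* non-zero: rng is dying */

	return n ? n : -EIO;
}
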
diff --git a/include/uapi/misc/uacce/hisi_qm.h b/include/uapi/misc/uacce/hisi_qm.h
index 1faef5ff87ef..3e66dbc2f323 100644
--- a/include/uapi/misc/uacce/hisi_qm.h
+++ b/include/uapi/misc/uacce/hisi_qm.h
@@ -14,11 +14,26 @@ struct hisi_qp_ctx {
__u16 qc_type;
};
+/**
+ * struct hisi_qp_info - User data for hisi qp.
+ * @sqe_size: Submission queue element size
+ * @sq_depth: The number of sqe
+ * @cq_depth: The number of cqe
+ * @reserved: Reserved data
+ */
+struct hisi_qp_info {
+ __u32 sqe_size;
+ __u16 sq_depth;
+ __u16 cq_depth;
+ __u64 reserved;
+};
+
#define HISI_QM_API_VER_BASE "hisi_qm_v1"
#define HISI_QM_API_VER2_BASE "hisi_qm_v2"
#define HISI_QM_API_VER3_BASE "hisi_qm_v3"
/* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */
#define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx)
-
+/* UACCE_CMD_QM_SET_QP_INFO: Set qp depth and BD size */
+#define UACCE_CMD_QM_SET_QP_INFO _IOWR('H', 11, struct hisi_qp_info)
#endif
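
From userspace, the new ioctl lets a uacce client negotiate queue depths and BD size before binding the queue. A minimal sketch (the header install path and the sizes are illustrative):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <misc/uacce/hisi_qm.h>	/* installed uapi header, path assumed */

int set_qp_depths(int fd)
{
	struct hisi_qp_info qp = {
		.sqe_size = 128,	/* must match the device's BD size */
		.sq_depth = 1024,
		.cq_depth = 1024,
	};

	return ioctl(fd, UACCE_CMD_QM_SET_QP_INFO, &qp);
}
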
diff --git a/lib/Kconfig b/lib/Kconfig
index d628235f7934..9bbf8a4b2108 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -127,9 +127,6 @@ config TRACE_MMIO_ACCESS
source "lib/crypto/Kconfig"
-config LIB_MEMNEQ
- bool
-
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Makefile b/lib/Makefile
index 8f6fc027f605..ad570b7699ba 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -255,7 +255,6 @@ obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
lib-$(CONFIG_CLZ_TAB) += clz_tab.o
-lib-$(CONFIG_LIB_MEMNEQ) += memneq.o
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 47816af9a9d7..7e9683e9f5c6 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -2,6 +2,9 @@
menu "Crypto library routines"
+config CRYPTO_LIB_UTILS
+ tristate
+
config CRYPTO_LIB_AES
tristate
@@ -33,6 +36,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_LIB_CHACHA_GENERIC
tristate
+ select CRYPTO_LIB_UTILS
help
This symbol can be depended upon by arch implementations of the
ChaCha library interface that require the generic code as a
@@ -42,7 +46,6 @@ config CRYPTO_LIB_CHACHA_GENERIC
config CRYPTO_LIB_CHACHA
tristate "ChaCha library interface"
- depends on CRYPTO
depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
help
@@ -70,7 +73,7 @@ config CRYPTO_LIB_CURVE25519
tristate "Curve25519 scalar multiplication library"
depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
- select LIB_MEMNEQ
+ select CRYPTO_LIB_UTILS
help
Enable the Curve25519 library interface. This interface may be
fulfilled by either the generic implementation or an arch-specific
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 919cbb2c220d..c852f067ab06 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o
+libcryptoutils-y := memneq.o utils.o
+
# chacha is used by the /dev/random driver which is always builtin
obj-y += chacha.o
obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
diff --git a/lib/memneq.c b/lib/crypto/memneq.c
index fb11608b1ec1..243d8677cc51 100644
--- a/lib/memneq.c
+++ b/lib/crypto/memneq.c
@@ -59,10 +59,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <crypto/algapi.h>
#include <asm/unaligned.h>
-
-#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
+#include <crypto/algapi.h>
+#include <linux/module.h>
/* Generic path for arbitrary size */
static inline unsigned long
@@ -172,5 +171,3 @@ noinline unsigned long __crypto_memneq(const void *a, const void *b,
}
}
EXPORT_SYMBOL(__crypto_memneq);
-
-#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
new file mode 100644
index 000000000000..53230ab1b195
--- /dev/null
+++ b/lib/crypto/utils.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto library utility functions
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+/*
+ * XOR @len bytes from @src1 and @src2 together, writing the result to @dst
+ * (which may alias one of the sources). Don't call this directly; call
+ * crypto_xor() or crypto_xor_cpy() instead.
+ */
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
+{
+ int relalign = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ int size = sizeof(unsigned long);
+ int d = (((unsigned long)dst ^ (unsigned long)src1) |
+ ((unsigned long)dst ^ (unsigned long)src2)) &
+ (size - 1);
+
+ relalign = d ? 1 << __ffs(d) : size;
+
+ /*
+ * If we care about alignment, process as many bytes as
+ * needed to advance dst and src to values whose alignments
+ * equal their relative alignment. This will allow us to
+ * process the remainder of the input using optimal strides.
+ */
+ while (((unsigned long)dst & (relalign - 1)) && len > 0) {
+ *dst++ = *src1++ ^ *src2++;
+ len--;
+ }
+ }
+
+ while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u64 l = get_unaligned((u64 *)src1) ^
+ get_unaligned((u64 *)src2);
+ put_unaligned(l, (u64 *)dst);
+ } else {
+ *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+ }
+ dst += 8;
+ src1 += 8;
+ src2 += 8;
+ len -= 8;
+ }
+
+ while (len >= 4 && !(relalign & 3)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u32 l = get_unaligned((u32 *)src1) ^
+ get_unaligned((u32 *)src2);
+ put_unaligned(l, (u32 *)dst);
+ } else {
+ *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+ }
+ dst += 4;
+ src1 += 4;
+ src2 += 4;
+ len -= 4;
+ }
+
+ while (len >= 2 && !(relalign & 1)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u16 l = get_unaligned((u16 *)src1) ^
+ get_unaligned((u16 *)src2);
+ put_unaligned(l, (u16 *)dst);
+ } else {
+ *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+ }
+ dst += 2;
+ src1 += 2;
+ src2 += 2;
+ len -= 2;
+ }
+
+ while (len--)
+ *dst++ = *src1++ ^ *src2++;
+}
+EXPORT_SYMBOL_GPL(__crypto_xor);
+
+MODULE_LICENSE("GPL");