-rw-r--r--  Documentation/core-api/padata.rst | 18
-rw-r--r--  Documentation/crypto/api-intro.txt | 2
-rw-r--r--  Documentation/crypto/userspace-if.rst | 4
-rw-r--r--  Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml | 76
-rw-r--r--  Documentation/devicetree/bindings/rng/imx-rng.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/rng/ingenic,rng.yaml | 36
-rw-r--r--  Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml | 36
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/arm/crypto/crc32-ce-core.S | 2
-rw-r--r--  arch/arm/crypto/ghash-ce-glue.c | 51
-rw-r--r--  arch/arm/crypto/sha1-armv4-large.S | 2
-rw-r--r--  arch/arm/crypto/sha256-armv4.pl | 2
-rw-r--r--  arch/arm/crypto/sha256-core.S_shipped | 2
-rw-r--r--  arch/arm/crypto/sha512-armv4.pl | 4
-rw-r--r--  arch/arm/crypto/sha512-core.S_shipped | 4
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 257
-rw-r--r--  arch/sparc/crypto/sha256_glue.c | 14
-rw-r--r--  arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 15
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 739
-rw-r--r--  arch/x86/crypto/aesni-intel_avx-x86_64.S | 1
-rw-r--r--  arch/x86/crypto/chacha-ssse3-x86_64.S | 16
-rw-r--r--  arch/x86/crypto/chacha_glue.c | 17
-rw-r--r--  arch/x86/crypto/crc32-pclmul_asm.S | 47
-rw-r--r--  arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 7
-rw-r--r--  arch/x86/crypto/curve25519-x86_64.c | 6
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_asm.S | 17
-rw-r--r--  arch/x86/include/asm/inst.h | 163
-rw-r--r--  crypto/Kconfig | 46
-rw-r--r--  crypto/acompress.c | 8
-rw-r--r--  crypto/adiantum.c | 14
-rw-r--r--  crypto/af_alg.c | 11
-rw-r--r--  crypto/algapi.c | 21
-rw-r--r--  crypto/algif_aead.c | 4
-rw-r--r--  crypto/algif_skcipher.c | 4
-rw-r--r--  crypto/api.c | 24
-rw-r--r--  crypto/authenc.c | 14
-rw-r--r--  crypto/authencesn.c | 14
-rw-r--r--  crypto/blake2b_generic.c | 2
-rw-r--r--  crypto/camellia_generic.c | 2
-rw-r--r--  crypto/ccm.c | 33
-rw-r--r--  crypto/chacha20poly1305.c | 14
-rw-r--r--  crypto/cmac.c | 5
-rw-r--r--  crypto/cryptd.c | 59
-rw-r--r--  crypto/ctr.c | 17
-rw-r--r--  crypto/cts.c | 13
-rw-r--r--  crypto/dh.c | 38
-rw-r--r--  crypto/ecc.c | 44
-rw-r--r--  crypto/ecc.h | 14
-rw-r--r--  crypto/echainiv.c | 2
-rw-r--r--  crypto/essiv.c | 11
-rw-r--r--  crypto/gcm.c | 40
-rw-r--r--  crypto/geniv.c | 19
-rw-r--r--  crypto/hmac.c | 5
-rw-r--r--  crypto/internal.h | 23
-rw-r--r--  crypto/jitterentropy.c | 4
-rw-r--r--  crypto/lrw.c | 134
-rw-r--r--  crypto/pcrypt.c | 31
-rw-r--r--  crypto/rsa-pkcs1pad.c | 13
-rw-r--r--  crypto/salsa20_generic.c | 4
-rw-r--r--  crypto/seqiv.c | 18
-rw-r--r--  crypto/sha3_generic.c | 2
-rw-r--r--  crypto/simd.c | 6
-rw-r--r--  crypto/skcipher.c | 13
-rw-r--r--  crypto/testmgr.h | 10
-rw-r--r--  crypto/vmac.c | 5
-rw-r--r--  crypto/xcbc.c | 5
-rw-r--r--  crypto/xts.c | 154
-rw-r--r--  drivers/char/hw_random/Kconfig | 27
-rw-r--r--  drivers/char/hw_random/Makefile | 2
-rw-r--r--  drivers/char/hw_random/ba431-rng.c | 235
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 5
-rw-r--r--  drivers/char/hw_random/core.c | 2
-rw-r--r--  drivers/char/hw_random/hisi-rng.c | 2
-rw-r--r--  drivers/char/hw_random/ingenic-rng.c | 154
-rw-r--r--  drivers/char/hw_random/ks-sa-rng.c | 2
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c | 2
-rw-r--r--  drivers/char/hw_random/npcm-rng.c | 2
-rw-r--r--  drivers/char/hw_random/octeon-rng.c | 6
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 11
-rw-r--r--  drivers/char/hw_random/pic32-rng.c | 2
-rw-r--r--  drivers/char/hw_random/st-rng.c | 3
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 2
-rw-r--r--  drivers/crypto/Kconfig | 19
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c | 46
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h | 3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 42
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 12
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 8
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 39
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 12
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h | 26
-rw-r--r--  drivers/crypto/amlogic/Kconfig | 2
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-cipher.c | 27
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 6
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl.h | 3
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 20
-rw-r--r--  drivers/crypto/bcm/cipher.c | 72
-rw-r--r--  drivers/crypto/caam/caamalg.c | 37
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 8
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 42
-rw-r--r--  drivers/crypto/caam/caamhash.c | 2
-rw-r--r--  drivers/crypto/caam/compat.h | 1
-rw-r--r--  drivers/crypto/caam/ctrl.c | 12
-rw-r--r--  drivers/crypto/caam/dpseci.c | 18
-rw-r--r--  drivers/crypto/caam/dpseci.h | 2
-rw-r--r--  drivers/crypto/caam/dpseci_cmd.h | 1
-rw-r--r--  drivers/crypto/caam/error.c | 3
-rw-r--r--  drivers/crypto/caam/jr.c | 3
-rw-r--r--  drivers/crypto/caam/regs.h | 11
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 28
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 24
-rw-r--r--  drivers/crypto/cavium/cpt/request_manager.h | 26
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c | 4
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 16
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 34
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 4
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 8
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 4
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 13
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 43
-rw-r--r--  drivers/crypto/ccp/sp-dev.c | 6
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 6
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 17
-rw-r--r--  drivers/crypto/ccp/sp-platform.c | 2
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 149
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 87
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 3
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 111
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 43
-rw-r--r--  drivers/crypto/hisilicon/qm.h | 1
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_algs.c | 58
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 4
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 95
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 132
-rw-r--r--  drivers/crypto/hisilicon/zip/zip.h | 2
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 6
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 5
-rw-r--r--  drivers/crypto/img-hash.c | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 13
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 3
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 47
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 18
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 6
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c | 11
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.h | 1
-rw-r--r--  drivers/crypto/marvell/cesa/cipher.c | 18
-rw-r--r--  drivers/crypto/marvell/cesa/hash.c | 6
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 8
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h | 2
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 51
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_algs.h | 6
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c | 9
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h | 24
-rw-r--r--  drivers/crypto/mediatek/mtk-aes.c | 63
-rw-r--r--  drivers/crypto/mxs-dcp.c | 33
-rw-r--r--  drivers/crypto/n2_core.c | 3
-rw-r--r--  drivers/crypto/omap-aes.c | 41
-rw-r--r--  drivers/crypto/omap-aes.h | 3
-rw-r--r--  drivers/crypto/omap-des.c | 6
-rw-r--r--  drivers/crypto/omap-sham.c | 18
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 55
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 48
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h | 48
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c | 48
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 48
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h | 48
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 48
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | 48
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h | 48
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 48
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 48
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h | 48
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 102
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_engine.c | 52
-rw-r--r--  drivers/crypto/qat/qat_common/adf_admin.c | 144
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 50
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.h | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_common.h | 72
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_strings.h | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_user.h | 58
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 60
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 52
-rw-r--r--  drivers/crypto/qat/qat_common/adf_dev_mgr.c | 56
-rw-r--r--  drivers/crypto/qat/qat_common/adf_hw_arbiter.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_isr.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 49
-rw-r--r--  drivers/crypto/qat/qat_common/adf_pf2vf_msg.h | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 110
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.h | 52
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_access_macros.h | 54
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_debug.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_internal.h | 75
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf2pf_msg.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf_isr.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw.h | 106
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h | 145
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_la.h | 206
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h | 48
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_fw_pke.h | 100
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_hal.h | 48
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_hw.h | 64
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_uclo.h | 54
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 211
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 61
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.c | 48
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h | 48
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 88
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 77
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 74
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h | 48
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 48
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 48
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h | 48
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 48
-rw-r--r--  drivers/crypto/qce/cipher.h | 3
-rw-r--r--  drivers/crypto/qce/common.h | 2
-rw-r--r--  drivers/crypto/qce/sha.c | 36
-rw-r--r--  drivers/crypto/qce/skcipher.c | 43
-rw-r--r--  drivers/crypto/sa2ul.c | 2420
-rw-r--r--  drivers/crypto/sa2ul.h | 403
-rw-r--r--  drivers/crypto/sahara.c | 96
-rw-r--r--  drivers/crypto/talitos.c | 117
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 18
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.c | 3
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 4
-rw-r--r--  drivers/crypto/xilinx/zynqmp-aes-gcm.c | 1
-rw-r--r--  drivers/firmware/efi/embedded-firmware.c | 9
-rw-r--r--  include/crypto/acompress.h | 18
-rw-r--r--  include/crypto/algapi.h | 25
-rw-r--r--  include/crypto/chacha.h | 4
-rw-r--r--  include/crypto/chacha20poly1305.h | 2
-rw-r--r--  include/crypto/hash.h | 2
-rw-r--r--  include/crypto/if_alg.h | 4
-rw-r--r--  include/crypto/internal/geniv.h | 2
-rw-r--r--  include/crypto/sha.h | 1
-rw-r--r--  include/crypto/skcipher.h | 2
-rw-r--r--  include/linux/crypto.h | 41
-rw-r--r--  include/linux/mpi.h | 3
-rw-r--r--  include/linux/padata.h | 21
-rw-r--r--  kernel/padata.c | 177
-rw-r--r--  lib/crc-t10dif.c | 75
-rw-r--r--  lib/crypto/chacha20poly1305.c | 2
-rw-r--r--  lib/crypto/sha256.c | 10
-rw-r--r--  lib/mpi/Makefile | 1
-rw-r--r--  lib/mpi/mpi-sub-ui.c | 78
-rw-r--r--  net/mptcp/crypto.c | 15
-rw-r--r--  sound/soc/codecs/cros_ec_codec.c | 27
257 files changed, 6648 insertions, 5497 deletions
diff --git a/Documentation/core-api/padata.rst b/Documentation/core-api/padata.rst
index 0830e5b0e821..35175710b43c 100644
--- a/Documentation/core-api/padata.rst
+++ b/Documentation/core-api/padata.rst
@@ -27,22 +27,11 @@ padata_instance structure for overall control of how jobs are to be run::
#include <linux/padata.h>
- struct padata_instance *padata_alloc_possible(const char *name);
+ struct padata_instance *padata_alloc(const char *name);
'name' simply identifies the instance.
-There are functions for enabling and disabling the instance::
-
- int padata_start(struct padata_instance *pinst);
- void padata_stop(struct padata_instance *pinst);
-
-These functions are setting or clearing the "PADATA_INIT" flag; if that flag is
-not set, other functions will refuse to work. padata_start() returns zero on
-success (flag set) or -EINVAL if the padata cpumask contains no active CPU
-(flag not set). padata_stop() clears the flag and blocks until the padata
-instance is unused.
-
-Finally, complete padata initialization by allocating a padata_shell::
+Then, complete padata initialization by allocating a padata_shell::
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
@@ -155,11 +144,10 @@ submitted.
Destroying
----------
-Cleaning up a padata instance predictably involves calling the three free
+Cleaning up a padata instance predictably involves calling the two free
functions that correspond to the allocation in reverse::
void padata_free_shell(struct padata_shell *ps);
- void padata_stop(struct padata_instance *pinst);
void padata_free(struct padata_instance *pinst);
It is the user's responsibility to ensure all outstanding jobs are complete
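
For readers tracking the API change above, here is a minimal sketch of the resulting lifecycle, assuming a module that only sets up and tears down one instance (the "example" name and the init/exit wrappers are illustrative, not part of the patch)::

    #include <linux/module.h>
    #include <linux/padata.h>

    static struct padata_instance *pinst;
    static struct padata_shell *ps;

    static int __init example_init(void)
    {
            /* Two allocations in; no padata_start() step remains. */
            pinst = padata_alloc("example");
            if (!pinst)
                    return -ENOMEM;

            ps = padata_alloc_shell(pinst);
            if (!ps) {
                    padata_free(pinst);
                    return -ENOMEM;
            }
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* Frees mirror the allocations in reverse order;
             * no padata_stop() call is needed any more. */
            padata_free_shell(ps);
            padata_free(pinst);
    }

    module_init(example_init);
    module_exit(example_exit);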
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index 45d943fcae5b..40137f93e04f 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -169,7 +169,7 @@ Portions of this API were derived from the following projects:
and;
- Nettle (http://www.lysator.liu.se/~nisse/nettle/)
+ Nettle (https://www.lysator.liu.se/~nisse/nettle/)
Niels Möller
Original developers of the crypto algorithms:
diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst
index ff86befa61e0..52019e905900 100644
--- a/Documentation/crypto/userspace-if.rst
+++ b/Documentation/crypto/userspace-if.rst
@@ -23,7 +23,7 @@ user space, however. This includes the difference between synchronous
and asynchronous invocations. The user space API call is fully
synchronous.
-[1] http://www.chronox.de/libkcapi.html
+[1] https://www.chronox.de/libkcapi.html
User Space API General Remarks
------------------------------
@@ -384,4 +384,4 @@ Please see [1] for libkcapi which provides an easy-to-use wrapper around
the aforementioned Netlink kernel interface. [1] also contains a test
application that invokes all libkcapi API calls.
-[1] http://www.chronox.de/libkcapi.html
+[1] https://www.chronox.de/libkcapi.html
diff --git a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
new file mode 100644
index 000000000000..85ef69ffebed
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/ti,sa2ul.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: K3 SoC SA2UL crypto module
+
+maintainers:
+ - Tero Kristo <t-kristo@ti.com>
+
+properties:
+ compatible:
+ enum:
+ - ti,j721e-sa2ul
+ - ti,am654-sa2ul
+
+ reg:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ dmas:
+ items:
+ - description: TX DMA Channel
+ - description: RX DMA Channel #1
+ - description: RX DMA Channel #2
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx1
+ - const: rx2
+
+ dma-coherent: true
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 2
+
+ ranges:
+ description:
+ Address translation for the possible RNG child node for SA2UL
+
+patternProperties:
+ "^rng@[a-f0-9]+$":
+ type: object
+ description:
+ Child RNG node for SA2UL
+
+required:
+ - compatible
+ - reg
+ - power-domains
+ - dmas
+ - dma-names
+ - dma-coherent
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ main_crypto: crypto@4e00000 {
+ compatible = "ti,j721e-sa2ul";
+ reg = <0x0 0x4e00000 0x0 0x1200>;
+ power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>;
+ dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>,
+ <&main_udmap 0x4001>;
+ dma-names = "tx", "rx1", "rx2";
+ dma-coherent;
+ };
diff --git a/Documentation/devicetree/bindings/rng/imx-rng.txt b/Documentation/devicetree/bindings/rng/imx-rng.txt
index 405c2b00ccb0..659d4efdd664 100644
--- a/Documentation/devicetree/bindings/rng/imx-rng.txt
+++ b/Documentation/devicetree/bindings/rng/imx-rng.txt
@@ -5,6 +5,9 @@ Required properties:
"fsl,imx21-rnga"
"fsl,imx31-rnga" (backward compatible with "fsl,imx21-rnga")
"fsl,imx25-rngb"
+ "fsl,imx6sl-rngb" (backward compatible with "fsl,imx25-rngb")
+ "fsl,imx6sll-rngb" (backward compatible with "fsl,imx25-rngb")
+ "fsl,imx6ull-rngb" (backward compatible with "fsl,imx25-rngb")
"fsl,imx35-rngc"
- reg : offset and length of the register set of this block
- interrupts : the interrupt number for the RNG block
diff --git a/Documentation/devicetree/bindings/rng/ingenic,rng.yaml b/Documentation/devicetree/bindings/rng/ingenic,rng.yaml
new file mode 100644
index 000000000000..b2e4a6a7f93a
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/ingenic,rng.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/ingenic,rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bindings for RNG in Ingenic SoCs
+
+maintainers:
+ - 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+
+description:
+ The Random Number Generator in Ingenic SoCs.
+
+properties:
+ compatible:
+ enum:
+ - ingenic,jz4780-rng
+ - ingenic,x1000-rng
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ rng: rng@d8 {
+ compatible = "ingenic,jz4780-rng";
+ reg = <0xd8 0x8>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml b/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml
new file mode 100644
index 000000000000..48ab82abf50e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/silex-insight,ba431-rng.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/silex-insight,ba431-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Silex Insight BA431 RNG bindings
+
+description: |
+ The BA431 hardware random number generator is an IP that is FIPS-140-2/3
+ certified.
+
+maintainers:
+ - Olivier Sobrie <olivier.sobrie@silexinsight.com>
+
+properties:
+ compatible:
+ const: silex-insight,ba431-rng
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ rng@42800000 {
+ compatible = "silex-insight,ba431-rng";
+ reg = <0x42800000 0x1000>;
+ };
+
+...
diff --git a/MAINTAINERS b/MAINTAINERS
index 4e2698cc7e23..de9b86cb2f36 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -830,11 +830,20 @@ F: include/uapi/rdma/efa-abi.h
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
M: Tom Lendacky <thomas.lendacky@amd.com>
+M: John Allen <john.allen@amd.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/ccp/
F: include/linux/ccp.h
+AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - SEV SUPPORT
+M: Brijesh Singh <brijesh.singh@amd.com>
+M: Tom Lendacky <thomas.lendacky@amd.com>
+L: linux-crypto@vger.kernel.org
+S: Supported
+F: drivers/crypto/ccp/sev*
+F: include/uapi/linux/psp-sev.h
+
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/crypto/crc32-ce-core.S
index 5cbd4a6fedad..3f13a76b9066 100644
--- a/arch/arm/crypto/crc32-ce-core.S
+++ b/arch/arm/crypto/crc32-ce-core.S
@@ -39,7 +39,7 @@
* CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
* PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
* at:
- * http://www.intel.com/products/processor/manuals/
+ * https://www.intel.com/products/processor/manuals/
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual
* Volume 2B: Instruction Set Reference, N-Z
*
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index a00fd329255f..f13401f3e669 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -16,6 +16,7 @@
#include <crypto/gf128mul.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
+#include <linux/jump_label.h>
#include <linux/module.h>
MODULE_DESCRIPTION("GHASH hash function using ARMv8 Crypto Extensions");
@@ -27,12 +28,8 @@ MODULE_ALIAS_CRYPTO("ghash");
#define GHASH_DIGEST_SIZE 16
struct ghash_key {
- u64 h[2];
- u64 h2[2];
- u64 h3[2];
- u64 h4[2];
-
be128 k;
+ u64 h[][2];
};
struct ghash_desc_ctx {
@@ -46,16 +43,12 @@ struct ghash_async_ctx {
};
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
- struct ghash_key const *k,
- const char *head);
+ u64 const h[][2], const char *head);
asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
- struct ghash_key const *k,
- const char *head);
+ u64 const h[][2], const char *head);
-static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
- struct ghash_key const *k,
- const char *head);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_p64);
static int ghash_init(struct shash_desc *desc)
{
@@ -70,7 +63,10 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
{
if (likely(crypto_simd_usable())) {
kernel_neon_begin();
- pmull_ghash_update(blocks, dg, src, key, head);
+ if (static_branch_likely(&use_p64))
+ pmull_ghash_update_p64(blocks, dg, src, key->h, head);
+ else
+ pmull_ghash_update_p8(blocks, dg, src, key->h, head);
kernel_neon_end();
} else {
be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
@@ -161,25 +157,26 @@ static int ghash_setkey(struct crypto_shash *tfm,
const u8 *inkey, unsigned int keylen)
{
struct ghash_key *key = crypto_shash_ctx(tfm);
- be128 h;
if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
/* needed for the fallback */
memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
- ghash_reflect(key->h, &key->k);
+ ghash_reflect(key->h[0], &key->k);
- h = key->k;
- gf128mul_lle(&h, &key->k);
- ghash_reflect(key->h2, &h);
+ if (static_branch_likely(&use_p64)) {
+ be128 h = key->k;
- gf128mul_lle(&h, &key->k);
- ghash_reflect(key->h3, &h);
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h[1], &h);
- gf128mul_lle(&h, &key->k);
- ghash_reflect(key->h4, &h);
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h[2], &h);
+ gf128mul_lle(&h, &key->k);
+ ghash_reflect(key->h[3], &h);
+ }
return 0;
}
@@ -195,7 +192,7 @@ static struct shash_alg ghash_alg = {
.base.cra_driver_name = "ghash-ce-sync",
.base.cra_priority = 300 - 1,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ghash_key),
+ .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
};
@@ -354,10 +351,10 @@ static int __init ghash_ce_mod_init(void)
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
- if (elf_hwcap2 & HWCAP2_PMULL)
- pmull_ghash_update = pmull_ghash_update_p64;
- else
- pmull_ghash_update = pmull_ghash_update_p8;
+ if (elf_hwcap2 & HWCAP2_PMULL) {
+ ghash_alg.base.cra_ctxsize += 3 * sizeof(u64[2]);
+ static_branch_enable(&use_p64);
+ }
err = crypto_register_shash(&ghash_alg);
if (err)
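
The rework above turns the fixed h/h2/h3/h4 fields into a flexible array member and sizes the tfm context at registration time. A sketch of that sizing idiom, keeping the patch's split of one key power for the p8 fallback versus four when PMULL is present (ghash_ctxsize() is an illustrative helper, not a function from the patch)::

    #include <linux/types.h>
    #include <crypto/b128ops.h>

    struct ghash_key {
            be128   k;              /* raw key, kept for the fallback */
            u64     h[][2];         /* reflected key powers h^1..h^n */
    };

    static size_t ghash_ctxsize(bool have_pmull)
    {
            /* h^2..h^4 are only computed and stored on PMULL-capable
             * CPUs, matching the cra_ctxsize arithmetic above. */
            int powers = have_pmull ? 4 : 1;

            return sizeof(struct ghash_key) + powers * sizeof(u64[2]);
    }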
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index f82cd8cf5a09..1c8b685149f2 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -13,7 +13,7 @@
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see http://www.openssl.org/~appro/cryptogams/.
+@ details see https://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ sha1_block procedure for ARMv4.
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index a03cf4dfb781..9f96ff48e4a8 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -13,7 +13,7 @@
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
+# details see https://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# SHA256 block procedure for ARMv4. May 2007.
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index 054aae0edfce..ea04b2ab0c33 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -12,7 +12,7 @@
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see http://www.openssl.org/~appro/cryptogams/.
+@ details see https://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ SHA256 block procedure for ARMv4. May 2007.
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
index 788c17b56ecc..69df68981acd 100644
--- a/arch/arm/crypto/sha512-armv4.pl
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -13,7 +13,7 @@
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
+# details see https://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# SHA512 block procedure for ARMv4. September 2007.
@@ -43,7 +43,7 @@
# terms it's 22.6 cycles per byte, which is disappointing result.
# Technical writers asserted that 3-way S4 pipeline can sustain
# multiple NEON instructions per cycle, but dual NEON issue could
-# not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
+# not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html
# for further details. On side note Cortex-A15 processes one byte in
# 16 cycles.
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
index 710ea309769e..cb147db5cbfe 100644
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -12,7 +12,7 @@
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see http://www.openssl.org/~appro/cryptogams/.
+@ details see https://www.openssl.org/~appro/cryptogams/.
@ ====================================================================
@ SHA512 block procedure for ARMv4. September 2007.
@@ -42,7 +42,7 @@
@ terms it's 22.6 cycles per byte, which is disappointing result.
@ Technical writers asserted that 3-way S4 pipeline can sustain
@ multiple NEON instructions per cycle, but dual NEON issue could
-@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
+@ not be observed, see https://www.openssl.org/~appro/Snapdragon-S4.html
@ for further details. On side note Cortex-A15 processes one byte in
@ 16 cycles.
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 22831d3b7f62..da1034867aaa 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -31,12 +31,8 @@ MODULE_ALIAS_CRYPTO("ghash");
#define GCM_IV_SIZE 12
struct ghash_key {
- u64 h[2];
- u64 h2[2];
- u64 h3[2];
- u64 h4[2];
-
be128 k;
+ u64 h[][2];
};
struct ghash_desc_ctx {
@@ -51,22 +47,18 @@ struct gcm_aes_ctx {
};
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
- struct ghash_key const *k,
- const char *head);
+ u64 const h[][2], const char *head);
asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
- struct ghash_key const *k,
- const char *head);
+ u64 const h[][2], const char *head);
asmlinkage void pmull_gcm_encrypt(int bytes, u8 dst[], const u8 src[],
- struct ghash_key const *k, u64 dg[],
- u8 ctr[], u32 const rk[], int rounds,
- u8 tag[]);
+ u64 const h[][2], u64 dg[], u8 ctr[],
+ u32 const rk[], int rounds, u8 tag[]);
asmlinkage void pmull_gcm_decrypt(int bytes, u8 dst[], const u8 src[],
- struct ghash_key const *k, u64 dg[],
- u8 ctr[], u32 const rk[], int rounds,
- u8 tag[]);
+ u64 const h[][2], u64 dg[], u8 ctr[],
+ u32 const rk[], int rounds, u8 tag[]);
static int ghash_init(struct shash_desc *desc)
{
@@ -77,48 +69,51 @@ static int ghash_init(struct shash_desc *desc)
}
static void ghash_do_update(int blocks, u64 dg[], const char *src,
- struct ghash_key *key, const char *head,
- void (*simd_update)(int blocks, u64 dg[],
- const char *src,
- struct ghash_key const *k,
- const char *head))
+ struct ghash_key *key, const char *head)
{
- if (likely(crypto_simd_usable() && simd_update)) {
- kernel_neon_begin();
- simd_update(blocks, dg, src, key, head);
- kernel_neon_end();
- } else {
- be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
+ be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
- do {
- const u8 *in = src;
-
- if (head) {
- in = head;
- blocks++;
- head = NULL;
- } else {
- src += GHASH_BLOCK_SIZE;
- }
+ do {
+ const u8 *in = src;
+
+ if (head) {
+ in = head;
+ blocks++;
+ head = NULL;
+ } else {
+ src += GHASH_BLOCK_SIZE;
+ }
- crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
- gf128mul_lle(&dst, &key->k);
- } while (--blocks);
+ crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
+ gf128mul_lle(&dst, &key->k);
+ } while (--blocks);
- dg[0] = be64_to_cpu(dst.b);
- dg[1] = be64_to_cpu(dst.a);
+ dg[0] = be64_to_cpu(dst.b);
+ dg[1] = be64_to_cpu(dst.a);
+}
+
+static __always_inline
+void ghash_do_simd_update(int blocks, u64 dg[], const char *src,
+ struct ghash_key *key, const char *head,
+ void (*simd_update)(int blocks, u64 dg[],
+ const char *src,
+ u64 const h[][2],
+ const char *head))
+{
+ if (likely(crypto_simd_usable())) {
+ kernel_neon_begin();
+ simd_update(blocks, dg, src, key->h, head);
+ kernel_neon_end();
+ } else {
+ ghash_do_update(blocks, dg, src, key, head);
}
}
/* avoid hogging the CPU for too long */
#define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE)
-static int __ghash_update(struct shash_desc *desc, const u8 *src,
- unsigned int len,
- void (*simd_update)(int blocks, u64 dg[],
- const char *src,
- struct ghash_key const *k,
- const char *head))
+static int ghash_update(struct shash_desc *desc, const u8 *src,
+ unsigned int len)
{
struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
@@ -143,9 +138,9 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src,
do {
int chunk = min(blocks, MAX_BLOCKS);
- ghash_do_update(chunk, ctx->digest, src, key,
- partial ? ctx->buf : NULL,
- simd_update);
+ ghash_do_simd_update(chunk, ctx->digest, src, key,
+ partial ? ctx->buf : NULL,
+ pmull_ghash_update_p8);
blocks -= chunk;
src += chunk * GHASH_BLOCK_SIZE;
@@ -157,39 +152,7 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src,
return 0;
}
-static int ghash_update_p8(struct shash_desc *desc, const u8 *src,
- unsigned int len)
-{
- return __ghash_update(desc, src, len, pmull_ghash_update_p8);
-}
-
-static int ghash_update_p64(struct shash_desc *desc, const u8 *src,
- unsigned int len)
-{
- return __ghash_update(desc, src, len, pmull_ghash_update_p64);
-}
-
-static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
-{
- struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
- unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
-
- if (partial) {
- struct ghash_key *key = crypto_shash_ctx(desc->tfm);
-
- memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
-
- ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
- pmull_ghash_update_p8);
- }
- put_unaligned_be64(ctx->digest[1], dst);
- put_unaligned_be64(ctx->digest[0], dst + 8);
-
- *ctx = (struct ghash_desc_ctx){};
- return 0;
-}
-
-static int ghash_final_p64(struct shash_desc *desc, u8 *dst)
+static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
@@ -199,8 +162,8 @@ static int ghash_final_p64(struct shash_desc *desc, u8 *dst)
memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
- ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
- pmull_ghash_update_p64);
+ ghash_do_simd_update(1, ctx->digest, ctx->buf, key, NULL,
+ pmull_ghash_update_p8);
}
put_unaligned_be64(ctx->digest[1], dst);
put_unaligned_be64(ctx->digest[0], dst + 8);
@@ -220,29 +183,6 @@ static void ghash_reflect(u64 h[], const be128 *k)
h[1] ^= 0xc200000000000000UL;
}
-static int __ghash_setkey(struct ghash_key *key,
- const u8 *inkey, unsigned int keylen)
-{
- be128 h;
-
- /* needed for the fallback */
- memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
-
- ghash_reflect(key->h, &key->k);
-
- h = key->k;
- gf128mul_lle(&h, &key->k);
- ghash_reflect(key->h2, &h);
-
- gf128mul_lle(&h, &key->k);
- ghash_reflect(key->h3, &h);
-
- gf128mul_lle(&h, &key->k);
- ghash_reflect(key->h4, &h);
-
- return 0;
-}
-
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *inkey, unsigned int keylen)
{
@@ -251,38 +191,28 @@ static int ghash_setkey(struct crypto_shash *tfm,
if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
- return __ghash_setkey(key, inkey, keylen);
+ /* needed for the fallback */
+ memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
+
+ ghash_reflect(key->h[0], &key->k);
+ return 0;
}
-static struct shash_alg ghash_alg[] = {{
+static struct shash_alg ghash_alg = {
.base.cra_name = "ghash",
.base.cra_driver_name = "ghash-neon",
.base.cra_priority = 150,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ghash_key),
- .base.cra_module = THIS_MODULE,
-
- .digestsize = GHASH_DIGEST_SIZE,
- .init = ghash_init,
- .update = ghash_update_p8,
- .final = ghash_final_p8,
- .setkey = ghash_setkey,
- .descsize = sizeof(struct ghash_desc_ctx),
-}, {
- .base.cra_name = "ghash",
- .base.cra_driver_name = "ghash-ce",
- .base.cra_priority = 200,
- .base.cra_blocksize = GHASH_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ghash_key),
+ .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
- .update = ghash_update_p64,
- .final = ghash_final_p64,
+ .update = ghash_update,
+ .final = ghash_final,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
-}};
+};
static int num_rounds(struct crypto_aes_ctx *ctx)
{
@@ -301,6 +231,7 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
{
struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
u8 key[GHASH_BLOCK_SIZE];
+ be128 h;
int ret;
ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
@@ -309,7 +240,22 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});
- return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
+ /* needed for the fallback */
+ memcpy(&ctx->ghash_key.k, key, GHASH_BLOCK_SIZE);
+
+ ghash_reflect(ctx->ghash_key.h[0], &ctx->ghash_key.k);
+
+ h = ctx->ghash_key.k;
+ gf128mul_lle(&h, &ctx->ghash_key.k);
+ ghash_reflect(ctx->ghash_key.h[1], &h);
+
+ gf128mul_lle(&h, &ctx->ghash_key.k);
+ ghash_reflect(ctx->ghash_key.h[2], &h);
+
+ gf128mul_lle(&h, &ctx->ghash_key.k);
+ ghash_reflect(ctx->ghash_key.h[3], &h);
+
+ return 0;
}
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
@@ -341,9 +287,9 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
int blocks = count / GHASH_BLOCK_SIZE;
- ghash_do_update(blocks, dg, src, &ctx->ghash_key,
- *buf_count ? buf : NULL,
- pmull_ghash_update_p64);
+ ghash_do_simd_update(blocks, dg, src, &ctx->ghash_key,
+ *buf_count ? buf : NULL,
+ pmull_ghash_update_p64);
src += blocks * GHASH_BLOCK_SIZE;
count %= GHASH_BLOCK_SIZE;
@@ -387,8 +333,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
if (buf_count) {
memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
- ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL,
- pmull_ghash_update_p64);
+ ghash_do_simd_update(1, dg, buf, &ctx->ghash_key, NULL,
+ pmull_ghash_update_p64);
}
}
@@ -433,8 +379,8 @@ static int gcm_encrypt(struct aead_request *req)
}
kernel_neon_begin();
- pmull_gcm_encrypt(nbytes, dst, src, &ctx->ghash_key, dg,
- iv, ctx->aes_key.key_enc, nrounds,
+ pmull_gcm_encrypt(nbytes, dst, src, ctx->ghash_key.h,
+ dg, iv, ctx->aes_key.key_enc, nrounds,
tag);
kernel_neon_end();
@@ -464,7 +410,7 @@ static int gcm_encrypt(struct aead_request *req)
} while (--remaining > 0);
ghash_do_update(blocks, dg, walk.dst.virt.addr,
- &ctx->ghash_key, NULL, NULL);
+ &ctx->ghash_key, NULL);
err = skcipher_walk_done(&walk,
walk.nbytes % AES_BLOCK_SIZE);
@@ -483,7 +429,7 @@ static int gcm_encrypt(struct aead_request *req)
tag = (u8 *)&lengths;
ghash_do_update(1, dg, tag, &ctx->ghash_key,
- walk.nbytes ? buf : NULL, NULL);
+ walk.nbytes ? buf : NULL);
if (walk.nbytes)
err = skcipher_walk_done(&walk, 0);
@@ -547,8 +493,8 @@ static int gcm_decrypt(struct aead_request *req)
}
kernel_neon_begin();
- pmull_gcm_decrypt(nbytes, dst, src, &ctx->ghash_key, dg,
- iv, ctx->aes_key.key_enc, nrounds,
+ pmull_gcm_decrypt(nbytes, dst, src, ctx->ghash_key.h,
+ dg, iv, ctx->aes_key.key_enc, nrounds,
tag);
kernel_neon_end();
@@ -568,7 +514,7 @@ static int gcm_decrypt(struct aead_request *req)
u8 *dst = walk.dst.virt.addr;
ghash_do_update(blocks, dg, walk.src.virt.addr,
- &ctx->ghash_key, NULL, NULL);
+ &ctx->ghash_key, NULL);
do {
aes_encrypt(&ctx->aes_key, buf, iv);
@@ -591,7 +537,7 @@ static int gcm_decrypt(struct aead_request *req)
tag = (u8 *)&lengths;
ghash_do_update(1, dg, tag, &ctx->ghash_key,
- walk.nbytes ? buf : NULL, NULL);
+ walk.nbytes ? buf : NULL);
if (walk.nbytes) {
aes_encrypt(&ctx->aes_key, buf, iv);
@@ -635,43 +581,28 @@ static struct aead_alg gcm_aes_alg = {
.base.cra_driver_name = "gcm-aes-ce",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct gcm_aes_ctx),
+ .base.cra_ctxsize = sizeof(struct gcm_aes_ctx) +
+ 4 * sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
};
static int __init ghash_ce_mod_init(void)
{
- int ret;
-
if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
if (cpu_have_named_feature(PMULL))
- ret = crypto_register_shashes(ghash_alg,
- ARRAY_SIZE(ghash_alg));
- else
- /* only register the first array element */
- ret = crypto_register_shash(ghash_alg);
+ return crypto_register_aead(&gcm_aes_alg);
- if (ret)
- return ret;
-
- if (cpu_have_named_feature(PMULL)) {
- ret = crypto_register_aead(&gcm_aes_alg);
- if (ret)
- crypto_unregister_shashes(ghash_alg,
- ARRAY_SIZE(ghash_alg));
- }
- return ret;
+ return crypto_register_shash(&ghash_alg);
}
static void __exit ghash_ce_mod_exit(void)
{
if (cpu_have_named_feature(PMULL))
- crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
+ crypto_unregister_aead(&gcm_aes_alg);
else
- crypto_unregister_shash(ghash_alg);
- crypto_unregister_aead(&gcm_aes_alg);
+ crypto_unregister_shash(&ghash_alg);
}
static const struct cpu_feature ghash_cpu_feature[] = {
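
One detail worth noting in the arm64 rework: ghash_do_simd_update() is __always_inline and takes the SIMD routine as a parameter, so each call site, which always passes a compile-time constant (pmull_ghash_update_p8 or pmull_ghash_update_p64), compiles down to a direct call rather than an indirect one. A stripped-down sketch of the pattern, with placeholder worker bodies::

    #include <linux/compiler.h>

    static void update_p8(int blocks)  { /* NEON fallback path */ }
    static void update_p64(int blocks) { /* PMULL path */ }

    static __always_inline void do_simd_update(int blocks,
                                               void (*fn)(int))
    {
            fn(blocks);     /* folds to update_p8() or update_p64() */
    }

    static void caller_p8(int blocks)  { do_simd_update(blocks, update_p8); }
    static void caller_p64(int blocks) { do_simd_update(blocks, update_p64); }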
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 286bc8ecf15b..ca2547df9652 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -156,7 +156,7 @@ static int sha256_sparc64_import(struct shash_desc *desc, const void *in)
return 0;
}
-static struct shash_alg sha256 = {
+static struct shash_alg sha256_alg = {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_sparc64_init,
.update = sha256_sparc64_update,
@@ -174,7 +174,7 @@ static struct shash_alg sha256 = {
}
};
-static struct shash_alg sha224 = {
+static struct shash_alg sha224_alg = {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_sparc64_init,
.update = sha256_sparc64_update,
@@ -206,13 +206,13 @@ static bool __init sparc64_has_sha256_opcode(void)
static int __init sha256_sparc64_mod_init(void)
{
if (sparc64_has_sha256_opcode()) {
- int ret = crypto_register_shash(&sha224);
+ int ret = crypto_register_shash(&sha224_alg);
if (ret < 0)
return ret;
- ret = crypto_register_shash(&sha256);
+ ret = crypto_register_shash(&sha256_alg);
if (ret < 0) {
- crypto_unregister_shash(&sha224);
+ crypto_unregister_shash(&sha224_alg);
return ret;
}
@@ -225,8 +225,8 @@ static int __init sha256_sparc64_mod_init(void)
static void __exit sha256_sparc64_mod_fini(void)
{
- crypto_unregister_shash(&sha224);
- crypto_unregister_shash(&sha256);
+ crypto_unregister_shash(&sha224_alg);
+ crypto_unregister_shash(&sha256_alg);
}
module_init(sha256_sparc64_mod_init);
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index ec437db1fa54..3f0fc7dd87d7 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -63,7 +63,6 @@
*/
#include <linux/linkage.h>
-#include <asm/inst.h>
#define VMOVDQ vmovdqu
@@ -127,10 +126,6 @@ ddq_add_8:
/* generate a unique variable for ddq_add_x */
-.macro setddq n
- var_ddq_add = ddq_add_\n
-.endm
-
/* generate a unique variable for xmm register */
.macro setxdata n
var_xdata = %xmm\n
@@ -140,9 +135,7 @@ ddq_add_8:
.macro club name, id
.altmacro
- .if \name == DDQ_DATA
- setddq %\id
- .elseif \name == XDATA
+ .if \name == XDATA
setxdata %\id
.endif
.noaltmacro
@@ -165,9 +158,8 @@ ddq_add_8:
.set i, 1
.rept (by - 1)
- club DDQ_DATA, i
club XDATA, i
- vpaddq var_ddq_add(%rip), xcounter, var_xdata
+ vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
vptest ddq_low_msk(%rip), var_xdata
jnz 1f
vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
@@ -180,8 +172,7 @@ ddq_add_8:
vmovdqa 1*16(p_keys), xkeyA
vpxor xkey0, xdata0, xdata0
- club DDQ_DATA, by
- vpaddq var_ddq_add(%rip), xcounter, xcounter
+ vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
vptest ddq_low_msk(%rip), xcounter
jnz 1f
vpaddq ddq_high_add_1(%rip), xcounter, xcounter
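
The ctr-by8 change above drops the setddq/altmacro indirection: the ddq_add_1..ddq_add_8 constants sit 16 bytes apart, so entry i is reachable as ddq_add_1 + 16 * (i - 1). A C analogue of that layout, with illustrative values rather than the real byte-swapped counter-add constants::

    #include <stdint.h>

    struct u128 { uint64_t lo, hi; };

    /* Contiguous 16-byte entries, like ddq_add_1..ddq_add_8 in the .S file. */
    static const struct u128 ddq_add[8] = {
            { 1, 0 }, { 2, 0 }, { 3, 0 }, { 4, 0 },
            { 5, 0 }, { 6, 0 }, { 7, 0 }, { 8, 0 },
    };

    static inline const struct u128 *ddq_add_n(int i)   /* i in 1..8 */
    {
            return &ddq_add[i - 1];  /* same as ddq_add_1 + 16 * (i - 1) */
    }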
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 54e7d15dbd0d..1852b19a73a0 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -26,7 +26,6 @@
*/
#include <linux/linkage.h>
-#include <asm/inst.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
@@ -201,7 +200,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
mov \SUBKEY, %r12
movdqu (%r12), \TMP3
movdqa SHUF_MASK(%rip), \TMP2
- PSHUFB_XMM \TMP2, \TMP3
+ pshufb \TMP2, \TMP3
# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
@@ -263,10 +262,10 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
movdqu %xmm0, OrigIV(%arg2) # ctx_data.orig_IV = iv
movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm0
+ pshufb %xmm2, %xmm0
movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
- PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
movdqu HashKey(%arg2), %xmm13
CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
@@ -347,7 +346,7 @@ _zero_cipher_left_\@:
paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
movdqu %xmm0, CurCount(%arg2)
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm0
+ pshufb %xmm10, %xmm0
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
movdqu %xmm0, PBlockEncKey(%arg2)
@@ -377,7 +376,7 @@ _large_enough_update_\@:
# get the appropriate shuffle mask
movdqu (%r12), %xmm2
# shift right 16-r13 bytes
- PSHUFB_XMM %xmm2, %xmm1
+ pshufb %xmm2, %xmm1
_data_read_\@:
lea ALL_F+16(%rip), %r12
@@ -393,12 +392,12 @@ _data_read_\@:
.ifc \operation, dec
pand %xmm1, %xmm2
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10 ,%xmm2
+ pshufb %xmm10 ,%xmm2
pxor %xmm2, %xmm8
.else
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10,%xmm0
+ pshufb %xmm10,%xmm0
pxor %xmm0, %xmm8
.endif
@@ -408,17 +407,17 @@ _data_read_\@:
# GHASH computation for the last <16 byte block
movdqa SHUF_MASK(%rip), %xmm10
# shuffle xmm0 back to output as ciphertext
- PSHUFB_XMM %xmm10, %xmm0
+ pshufb %xmm10, %xmm0
.endif
# Output %r13 bytes
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
cmp $8, %r13
jle _less_than_8_bytes_left_\@
mov %rax, (%arg3 , %r11, 1)
add $8, %r11
psrldq $8, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
sub $8, %r13
_less_than_8_bytes_left_\@:
mov %al, (%arg3, %r11, 1)
@@ -449,7 +448,7 @@ _partial_done\@:
movd %r12d, %xmm15 # len(A) in %xmm15
mov InLen(%arg2), %r12
shl $3, %r12 # len(C) in bits (*128)
- MOVQ_R64_XMM %r12, %xmm1
+ movq %r12, %xmm1
pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
@@ -457,7 +456,7 @@ _partial_done\@:
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
# final GHASH computation
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm8
+ pshufb %xmm10, %xmm8
movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
@@ -470,7 +469,7 @@ _return_T_\@:
cmp $8, %r11
jl _T_4_\@
_T_8_\@:
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
mov %rax, (%r10)
add $8, %r10
sub $8, %r11
@@ -518,9 +517,9 @@ _return_T_done_\@:
pshufd $78, \HK, \TMP3
pxor \GH, \TMP2 # TMP2 = a1+a0
pxor \HK, \TMP3 # TMP3 = b1+b0
- PCLMULQDQ 0x11, \HK, \TMP1 # TMP1 = a1*b1
- PCLMULQDQ 0x00, \HK, \GH # GH = a0*b0
- PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
+ pclmulqdq $0x11, \HK, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x00, \HK, \GH # GH = a0*b0
+ pclmulqdq $0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
pxor \GH, \TMP2
pxor \TMP1, \TMP2 # TMP2 = (a0*b0)+(a1*b0)
movdqa \TMP2, \TMP3
@@ -570,7 +569,7 @@ _return_T_done_\@:
cmp $8, \DLEN
jl _read_lt8_\@
mov (\DPTR), %rax
- MOVQ_R64_XMM %rax, \XMMDst
+ movq %rax, \XMMDst
sub $8, \DLEN
jz _done_read_partial_block_\@
xor %eax, %eax
@@ -579,7 +578,7 @@ _read_next_byte_\@:
mov 7(\DPTR, \DLEN, 1), %al
dec \DLEN
jnz _read_next_byte_\@
- MOVQ_R64_XMM %rax, \XMM1
+ movq %rax, \XMM1
pslldq $8, \XMM1
por \XMM1, \XMMDst
jmp _done_read_partial_block_\@
@@ -590,7 +589,7 @@ _read_next_byte_lt8_\@:
mov -1(\DPTR, \DLEN, 1), %al
dec \DLEN
jnz _read_next_byte_lt8_\@
- MOVQ_R64_XMM %rax, \XMMDst
+ movq %rax, \XMMDst
_done_read_partial_block_\@:
.endm
@@ -608,7 +607,7 @@ _done_read_partial_block_\@:
jl _get_AAD_rest\@
_get_AAD_blocks\@:
movdqu (%r10), \TMP7
- PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
+ pshufb %xmm14, \TMP7 # byte-reflect the AAD data
pxor \TMP7, \TMP6
GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
add $16, %r10
@@ -624,7 +623,7 @@ _get_AAD_rest\@:
je _get_AAD_done\@
READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
- PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
+ pshufb %xmm14, \TMP7 # byte-reflect the AAD data
pxor \TMP6, \TMP7
GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
movdqu \TMP7, \TMP6
@@ -667,7 +666,7 @@ _data_read_\@: # Finished reading in data
# r16-r13 is the number of bytes in plaintext mod 16)
add %r13, %r12
movdqu (%r12), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm9 # shift right r13 bytes
+ pshufb %xmm2, %xmm9 # shift right r13 bytes
.ifc \operation, dec
movdqa %xmm1, %xmm3
@@ -689,8 +688,8 @@ _no_extra_mask_1_\@:
pand %xmm1, %xmm3
movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm3
- PSHUFB_XMM %xmm2, %xmm3
+ pshufb %xmm10, %xmm3
+ pshufb %xmm2, %xmm3
pxor %xmm3, \AAD_HASH
cmp $0, %r10
@@ -724,8 +723,8 @@ _no_extra_mask_2_\@:
pand %xmm1, %xmm9
movdqa SHUF_MASK(%rip), %xmm1
- PSHUFB_XMM %xmm1, %xmm9
- PSHUFB_XMM %xmm2, %xmm9
+ pshufb %xmm1, %xmm9
+ pshufb %xmm2, %xmm9
pxor %xmm9, \AAD_HASH
cmp $0, %r10
@@ -744,8 +743,8 @@ _encode_done_\@:
movdqa SHUF_MASK(%rip), %xmm10
# shuffle xmm9 back to output as ciphertext
- PSHUFB_XMM %xmm10, %xmm9
- PSHUFB_XMM %xmm2, %xmm9
+ pshufb %xmm10, %xmm9
+ pshufb %xmm2, %xmm9
.endif
# output encrypted Bytes
cmp $0, %r10
@@ -759,14 +758,14 @@ _partial_fill_\@:
mov \PLAIN_CYPH_LEN, %r13
_count_set_\@:
movdqa %xmm9, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
cmp $8, %r13
jle _less_than_8_bytes_left_\@
mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
add $8, \DATA_OFFSET
psrldq $8, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
+ movq %xmm0, %rax
sub $8, %r13
_less_than_8_bytes_left_\@:
movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
@@ -810,7 +809,7 @@ _partial_block_done_\@:
.else
MOVADQ \XMM0, %xmm\index
.endif
- PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
+ pshufb %xmm14, %xmm\index # perform a 16 byte swap
pxor \TMP2, %xmm\index
.endr
lea 0x10(%arg1),%r10
@@ -821,7 +820,7 @@ _partial_block_done_\@:
aes_loop_initial_\@:
MOVADQ (%r10),\TMP1
.irpc index, \i_seq
- AESENC \TMP1, %xmm\index
+ aesenc \TMP1, %xmm\index
.endr
add $16,%r10
sub $1,%eax
@@ -829,7 +828,7 @@ aes_loop_initial_\@:
MOVADQ (%r10), \TMP1
.irpc index, \i_seq
- AESENCLAST \TMP1, %xmm\index # Last Round
+ aesenclast \TMP1, %xmm\index # Last Round
.endr
.irpc index, \i_seq
movdqu (%arg4 , %r11, 1), \TMP1
@@ -841,7 +840,7 @@ aes_loop_initial_\@:
.ifc \operation, dec
movdqa \TMP1, %xmm\index
.endif
- PSHUFB_XMM %xmm14, %xmm\index
+ pshufb %xmm14, %xmm\index
# prepare plaintext/ciphertext for GHASH computation
.endr
@@ -876,19 +875,19 @@ aes_loop_initial_\@:
MOVADQ ONE(%RIP),\TMP1
paddd \TMP1, \XMM0 # INCR Y0
MOVADQ \XMM0, \XMM1
- PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+ pshufb %xmm14, \XMM1 # perform a 16 byte swap
paddd \TMP1, \XMM0 # INCR Y0
MOVADQ \XMM0, \XMM2
- PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
+ pshufb %xmm14, \XMM2 # perform a 16 byte swap
paddd \TMP1, \XMM0 # INCR Y0
MOVADQ \XMM0, \XMM3
- PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
+ pshufb %xmm14, \XMM3 # perform a 16 byte swap
paddd \TMP1, \XMM0 # INCR Y0
MOVADQ \XMM0, \XMM4
- PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+ pshufb %xmm14, \XMM4 # perform a 16 byte swap
MOVADQ 0(%arg1),\TMP1
pxor \TMP1, \XMM1
@@ -897,17 +896,17 @@ aes_loop_initial_\@:
pxor \TMP1, \XMM4
.irpc index, 1234 # do 4 rounds
movaps 0x10*\index(%arg1), \TMP1
- AESENC \TMP1, \XMM1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
.endr
.irpc index, 56789 # do next 5 rounds
movaps 0x10*\index(%arg1), \TMP1
- AESENC \TMP1, \XMM1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
.endr
lea 0xa0(%arg1),%r10
mov keysize,%eax
@@ -918,7 +917,7 @@ aes_loop_initial_\@:
aes_loop_pre_\@:
MOVADQ (%r10),\TMP2
.irpc index, 1234
- AESENC \TMP2, %xmm\index
+ aesenc \TMP2, %xmm\index
.endr
add $16,%r10
sub $1,%eax
@@ -926,10 +925,10 @@ aes_loop_pre_\@:
aes_loop_pre_done\@:
MOVADQ (%r10), \TMP2
- AESENCLAST \TMP2, \XMM1
- AESENCLAST \TMP2, \XMM2
- AESENCLAST \TMP2, \XMM3
- AESENCLAST \TMP2, \XMM4
+ aesenclast \TMP2, \XMM1
+ aesenclast \TMP2, \XMM2
+ aesenclast \TMP2, \XMM3
+ aesenclast \TMP2, \XMM4
movdqu 16*0(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM1
.ifc \operation, dec
@@ -961,12 +960,12 @@ aes_loop_pre_done\@:
.endif
add $64, %r11
- PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+ pshufb %xmm14, \XMM1 # perform a 16 byte swap
pxor \XMMDst, \XMM1
# combine GHASHed value with the corresponding ciphertext
- PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+ pshufb %xmm14, \XMM2 # perform a 16 byte swap
+ pshufb %xmm14, \XMM3 # perform a 16 byte swap
+ pshufb %xmm14, \XMM4 # perform a 16 byte swap
_initial_blocks_done\@:
@@ -978,7 +977,7 @@ _initial_blocks_done\@:
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
-.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
+.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM1, \XMM5
@@ -994,7 +993,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
movdqu HashKey_4(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
movdqa \XMM0, \XMM2
@@ -1002,51 +1001,51 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM0, \XMM3
paddd ONE(%rip), \XMM0 # INCR CNT
movdqa \XMM0, \XMM4
- PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
- PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0
- PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap
+ pshufb %xmm15, \XMM1 # perform a 16 byte swap
+ pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0
+ pshufb %xmm15, \XMM2 # perform a 16 byte swap
+ pshufb %xmm15, \XMM3 # perform a 16 byte swap
+ pshufb %xmm15, \XMM4 # perform a 16 byte swap
pxor (%arg1), \XMM1
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
movdqu HashKey_4_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
- AESENC \TMP1, \XMM1 # Round 1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1 # Round 1
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
movaps 0x20(%arg1), \TMP1
- AESENC \TMP1, \XMM1 # Round 2
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1 # Round 2
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
movdqu HashKey_3(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 3
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 3
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0
movaps 0x40(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 4
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 4
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
movdqu HashKey_3_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 5
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 5
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
pxor \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
pxor \XMM6, \XMM5
@@ -1058,25 +1057,25 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
# Multiply TMP5 * HashKey using karatsuba
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x60(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 6
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 6
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0
movaps 0x70(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 7
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 7
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
movdqu HashKey_2_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 8
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 8
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
pxor \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
pxor \XMM7, \XMM5
@@ -1089,13 +1088,13 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
movdqu HashKey(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 9
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 9
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0
lea 0xa0(%arg1),%r10
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
@@ -1105,7 +1104,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
aes_loop_par_enc\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
- AESENC \TMP3, %xmm\index
+ aesenc \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
@@ -1113,12 +1112,12 @@ aes_loop_par_enc\@:
aes_loop_par_enc_done\@:
MOVADQ (%r10), \TMP3
- AESENCLAST \TMP3, \XMM1 # Round 10
- AESENCLAST \TMP3, \XMM2
- AESENCLAST \TMP3, \XMM3
- AESENCLAST \TMP3, \XMM4
+ aesenclast \TMP3, \XMM1 # Round 10
+ aesenclast \TMP3, \XMM2
+ aesenclast \TMP3, \XMM3
+ aesenclast \TMP3, \XMM4
movdqu HashKey_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
movdqu 16(%arg4,%r11,1), \TMP3
@@ -1131,10 +1130,10 @@ aes_loop_par_enc_done\@:
movdqu \XMM2, 16(%arg3,%r11,1) # Write to the ciphertext buffer
movdqu \XMM3, 32(%arg3,%r11,1) # Write to the ciphertext buffer
movdqu \XMM4, 48(%arg3,%r11,1) # Write to the ciphertext buffer
- PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap
+ pshufb %xmm15, \XMM1 # perform a 16 byte swap
+ pshufb %xmm15, \XMM2 # perform a 16 byte swap
+ pshufb %xmm15, \XMM3 # perform a 16 byte swap
+ pshufb %xmm15, \XMM4 # perform a 16 byte swap
pxor \TMP4, \TMP1
pxor \XMM8, \XMM5
@@ -1186,7 +1185,7 @@ aes_loop_par_enc_done\@:
* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
-.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
+.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM1, \XMM5
@@ -1202,7 +1201,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
movdqu HashKey_4(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
movdqa \XMM0, \XMM2
@@ -1210,51 +1209,51 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM0, \XMM3
paddd ONE(%rip), \XMM0 # INCR CNT
movdqa \XMM0, \XMM4
- PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
- PCLMULQDQ 0x00, \TMP5, \XMM5 # XMM5 = a0*b0
- PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap
+ pshufb %xmm15, \XMM1 # perform a 16 byte swap
+ pclmulqdq $0x00, \TMP5, \XMM5 # XMM5 = a0*b0
+ pshufb %xmm15, \XMM2 # perform a 16 byte swap
+ pshufb %xmm15, \XMM3 # perform a 16 byte swap
+ pshufb %xmm15, \XMM4 # perform a 16 byte swap
pxor (%arg1), \XMM1
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
movdqu HashKey_4_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
- AESENC \TMP1, \XMM1 # Round 1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1 # Round 1
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
movaps 0x20(%arg1), \TMP1
- AESENC \TMP1, \XMM1 # Round 2
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
+ aesenc \TMP1, \XMM1 # Round 2
+ aesenc \TMP1, \XMM2
+ aesenc \TMP1, \XMM3
+ aesenc \TMP1, \XMM4
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
movdqu HashKey_3(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 3
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM6 # XMM6 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 3
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM6 # XMM6 = a0*b0
movaps 0x40(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 4
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 4
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
movdqu HashKey_3_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 5
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 5
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
pxor \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
pxor \XMM6, \XMM5
@@ -1266,25 +1265,25 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
# Multiply TMP5 * HashKey using karatsuba
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x60(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 6
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM7 # XMM7 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 6
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM7 # XMM7 = a0*b0
movaps 0x70(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 7
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 7
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
movdqu HashKey_2_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 8
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
+ aesenc \TMP3, \XMM1 # Round 8
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
pxor \TMP1, \TMP4
# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
pxor \XMM7, \XMM5
@@ -1297,13 +1296,13 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
movdqu HashKey(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
- AESENC \TMP3, \XMM1 # Round 9
- AESENC \TMP3, \XMM2
- AESENC \TMP3, \XMM3
- AESENC \TMP3, \XMM4
- PCLMULQDQ 0x00, \TMP5, \XMM8 # XMM8 = a0*b0
+ aesenc \TMP3, \XMM1 # Round 9
+ aesenc \TMP3, \XMM2
+ aesenc \TMP3, \XMM3
+ aesenc \TMP3, \XMM4
+ pclmulqdq $0x00, \TMP5, \XMM8 # XMM8 = a0*b0
lea 0xa0(%arg1),%r10
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
@@ -1313,7 +1312,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
aes_loop_par_dec\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
- AESENC \TMP3, %xmm\index
+ aesenc \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
@@ -1321,12 +1320,12 @@ aes_loop_par_dec\@:
aes_loop_par_dec_done\@:
MOVADQ (%r10), \TMP3
- AESENCLAST \TMP3, \XMM1 # last round
- AESENCLAST \TMP3, \XMM2
- AESENCLAST \TMP3, \XMM3
- AESENCLAST \TMP3, \XMM4
+ aesenclast \TMP3, \XMM1 # last round
+ aesenclast \TMP3, \XMM2
+ aesenclast \TMP3, \XMM3
+ aesenclast \TMP3, \XMM4
movdqu HashKey_k(%arg2), \TMP5
- PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
movdqu \XMM1, (%arg3,%r11,1) # Write to plaintext buffer
@@ -1343,10 +1342,10 @@ aes_loop_par_dec_done\@:
pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
movdqu \XMM4, 48(%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM4
- PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm15, \XMM4 # perform a 16 byte swap
+ pshufb %xmm15, \XMM1 # perform a 16 byte swap
+ pshufb %xmm15, \XMM2 # perform a 16 byte swap
+ pshufb %xmm15, \XMM3 # perform a 16 byte swap
+ pshufb %xmm15, \XMM4 # perform a 16 byte swap
pxor \TMP4, \TMP1
pxor \XMM8, \XMM5
@@ -1402,10 +1401,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pshufd $78, \XMM1, \TMP2
pxor \XMM1, \TMP2
movdqu HashKey_4(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
- PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
+ pclmulqdq $0x11, \TMP5, \TMP6 # TMP6 = a1*b1
+ pclmulqdq $0x00, \TMP5, \XMM1 # XMM1 = a0*b0
movdqu HashKey_4_k(%arg2), \TMP4
- PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqa \XMM1, \XMMDst
movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
@@ -1415,10 +1414,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pshufd $78, \XMM2, \TMP2
pxor \XMM2, \TMP2
movdqu HashKey_3(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
- PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x00, \TMP5, \XMM2 # XMM2 = a0*b0
movdqu HashKey_3_k(%arg2), \TMP4
- PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM2, \XMMDst
pxor \TMP2, \XMM1
@@ -1430,10 +1429,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pshufd $78, \XMM3, \TMP2
pxor \XMM3, \TMP2
movdqu HashKey_2(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
- PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x00, \TMP5, \XMM3 # XMM3 = a0*b0
movdqu HashKey_2_k(%arg2), \TMP4
- PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM3, \XMMDst
pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1
@@ -1443,10 +1442,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
pshufd $78, \XMM4, \TMP2
pxor \XMM4, \TMP2
movdqu HashKey(%arg2), \TMP5
- PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
- PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
+ pclmulqdq $0x11, \TMP5, \TMP1 # TMP1 = a1*b1
+ pclmulqdq $0x00, \TMP5, \XMM4 # XMM4 = a0*b0
movdqu HashKey_k(%arg2), \TMP4
- PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
+ pclmulqdq $0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM4, \XMMDst
pxor \XMM1, \TMP2
@@ -1504,13 +1503,13 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
_esb_loop_\@:
MOVADQ (%r10),\TMP1
- AESENC \TMP1,\XMM0
+ aesenc \TMP1,\XMM0
add $16,%r10
sub $1,%eax
jnz _esb_loop_\@
MOVADQ (%r10),\TMP1
- AESENCLAST \TMP1,\XMM0
+ aesenclast \TMP1,\XMM0
.endm
/*****************************************************************************
* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
@@ -1849,72 +1848,72 @@ SYM_FUNC_START(aesni_set_key)
movups 0x10(UKEYP), %xmm2 # other user key
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
- AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
+ aeskeygenassist $0x1, %xmm2, %xmm1 # round 1
call _key_expansion_256a
- AESKEYGENASSIST 0x1 %xmm0 %xmm1
+ aeskeygenassist $0x1, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
+ aeskeygenassist $0x2, %xmm2, %xmm1 # round 2
call _key_expansion_256a
- AESKEYGENASSIST 0x2 %xmm0 %xmm1
+ aeskeygenassist $0x2, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
+ aeskeygenassist $0x4, %xmm2, %xmm1 # round 3
call _key_expansion_256a
- AESKEYGENASSIST 0x4 %xmm0 %xmm1
+ aeskeygenassist $0x4, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
+ aeskeygenassist $0x8, %xmm2, %xmm1 # round 4
call _key_expansion_256a
- AESKEYGENASSIST 0x8 %xmm0 %xmm1
+ aeskeygenassist $0x8, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
+ aeskeygenassist $0x10, %xmm2, %xmm1 # round 5
call _key_expansion_256a
- AESKEYGENASSIST 0x10 %xmm0 %xmm1
+ aeskeygenassist $0x10, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
+ aeskeygenassist $0x20, %xmm2, %xmm1 # round 6
call _key_expansion_256a
- AESKEYGENASSIST 0x20 %xmm0 %xmm1
+ aeskeygenassist $0x20, %xmm0, %xmm1
call _key_expansion_256b
- AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
+ aeskeygenassist $0x40, %xmm2, %xmm1 # round 7
call _key_expansion_256a
jmp .Ldec_key
.Lenc_key192:
movq 0x10(UKEYP), %xmm2 # other user key
- AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
+ aeskeygenassist $0x1, %xmm2, %xmm1 # round 1
call _key_expansion_192a
- AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
+ aeskeygenassist $0x2, %xmm2, %xmm1 # round 2
call _key_expansion_192b
- AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
+ aeskeygenassist $0x4, %xmm2, %xmm1 # round 3
call _key_expansion_192a
- AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
+ aeskeygenassist $0x8, %xmm2, %xmm1 # round 4
call _key_expansion_192b
- AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
+ aeskeygenassist $0x10, %xmm2, %xmm1 # round 5
call _key_expansion_192a
- AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
+ aeskeygenassist $0x20, %xmm2, %xmm1 # round 6
call _key_expansion_192b
- AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
+ aeskeygenassist $0x40, %xmm2, %xmm1 # round 7
call _key_expansion_192a
- AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8
+ aeskeygenassist $0x80, %xmm2, %xmm1 # round 8
call _key_expansion_192b
jmp .Ldec_key
.Lenc_key128:
- AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1
+ aeskeygenassist $0x1, %xmm0, %xmm1 # round 1
call _key_expansion_128
- AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2
+ aeskeygenassist $0x2, %xmm0, %xmm1 # round 2
call _key_expansion_128
- AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3
+ aeskeygenassist $0x4, %xmm0, %xmm1 # round 3
call _key_expansion_128
- AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4
+ aeskeygenassist $0x8, %xmm0, %xmm1 # round 4
call _key_expansion_128
- AESKEYGENASSIST 0x10 %xmm0 %xmm1 # round 5
+ aeskeygenassist $0x10, %xmm0, %xmm1 # round 5
call _key_expansion_128
- AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6
+ aeskeygenassist $0x20, %xmm0, %xmm1 # round 6
call _key_expansion_128
- AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7
+ aeskeygenassist $0x40, %xmm0, %xmm1 # round 7
call _key_expansion_128
- AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8
+ aeskeygenassist $0x80, %xmm0, %xmm1 # round 8
call _key_expansion_128
- AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9
+ aeskeygenassist $0x1b, %xmm0, %xmm1 # round 9
call _key_expansion_128
- AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10
+ aeskeygenassist $0x36, %xmm0, %xmm1 # round 10
call _key_expansion_128
.Ldec_key:
sub $0x10, TKEYP
@@ -1927,7 +1926,7 @@ SYM_FUNC_START(aesni_set_key)
.align 4
.Ldec_key_loop:
movaps (KEYP), %xmm0
- AESIMC %xmm0 %xmm1
+ aesimc %xmm0, %xmm1
movaps %xmm1, (UKEYP)
add $0x10, KEYP
sub $0x10, UKEYP
@@ -1988,37 +1987,37 @@ SYM_FUNC_START_LOCAL(_aesni_enc1)
je .Lenc192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps -0x50(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
.align 4
.Lenc192:
movaps -0x40(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps -0x30(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
.align 4
.Lenc128:
movaps -0x20(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps -0x10(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps (TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x10(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x20(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x30(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x40(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x50(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x60(TKEYP), KEY
- AESENC KEY STATE
+ aesenc KEY, STATE
movaps 0x70(TKEYP), KEY
- AESENCLAST KEY STATE
+ aesenclast KEY, STATE
ret
SYM_FUNC_END(_aesni_enc1)
@@ -2054,79 +2053,79 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
je .L4enc192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps -0x50(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
#.align 4
.L4enc192:
movaps -0x40(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps -0x30(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
#.align 4
.L4enc128:
movaps -0x20(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps -0x10(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps (TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x10(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x20(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x30(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x40(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x50(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x60(TKEYP), KEY
- AESENC KEY STATE1
- AESENC KEY STATE2
- AESENC KEY STATE3
- AESENC KEY STATE4
+ aesenc KEY, STATE1
+ aesenc KEY, STATE2
+ aesenc KEY, STATE3
+ aesenc KEY, STATE4
movaps 0x70(TKEYP), KEY
- AESENCLAST KEY STATE1 # last round
- AESENCLAST KEY STATE2
- AESENCLAST KEY STATE3
- AESENCLAST KEY STATE4
+ aesenclast KEY, STATE1 # last round
+ aesenclast KEY, STATE2
+ aesenclast KEY, STATE3
+ aesenclast KEY, STATE4
ret
SYM_FUNC_END(_aesni_enc4)
@@ -2178,37 +2177,37 @@ SYM_FUNC_START_LOCAL(_aesni_dec1)
je .Ldec192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps -0x50(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
.align 4
.Ldec192:
movaps -0x40(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps -0x30(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
.align 4
.Ldec128:
movaps -0x20(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps -0x10(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps (TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x10(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x20(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x30(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x40(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x50(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x60(TKEYP), KEY
- AESDEC KEY STATE
+ aesdec KEY, STATE
movaps 0x70(TKEYP), KEY
- AESDECLAST KEY STATE
+ aesdeclast KEY, STATE
ret
SYM_FUNC_END(_aesni_dec1)
@@ -2244,79 +2243,79 @@ SYM_FUNC_START_LOCAL(_aesni_dec4)
je .L4dec192
add $0x20, TKEYP
movaps -0x60(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps -0x50(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
.align 4
.L4dec192:
movaps -0x40(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps -0x30(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
.align 4
.L4dec128:
movaps -0x20(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps -0x10(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps (TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x10(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x20(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x30(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x40(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x50(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x60(TKEYP), KEY
- AESDEC KEY STATE1
- AESDEC KEY STATE2
- AESDEC KEY STATE3
- AESDEC KEY STATE4
+ aesdec KEY, STATE1
+ aesdec KEY, STATE2
+ aesdec KEY, STATE3
+ aesdec KEY, STATE4
movaps 0x70(TKEYP), KEY
- AESDECLAST KEY STATE1 # last round
- AESDECLAST KEY STATE2
- AESDECLAST KEY STATE3
- AESDECLAST KEY STATE4
+ aesdeclast KEY, STATE1 # last round
+ aesdeclast KEY, STATE2
+ aesdeclast KEY, STATE3
+ aesdeclast KEY, STATE4
ret
SYM_FUNC_END(_aesni_dec4)
@@ -2599,10 +2598,10 @@ SYM_FUNC_END(aesni_cbc_dec)
SYM_FUNC_START_LOCAL(_aesni_inc_init)
movaps .Lbswap_mask, BSWAP_MASK
movaps IV, CTR
- PSHUFB_XMM BSWAP_MASK CTR
+ pshufb BSWAP_MASK, CTR
mov $1, TCTR_LOW
- MOVQ_R64_XMM TCTR_LOW INC
- MOVQ_R64_XMM CTR TCTR_LOW
+ movq TCTR_LOW, INC
+ movq CTR, TCTR_LOW
ret
SYM_FUNC_END(_aesni_inc_init)
@@ -2630,7 +2629,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc)
psrldq $8, INC
.Linc_low:
movaps CTR, IV
- PSHUFB_XMM BSWAP_MASK IV
+ pshufb BSWAP_MASK, IV
ret
SYM_FUNC_END(_aesni_inc)
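Note that the aesenc/aesenclast conversions above leave the round structure unchanged; only the encoding moves from hand-emitted macro bytes to real mnemonics. As a point of reference, here is a minimal userspace C sketch of the same ten-round AES-128 flow, using the intrinsics that compile to these exact instructions (the expanded key schedule rk[] is assumed to come from aeskeygenassist-style key expansion, which is not shown):

#include <wmmintrin.h>          /* AES-NI intrinsics; build with -maes */

/*
 * Hedged sketch of the _aesni_enc1 round structure: initial whitening,
 * nine full rounds, one final round without MixColumns.
 */
static __m128i aes128_encrypt_block(__m128i state, const __m128i rk[11])
{
	state = _mm_xor_si128(state, rk[0]);            /* initial whitening */
	for (int i = 1; i < 10; i++)
		state = _mm_aesenc_si128(state, rk[i]); /* rounds 1..9 */
	return _mm_aesenclast_si128(state, rk[10]);     /* final round */
}

The asm's _aesni_enc1 additionally branches on key size to run 12 or 14 rounds for AES-192/256; the sketch fixes 10 rounds for brevity.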
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 0cea33295287..5fee47956f3b 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -120,7 +120,6 @@
##
#include <linux/linkage.h>
-#include <asm/inst.h>
# constants in mergeable sections, linker can reorder and merge
.section .rodata.cst16.POLY, "aM", @progbits, 16
diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S
index a38ab2512a6f..ca1788bfee16 100644
--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -120,10 +120,10 @@ SYM_FUNC_START(chacha_block_xor_ssse3)
FRAME_BEGIN
# x0..3 = s0..3
- movdqa 0x00(%rdi),%xmm0
- movdqa 0x10(%rdi),%xmm1
- movdqa 0x20(%rdi),%xmm2
- movdqa 0x30(%rdi),%xmm3
+ movdqu 0x00(%rdi),%xmm0
+ movdqu 0x10(%rdi),%xmm1
+ movdqu 0x20(%rdi),%xmm2
+ movdqu 0x30(%rdi),%xmm3
movdqa %xmm0,%xmm8
movdqa %xmm1,%xmm9
movdqa %xmm2,%xmm10
@@ -205,10 +205,10 @@ SYM_FUNC_START(hchacha_block_ssse3)
# %edx: nrounds
FRAME_BEGIN
- movdqa 0x00(%rdi),%xmm0
- movdqa 0x10(%rdi),%xmm1
- movdqa 0x20(%rdi),%xmm2
- movdqa 0x30(%rdi),%xmm3
+ movdqu 0x00(%rdi),%xmm0
+ movdqu 0x10(%rdi),%xmm1
+ movdqu 0x20(%rdi),%xmm2
+ movdqu 0x30(%rdi),%xmm3
mov %edx,%r8d
call chacha_permute
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 22250091cdbe..e67a59130025 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -14,8 +14,6 @@
#include <linux/module.h>
#include <asm/simd.h>
-#define CHACHA_STATE_ALIGN 16
-
asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
@@ -124,8 +122,6 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
hchacha_block_generic(state, stream, nrounds);
} else {
@@ -138,8 +134,6 @@ EXPORT_SYMBOL(hchacha_block_arch);
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
{
- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
chacha_init_generic(state, key, iv);
}
EXPORT_SYMBOL(chacha_init_arch);
@@ -147,8 +141,6 @@ EXPORT_SYMBOL(chacha_init_arch);
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
int nrounds)
{
- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
bytes <= CHACHA_BLOCK_SIZE)
return chacha_crypt_generic(state, dst, src, bytes, nrounds);
@@ -170,15 +162,12 @@ EXPORT_SYMBOL(chacha_crypt_arch);
static int chacha_simd_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx, const u8 *iv)
{
- u32 *state, state_buf[16 + 2] __aligned(8);
+ u32 state[CHACHA_STATE_WORDS] __aligned(8);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
- BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
- state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-
chacha_init_generic(state, ctx->key, iv);
while (walk.nbytes > 0) {
@@ -217,12 +206,10 @@ static int xchacha_simd(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *state, state_buf[16 + 2] __aligned(8);
+ u32 state[CHACHA_STATE_WORDS] __aligned(8);
struct chacha_ctx subctx;
u8 real_iv[16];
- BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
- state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
chacha_init_generic(state, ctx->key, req->iv);
if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
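The two chacha changes depend on each other: once the SSSE3 code loads the state with movdqu, 16-byte alignment is no longer part of the asm's contract, so the glue code can replace the over-allocated state_buf[16 + 2] plus PTR_ALIGN dance with a plain CHACHA_STATE_WORDS (16-word) array. A hedged sketch of the arithmetic behind the old pattern; the PTR_ALIGN stand-in here is illustrative, not the kernel macro:

#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the kernel's PTR_ALIGN() macro. */
#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

void old_pattern(void)
{
	/* 16 state words plus 2 spare words: an 8-byte-aligned buffer
	 * needs at most 8 filler bytes (2 words) to reach a 16-byte
	 * boundary, so an aligned run of 16 words always fits. */
	uint32_t state_buf[16 + 2];
	uint32_t *state = PTR_ALIGN(state_buf, 16);
	memset(state, 0, 16 * sizeof(*state));
}

void new_pattern(void)
{
	/* With movdqu in the asm, 8-byte alignment is enough. */
	uint32_t state[16] __attribute__((aligned(8)));
	memset(state, 0, sizeof(state));
}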
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index 9fd28ff65bc2..6e7d4c4d3208 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -38,7 +38,6 @@
*/
#include <linux/linkage.h>
-#include <asm/inst.h>
.section .rodata
@@ -129,17 +128,17 @@ loop_64:/* 64 bytes Full cache line folding */
#ifdef __x86_64__
movdqa %xmm4, %xmm8
#endif
- PCLMULQDQ 00, CONSTANT, %xmm1
- PCLMULQDQ 00, CONSTANT, %xmm2
- PCLMULQDQ 00, CONSTANT, %xmm3
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x00, CONSTANT, %xmm2
+ pclmulqdq $0x00, CONSTANT, %xmm3
#ifdef __x86_64__
- PCLMULQDQ 00, CONSTANT, %xmm4
+ pclmulqdq $0x00, CONSTANT, %xmm4
#endif
- PCLMULQDQ 0x11, CONSTANT, %xmm5
- PCLMULQDQ 0x11, CONSTANT, %xmm6
- PCLMULQDQ 0x11, CONSTANT, %xmm7
+ pclmulqdq $0x11, CONSTANT, %xmm5
+ pclmulqdq $0x11, CONSTANT, %xmm6
+ pclmulqdq $0x11, CONSTANT, %xmm7
#ifdef __x86_64__
- PCLMULQDQ 0x11, CONSTANT, %xmm8
+ pclmulqdq $0x11, CONSTANT, %xmm8
#endif
pxor %xmm5, %xmm1
pxor %xmm6, %xmm2
@@ -149,8 +148,8 @@ loop_64:/* 64 bytes Full cache line folding */
#else
/* xmm8 unsupported for x32 */
movdqa %xmm4, %xmm5
- PCLMULQDQ 00, CONSTANT, %xmm4
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm4
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm4
#endif
@@ -172,20 +171,20 @@ less_64:/* Folding cache line into 128bit */
prefetchnta (BUF)
movdqa %xmm1, %xmm5
- PCLMULQDQ 0x00, CONSTANT, %xmm1
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm1
pxor %xmm2, %xmm1
movdqa %xmm1, %xmm5
- PCLMULQDQ 0x00, CONSTANT, %xmm1
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm1
pxor %xmm3, %xmm1
movdqa %xmm1, %xmm5
- PCLMULQDQ 0x00, CONSTANT, %xmm1
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm1
pxor %xmm4, %xmm1
@@ -193,8 +192,8 @@ less_64:/* Folding cache line into 128bit */
jb fold_64
loop_16:/* Folding rest buffer into 128bit */
movdqa %xmm1, %xmm5
- PCLMULQDQ 0x00, CONSTANT, %xmm1
- PCLMULQDQ 0x11, CONSTANT, %xmm5
+ pclmulqdq $0x00, CONSTANT, %xmm1
+ pclmulqdq $0x11, CONSTANT, %xmm5
pxor %xmm5, %xmm1
pxor (BUF), %xmm1
sub $0x10, LEN
@@ -205,7 +204,7 @@ loop_16:/* Folding rest buffer into 128bit */
fold_64:
/* perform the last 64 bit fold, also adds 32 zeroes
* to the input stream */
- PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
+ pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
psrldq $0x08, %xmm1
pxor CONSTANT, %xmm1
@@ -220,7 +219,7 @@ fold_64:
#endif
psrldq $0x04, %xmm2
pand %xmm3, %xmm1
- PCLMULQDQ 0x00, CONSTANT, %xmm1
+ pclmulqdq $0x00, CONSTANT, %xmm1
pxor %xmm2, %xmm1
/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
@@ -231,11 +230,11 @@ fold_64:
#endif
movdqa %xmm1, %xmm2
pand %xmm3, %xmm1
- PCLMULQDQ 0x10, CONSTANT, %xmm1
+ pclmulqdq $0x10, CONSTANT, %xmm1
pand %xmm3, %xmm1
- PCLMULQDQ 0x00, CONSTANT, %xmm1
+ pclmulqdq $0x00, CONSTANT, %xmm1
pxor %xmm2, %xmm1
- PEXTRD 0x01, %xmm1, %eax
+ pextrd $0x01, %xmm1, %eax
ret
SYM_FUNC_END(crc32_pclmul_le_16)
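For readers decoding the immediates above: pclmulqdq's imm8 selects which 64-bit halves are multiplied ($0x00 is low*low, $0x11 is high*high), and each folding step XORs both products into the next 16 bytes of input. A hedged intrinsics sketch of one such step, mirroring the movdqa/pclmulqdq/pxor triple in loop_16 (K stands for the precomputed fold constants the asm keeps in CONSTANT):

#include <wmmintrin.h>          /* PCLMULQDQ intrinsics; build with -mpclmul */

/*
 * One 128-bit CRC folding step: fold the accumulator over the constant
 * pair and absorb the next 16 bytes of input.
 */
static __m128i fold_step(__m128i acc, __m128i next16, __m128i K)
{
	__m128i lo = _mm_clmulepi64_si128(acc, K, 0x00); /* acc.lo * K.lo */
	__m128i hi = _mm_clmulepi64_si128(acc, K, 0x11); /* acc.hi * K.hi */

	return _mm_xor_si128(_mm_xor_si128(lo, hi), next16);
}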
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 8501ec4532f4..884dc767b051 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -43,7 +43,6 @@
* SOFTWARE.
*/
-#include <asm/inst.h>
#include <linux/linkage.h>
#include <asm/nospec-branch.h>
@@ -170,7 +169,7 @@ continue_block:
## branch into array
lea jump_table(%rip), %bufp
- movzxw (%bufp, %rax, 2), len
+ movzwq (%bufp, %rax, 2), len
lea crc_array(%rip), %bufp
lea (%bufp, len, 1), %bufp
JMP_NOSPEC bufp
@@ -225,10 +224,10 @@ LABEL crc_ %i
subq %rax, tmp # tmp -= rax*24
movq crc_init, %xmm1 # CRC for block 1
- PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2
+ pclmulqdq $0x00, %xmm0, %xmm1 # Multiply by K2
movq crc1, %xmm2 # CRC for block 2
- PCLMULQDQ 0x10, %xmm0, %xmm2 # Multiply by K1
+ pclmulqdq $0x10, %xmm0, %xmm2 # Multiply by K1
pxor %xmm2,%xmm1
movq %xmm1, %rax
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index 8a17621f7d3a..8acbb6584a37 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -948,10 +948,8 @@ static void store_felem(u64 *b, u64 *f)
{
u64 f30 = f[3U];
u64 top_bit0 = f30 >> (u32)63U;
- u64 carry0;
u64 f31;
u64 top_bit;
- u64 carry;
u64 f0;
u64 f1;
u64 f2;
@@ -970,11 +968,11 @@ static void store_felem(u64 *b, u64 *f)
u64 o2;
u64 o3;
f[3U] = f30 & (u64)0x7fffffffffffffffU;
- carry0 = add_scalar(f, f, (u64)19U * top_bit0);
+ add_scalar(f, f, (u64)19U * top_bit0);
f31 = f[3U];
top_bit = f31 >> (u32)63U;
f[3U] = f31 & (u64)0x7fffffffffffffffU;
- carry = add_scalar(f, f, (u64)19U * top_bit);
+ add_scalar(f, f, (u64)19U * top_bit);
f0 = f[0U];
f1 = f[1U];
f2 = f[2U];
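The deleted carry0/carry locals were write-only: after the top bit of f[3] is cleared and folded back in as 19 * top_bit, the carry out of add_scalar is always zero, so discarding the return value loses nothing. A hedged, portable sketch of that final reduction step mod 2^255 - 19 (the real add_scalar is assembly; this stand-in uses the GCC/Clang __int128 extension):

#include <stdint.h>

/* Portable stand-in for add_scalar(): out = f + x, returns the carry. */
static uint64_t add_scalar(uint64_t out[4], const uint64_t f[4], uint64_t x)
{
	unsigned __int128 acc = (unsigned __int128)f[0] + x;
	out[0] = (uint64_t)acc;
	for (int i = 1; i < 4; i++) {
		acc = (unsigned __int128)f[i] + (uint64_t)(acc >> 64);
		out[i] = (uint64_t)acc;
	}
	return (uint64_t)(acc >> 64);
}

/* Fold the top bit of f: clearing bit 255 and adding 19*top_bit leaves
 * the value unchanged mod 2^255 - 19. Done twice, as in store_felem,
 * the result fits in 255 bits and the carry out is provably zero. */
static void fold_top_bit(uint64_t f[4])
{
	for (int i = 0; i < 2; i++) {
		uint64_t top_bit = f[3] >> 63;

		f[3] &= 0x7fffffffffffffffULL;
		add_scalar(f, f, 19 * top_bit);
	}
}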
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index bb9735fbb865..99ac25e18e09 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -14,7 +14,6 @@
*/
#include <linux/linkage.h>
-#include <asm/inst.h>
#include <asm/frame.h>
.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
@@ -51,9 +50,9 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble)
pxor DATA, T2
pxor SHASH, T3
- PCLMULQDQ 0x00 SHASH DATA # DATA = a0 * b0
- PCLMULQDQ 0x11 SHASH T1 # T1 = a1 * b1
- PCLMULQDQ 0x00 T3 T2 # T2 = (a1 + a0) * (b1 + b0)
+ pclmulqdq $0x00, SHASH, DATA # DATA = a0 * b0
+ pclmulqdq $0x11, SHASH, T1 # T1 = a1 * b1
+ pclmulqdq $0x00, T3, T2 # T2 = (a1 + a0) * (b1 + b0)
pxor DATA, T2
pxor T1, T2 # T2 = a0 * b1 + a1 * b0
@@ -95,9 +94,9 @@ SYM_FUNC_START(clmul_ghash_mul)
movups (%rdi), DATA
movups (%rsi), SHASH
movaps .Lbswap_mask, BSWAP
- PSHUFB_XMM BSWAP DATA
+ pshufb BSWAP, DATA
call __clmul_gf128mul_ble
- PSHUFB_XMM BSWAP DATA
+ pshufb BSWAP, DATA
movups DATA, (%rdi)
FRAME_END
ret
@@ -114,18 +113,18 @@ SYM_FUNC_START(clmul_ghash_update)
movaps .Lbswap_mask, BSWAP
movups (%rdi), DATA
movups (%rcx), SHASH
- PSHUFB_XMM BSWAP DATA
+ pshufb BSWAP, DATA
.align 4
.Lupdate_loop:
movups (%rsi), IN1
- PSHUFB_XMM BSWAP IN1
+ pshufb BSWAP, IN1
pxor IN1, DATA
call __clmul_gf128mul_ble
sub $16, %rdx
add $16, %rsi
cmp $16, %rdx
jge .Lupdate_loop
- PSHUFB_XMM BSWAP DATA
+ pshufb BSWAP, DATA
movups DATA, (%rdi)
.Lupdate_just_ret:
FRAME_END
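The three pclmulqdq lines in __clmul_gf128mul_ble are one Karatsuba step: two half products and one product of sums stand in for four multiplies, exactly as the comments state. A hedged intrinsics sketch of that 128x128 -> 256-bit carry-less multiply (GHASH's bit reflection and polynomial reduction are deliberately left out):

#include <wmmintrin.h>          /* build with -mpclmul */

/*
 * Karatsuba carry-less multiply: (hi:lo) = a * b over GF(2)[x], using
 * three PCLMULQDQs as the asm above does:
 *   lo  = a0*b0,  hi = a1*b1,
 *   mid = (a0^a1)*(b0^b1) ^ lo ^ hi  =  a0*b1 ^ a1*b0.
 */
static void clmul_karatsuba(__m128i a, __m128i b, __m128i *hi, __m128i *lo)
{
	__m128i a_fold = _mm_xor_si128(a, _mm_srli_si128(a, 8)); /* a0^a1 */
	__m128i b_fold = _mm_xor_si128(b, _mm_srli_si128(b, 8)); /* b0^b1 */
	__m128i l = _mm_clmulepi64_si128(a, b, 0x00);            /* a0*b0 */
	__m128i h = _mm_clmulepi64_si128(a, b, 0x11);            /* a1*b1 */
	__m128i m = _mm_clmulepi64_si128(a_fold, b_fold, 0x00);

	m = _mm_xor_si128(m, _mm_xor_si128(l, h));    /* a0*b1 ^ a1*b0 */
	*lo = _mm_xor_si128(l, _mm_slli_si128(m, 8)); /* add mid low half  */
	*hi = _mm_xor_si128(h, _mm_srli_si128(m, 8)); /* add mid high half */
}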
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index f5a796da07f8..438ccd4f3cc4 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -12,7 +12,6 @@
#define REG_TYPE_R32 0
#define REG_TYPE_R64 1
-#define REG_TYPE_XMM 2
#define REG_TYPE_INVALID 100
.macro R32_NUM opd r32
@@ -123,77 +122,18 @@
#endif
.endm
- .macro XMM_NUM opd xmm
- \opd = REG_NUM_INVALID
- .ifc \xmm,%xmm0
- \opd = 0
- .endif
- .ifc \xmm,%xmm1
- \opd = 1
- .endif
- .ifc \xmm,%xmm2
- \opd = 2
- .endif
- .ifc \xmm,%xmm3
- \opd = 3
- .endif
- .ifc \xmm,%xmm4
- \opd = 4
- .endif
- .ifc \xmm,%xmm5
- \opd = 5
- .endif
- .ifc \xmm,%xmm6
- \opd = 6
- .endif
- .ifc \xmm,%xmm7
- \opd = 7
- .endif
- .ifc \xmm,%xmm8
- \opd = 8
- .endif
- .ifc \xmm,%xmm9
- \opd = 9
- .endif
- .ifc \xmm,%xmm10
- \opd = 10
- .endif
- .ifc \xmm,%xmm11
- \opd = 11
- .endif
- .ifc \xmm,%xmm12
- \opd = 12
- .endif
- .ifc \xmm,%xmm13
- \opd = 13
- .endif
- .ifc \xmm,%xmm14
- \opd = 14
- .endif
- .ifc \xmm,%xmm15
- \opd = 15
- .endif
- .endm
-
.macro REG_TYPE type reg
R32_NUM reg_type_r32 \reg
R64_NUM reg_type_r64 \reg
- XMM_NUM reg_type_xmm \reg
.if reg_type_r64 <> REG_NUM_INVALID
\type = REG_TYPE_R64
.elseif reg_type_r32 <> REG_NUM_INVALID
\type = REG_TYPE_R32
- .elseif reg_type_xmm <> REG_NUM_INVALID
- \type = REG_TYPE_XMM
.else
\type = REG_TYPE_INVALID
.endif
.endm
- .macro PFX_OPD_SIZE
- .byte 0x66
- .endm
-
.macro PFX_REX opd1 opd2 W=0
.if ((\opd1 | \opd2) & 8) || \W
.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
@@ -203,109 +143,6 @@
.macro MODRM mod opd1 opd2
.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
.endm
-
- .macro PSHUFB_XMM xmm1 xmm2
- XMM_NUM pshufb_opd1 \xmm1
- XMM_NUM pshufb_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX pshufb_opd1 pshufb_opd2
- .byte 0x0f, 0x38, 0x00
- MODRM 0xc0 pshufb_opd1 pshufb_opd2
- .endm
-
- .macro PCLMULQDQ imm8 xmm1 xmm2
- XMM_NUM clmul_opd1 \xmm1
- XMM_NUM clmul_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX clmul_opd1 clmul_opd2
- .byte 0x0f, 0x3a, 0x44
- MODRM 0xc0 clmul_opd1 clmul_opd2
- .byte \imm8
- .endm
-
- .macro PEXTRD imm8 xmm gpr
- R32_NUM extrd_opd1 \gpr
- XMM_NUM extrd_opd2 \xmm
- PFX_OPD_SIZE
- PFX_REX extrd_opd1 extrd_opd2
- .byte 0x0f, 0x3a, 0x16
- MODRM 0xc0 extrd_opd1 extrd_opd2
- .byte \imm8
- .endm
-
- .macro AESKEYGENASSIST rcon xmm1 xmm2
- XMM_NUM aeskeygen_opd1 \xmm1
- XMM_NUM aeskeygen_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aeskeygen_opd1 aeskeygen_opd2
- .byte 0x0f, 0x3a, 0xdf
- MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2
- .byte \rcon
- .endm
-
- .macro AESIMC xmm1 xmm2
- XMM_NUM aesimc_opd1 \xmm1
- XMM_NUM aesimc_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesimc_opd1 aesimc_opd2
- .byte 0x0f, 0x38, 0xdb
- MODRM 0xc0 aesimc_opd1 aesimc_opd2
- .endm
-
- .macro AESENC xmm1 xmm2
- XMM_NUM aesenc_opd1 \xmm1
- XMM_NUM aesenc_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesenc_opd1 aesenc_opd2
- .byte 0x0f, 0x38, 0xdc
- MODRM 0xc0 aesenc_opd1 aesenc_opd2
- .endm
-
- .macro AESENCLAST xmm1 xmm2
- XMM_NUM aesenclast_opd1 \xmm1
- XMM_NUM aesenclast_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesenclast_opd1 aesenclast_opd2
- .byte 0x0f, 0x38, 0xdd
- MODRM 0xc0 aesenclast_opd1 aesenclast_opd2
- .endm
-
- .macro AESDEC xmm1 xmm2
- XMM_NUM aesdec_opd1 \xmm1
- XMM_NUM aesdec_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesdec_opd1 aesdec_opd2
- .byte 0x0f, 0x38, 0xde
- MODRM 0xc0 aesdec_opd1 aesdec_opd2
- .endm
-
- .macro AESDECLAST xmm1 xmm2
- XMM_NUM aesdeclast_opd1 \xmm1
- XMM_NUM aesdeclast_opd2 \xmm2
- PFX_OPD_SIZE
- PFX_REX aesdeclast_opd1 aesdeclast_opd2
- .byte 0x0f, 0x38, 0xdf
- MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
- .endm
-
- .macro MOVQ_R64_XMM opd1 opd2
- REG_TYPE movq_r64_xmm_opd1_type \opd1
- .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
- XMM_NUM movq_r64_xmm_opd1 \opd1
- R64_NUM movq_r64_xmm_opd2 \opd2
- .else
- R64_NUM movq_r64_xmm_opd1 \opd1
- XMM_NUM movq_r64_xmm_opd2 \opd2
- .endif
- PFX_OPD_SIZE
- PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
- .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
- .byte 0x0f, 0x7e
- .else
- .byte 0x0f, 0x6e
- .endif
- MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
- .endm
#endif
#endif
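What survives in inst.h is just the generic encoder plumbing: register numbering, an optional REX prefix, and the ModRM byte. A hedged C rendering of those two byte formulas for register-register forms, purely to illustrate what PFX_REX and MODRM compute:

#include <stdint.h>

/* Mirror of the PFX_REX and MODRM macros above, for reg-reg encodings:
 * REX   = 0x40 | B | (R << 2) | (W << 3), emitted only when needed;
 * ModRM = mod | r/m | (reg << 3), with mod = 0xc0 for register forms.
 * opd1 is the r/m register number, opd2 the reg-field register number.
 */
static int emit_rex_modrm(uint8_t *out, unsigned opd1, unsigned opd2, int w)
{
	int n = 0;

	if (((opd1 | opd2) & 8) || w)
		out[n++] = 0x40 | ((opd1 & 8) >> 3) |
			   ((opd2 & 8) >> 1) | (w << 3);
	out[n++] = 0xc0 | (opd1 & 7) | ((opd2 & 7) << 3);
	return n;
}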
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 091c0a0bbf26..1b57419fa2e7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -548,7 +548,7 @@ config CRYPTO_XCBC
select CRYPTO_MANAGER
help
XCBC: Keyed-Hashing with encryption algorithm
- http://www.ietf.org/rfc/rfc3566.txt
+ https://www.ietf.org/rfc/rfc3566.txt
http://csrc.nist.gov/encryption/modes/proposedmodes/
xcbc-mac/xcbc-mac-spec.pdf
@@ -561,7 +561,7 @@ config CRYPTO_VMAC
very high speed on 64-bit architectures.
See also:
- <http://fastcrypto.org/vmac>
+ <https://fastcrypto.org/vmac>
comment "Digest"
@@ -816,7 +816,7 @@ config CRYPTO_RMD128
RIPEMD-160 should be used.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD160
tristate "RIPEMD-160 digest algorithm"
@@ -833,7 +833,7 @@ config CRYPTO_RMD160
against RIPEMD-160.
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD256
tristate "RIPEMD-256 digest algorithm"
@@ -845,7 +845,7 @@ config CRYPTO_RMD256
(than RIPEMD-128).
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_RMD320
tristate "RIPEMD-320 digest algorithm"
@@ -857,7 +857,7 @@ config CRYPTO_RMD320
(than RIPEMD-160).
Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
- See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
+ See <https://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
@@ -1045,7 +1045,7 @@ config CRYPTO_TGR192
Tiger was developed by Ross Anderson and Eli Biham.
See also:
- <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
+ <https://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
config CRYPTO_WP512
tristate "Whirlpool digest algorithms"
@@ -1221,7 +1221,7 @@ config CRYPTO_BLOWFISH
designed for use on "large microprocessors".
See also:
- <http://www.schneier.com/blowfish.html>
+ <https://www.schneier.com/blowfish.html>
config CRYPTO_BLOWFISH_COMMON
tristate
@@ -1230,7 +1230,7 @@ config CRYPTO_BLOWFISH_COMMON
generic c and the assembler implementations.
See also:
- <http://www.schneier.com/blowfish.html>
+ <https://www.schneier.com/blowfish.html>
config CRYPTO_BLOWFISH_X86_64
tristate "Blowfish cipher algorithm (x86_64)"
@@ -1245,7 +1245,7 @@ config CRYPTO_BLOWFISH_X86_64
designed for use on "large microprocessors".
See also:
- <http://www.schneier.com/blowfish.html>
+ <https://www.schneier.com/blowfish.html>
config CRYPTO_CAMELLIA
tristate "Camellia cipher algorithms"
@@ -1441,10 +1441,10 @@ config CRYPTO_SALSA20
Salsa20 stream cipher algorithm.
Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
- Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
+ Stream Cipher Project. See <https://www.ecrypt.eu.org/stream/>
The Salsa20 stream cipher algorithm is designed by Daniel J.
- Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
+ Bernstein <djb@cr.yp.to>. See <https://cr.yp.to/snuffle.html>
config CRYPTO_CHACHA20
tristate "ChaCha stream cipher algorithms"
@@ -1456,7 +1456,7 @@ config CRYPTO_CHACHA20
ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
Bernstein and further specified in RFC7539 for use in IETF protocols.
This is the portable C implementation of ChaCha20. See also:
- <http://cr.yp.to/chacha/chacha-20080128.pdf>
+ <https://cr.yp.to/chacha/chacha-20080128.pdf>
XChaCha20 is the application of the XSalsa20 construction to ChaCha20
rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length
@@ -1509,7 +1509,7 @@ config CRYPTO_SERPENT
variant of Serpent for compatibility with old kerneli.org code.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Serpent cipher algorithm (x86_64/SSE2)"
@@ -1528,7 +1528,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
blocks parallel using SSE2 instruction set.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_SSE2_586
tristate "Serpent cipher algorithm (i586/SSE2)"
@@ -1547,7 +1547,7 @@ config CRYPTO_SERPENT_SSE2_586
blocks parallel using SSE2 instruction set.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_AVX_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX)"
@@ -1567,7 +1567,7 @@ config CRYPTO_SERPENT_AVX_X86_64
eight blocks parallel using the AVX instruction set.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SERPENT_AVX2_X86_64
tristate "Serpent cipher algorithm (x86_64/AVX2)"
@@ -1583,7 +1583,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
blocks parallel using AVX2 instruction set.
See also:
- <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+ <https://www.cl.cam.ac.uk/~rja14/serpent.html>
config CRYPTO_SM4
tristate "SM4 cipher algorithm"
@@ -1640,7 +1640,7 @@ config CRYPTO_TWOFISH
bits.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_COMMON
tristate
@@ -1662,7 +1662,7 @@ config CRYPTO_TWOFISH_586
bits.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_X86_64
tristate "Twofish cipher algorithm (x86_64)"
@@ -1678,7 +1678,7 @@ config CRYPTO_TWOFISH_X86_64
bits.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
@@ -1699,7 +1699,7 @@ config CRYPTO_TWOFISH_X86_64_3WAY
blocks parallel, utilizing resources of out-of-order CPUs better.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_AVX_X86_64
tristate "Twofish cipher algorithm (x86_64/AVX)"
@@ -1722,7 +1722,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
eight blocks parallel using the AVX Instruction Set.
See also:
- <http://www.schneier.com/twofish.html>
+ <https://www.schneier.com/twofish.html>
comment "Compression"
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 84a76723e851..c32c72048a1c 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
+ u32 mask, int node)
+{
+ return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
+ node);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
+
struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
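crypto_alloc_acomp_node() simply threads a NUMA node down to the tfm allocation, so callers that know where their data lives can keep the transform's memory local. A hedged kernel-style sketch of the intended call pattern ("deflate" and the helper name are illustrative only):

#include <crypto/acompress.h>
#include <linux/topology.h>

/* Allocate a compressor whose tfm memory lands on the NUMA node backing
 * @cpu; passing NUMA_NO_NODE instead would express "no preference". */
static struct crypto_acomp *alloc_acomp_for_cpu(const char *alg, int cpu)
{
	return crypto_alloc_acomp_node(alg, 0, 0, cpu_to_node(cpu));
}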
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index cf2b9f4103dd..7fbdc3270984 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -490,7 +490,6 @@ static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
const char *nhpoly1305_name;
struct skcipher_instance *inst;
@@ -500,14 +499,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_alg *hash_alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
@@ -565,8 +559,6 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags &
- CRYPTO_ALG_ASYNC;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 28fc323e3fe3..5882ed46f1ad 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -635,6 +635,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
if (!ctx->used)
ctx->merge = 0;
+ ctx->init = ctx->more;
}
EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
@@ -734,9 +735,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
*
* @sk socket of connection to user space
* @flags If MSG_DONTWAIT is set, then only report if function would sleep
+ * @min Set to minimum request size if partial requests are allowed.
* @return 0 when writable memory is available, < 0 upon error
*/
-int af_alg_wait_for_data(struct sock *sk, unsigned flags)
+int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct alg_sock *ask = alg_sk(sk);
@@ -754,7 +756,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags)
if (signal_pending(current))
break;
timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
+ if (sk_wait_event(sk, &timeout,
+ ctx->init && (!ctx->more ||
+ (min && ctx->used >= min)),
&wait)) {
err = 0;
break;
@@ -843,10 +847,11 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
}
lock_sock(sk);
- if (!ctx->more && ctx->used) {
+ if (ctx->init && (init || !ctx->more)) {
err = -EINVAL;
goto unlock;
}
+ ctx->init = true;
if (init) {
ctx->enc = enc;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 92abdf675992..fdabf2675b63 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -690,6 +690,8 @@ int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
spawn->mask = mask;
spawn->next = inst->spawns;
inst->spawns = spawn;
+ inst->alg.cra_flags |=
+ (alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
err = 0;
}
up_write(&crypto_alg_sem);
@@ -816,7 +818,23 @@ struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb)
}
EXPORT_SYMBOL_GPL(crypto_get_attr_type);
-int crypto_check_attr_type(struct rtattr **tb, u32 type)
+/**
+ * crypto_check_attr_type() - check algorithm type and compute inherited mask
+ * @tb: the template parameters
+ * @type: the algorithm type the template would be instantiated as
+ * @mask_ret: (output) the mask that should be passed to crypto_grab_*()
+ * to restrict the flags of any inner algorithms
+ *
+ * Validate that the algorithm type the user requested is compatible with the
+ * one the template would actually be instantiated as. E.g., if the user is
+ * doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because
+ * the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm.
+ *
+ * Also compute the mask to use to restrict the flags of any inner algorithms.
+ *
+ * Return: 0 on success; -errno on failure
+ */
+int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret)
{
struct crypto_attr_type *algt;
@@ -827,6 +845,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type)
if ((algt->type ^ type) & algt->mask)
return -EINVAL;
+ *mask_ret = crypto_algt_inherited_mask(algt);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_check_attr_type);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 0ae000a61c7f..d48d2156e621 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t usedpages = 0; /* [in] RX bufs to be used from user */
size_t processed = 0; /* [in] TX bufs to be consumed */
- if (!ctx->used) {
- err = af_alg_wait_for_data(sk, flags);
+ if (!ctx->init || ctx->more) {
+ err = af_alg_wait_for_data(sk, flags, 0);
if (err)
return err;
}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ec5567c87a6d..a51ba22fef58 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
int err = 0;
size_t len = 0;
- if (!ctx->used) {
- err = af_alg_wait_for_data(sk, flags);
+ if (!ctx->init || (ctx->more && ctx->used < bs)) {
+ err = af_alg_wait_for_data(sk, flags, bs);
if (err)
return err;
}
diff --git a/crypto/api.c b/crypto/api.c
index edcf690800d4..5d8fe60b36c1 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -433,8 +433,9 @@ err:
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
-void *crypto_create_tfm(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+void *crypto_create_tfm_node(struct crypto_alg *alg,
+ const struct crypto_type *frontend,
+ int node)
{
char *mem;
struct crypto_tfm *tfm = NULL;
@@ -445,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg,
tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
- mem = kzalloc(total, GFP_KERNEL);
+ mem = kzalloc_node(total, GFP_KERNEL, node);
if (mem == NULL)
goto out_err;
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
+ tfm->node = node;
err = frontend->init_tfm(tfm);
if (err)
@@ -472,7 +474,7 @@ out_err:
out:
return mem;
}
-EXPORT_SYMBOL_GPL(crypto_create_tfm);
+EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
@@ -490,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
EXPORT_SYMBOL_GPL(crypto_find_alg);
/*
- * crypto_alloc_tfm - Locate algorithm and allocate transform
+ * crypto_alloc_tfm_node - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
* @frontend: Frontend algorithm type
* @type: Type of algorithm
* @mask: Mask for type comparison
+ * @node: NUMA node on which users want requests to be allocated; if node
+ * is NUMA_NO_NODE, users have no special locality requirement.
*
* crypto_alloc_tfm() will first attempt to locate an already loaded
* algorithm. If that fails and the kernel supports dynamically loadable
@@ -509,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg);
*
* In case of error the return value is an error pointer.
*/
-void *crypto_alloc_tfm(const char *alg_name,
- const struct crypto_type *frontend, u32 type, u32 mask)
+
+void *crypto_alloc_tfm_node(const char *alg_name,
+ const struct crypto_type *frontend, u32 type, u32 mask,
+ int node)
{
void *tfm;
int err;
@@ -524,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name,
goto err;
}
- tfm = crypto_create_tfm(alg, frontend);
+ tfm = crypto_create_tfm_node(alg, frontend, node);
if (!IS_ERR(tfm))
return tfm;
@@ -542,7 +548,7 @@ err:
return ERR_PTR(err);
}
-EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
+EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);
/*
* crypto_destroy_tfm - Free crypto transform
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 775e7138fd10..670bf1a01d00 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -372,7 +372,6 @@ static void crypto_authenc_free(struct aead_instance *inst)
static int crypto_authenc_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct authenc_instance_ctx *ctx;
@@ -381,14 +380,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
struct skcipher_alg *enc;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -423,8 +417,6 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = (auth_base->cra_flags |
- enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 149b70df2a91..b60e61b1904c 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -390,7 +390,6 @@ static void crypto_authenc_esn_free(struct aead_instance *inst)
static int crypto_authenc_esn_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct authenc_esn_instance_ctx *ctx;
@@ -399,14 +398,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
struct skcipher_alg *enc;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -437,8 +431,6 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = (auth_base->cra_flags |
- enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c
index 0ffd8d92e308..a2ffe60e06d3 100644
--- a/crypto/blake2b_generic.c
+++ b/crypto/blake2b_generic.c
@@ -8,7 +8,7 @@
*
* - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
* - OpenSSL license : https://www.openssl.org/source/license.html
- * - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
+ * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
*
* More information about the BLAKE2 hash function can be found at
* https://blake2.net.
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 9a5783e5196a..0b9f409f7370 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -6,7 +6,7 @@
/*
* Algorithm Specification
- * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
+ * https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
*/
/*
diff --git a/crypto/ccm.c b/crypto/ccm.c
index d1fb01bbc814..494d70901186 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -447,7 +447,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
const char *ctr_name,
const char *mac_name)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct ccm_instance_ctx *ictx;
@@ -455,14 +454,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
struct hash_alg_common *mac;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
@@ -470,7 +464,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
ictx = aead_instance_ctx(inst);
err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst),
- mac_name, 0, CRYPTO_ALG_ASYNC);
+ mac_name, 0, mask | CRYPTO_ALG_ASYNC);
if (err)
goto err_free_inst;
mac = crypto_spawn_ahash_alg(&ictx->mac);
@@ -507,7 +501,6 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (mac->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
@@ -712,21 +705,15 @@ static void crypto_rfc4309_free(struct aead_instance *inst)
static int crypto_rfc4309_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -759,7 +746,6 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
@@ -878,9 +864,10 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
+ u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -890,7 +877,7 @@ static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index ccaea5cb66d1..97bbb135e9a6 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -555,7 +555,6 @@ static void chachapoly_free(struct aead_instance *inst)
static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
const char *name, unsigned int ivsize)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct chachapoly_instance_ctx *ctx;
@@ -566,14 +565,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
if (ivsize > CHACHAPOLY_IV_SIZE)
return -EINVAL;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -613,8 +607,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = (chacha->base.cra_flags |
- poly->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (chacha->base.cra_priority +
poly->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 143a6544c873..df36be1efb81 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -225,9 +225,10 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
unsigned long alignmask;
+ u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -237,7 +238,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 283212262adb..a1bea0f4baa8 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -191,17 +191,20 @@ static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
return ictx->queue;
}
-static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
- u32 *mask)
+static void cryptd_type_and_mask(struct crypto_attr_type *algt,
+ u32 *type, u32 *mask)
{
- struct crypto_attr_type *algt;
+ /*
+ * cryptd is allowed to wrap internal algorithms, but in that case the
+ * resulting cryptd instance will be marked as internal as well.
+ */
+ *type = algt->type & CRYPTO_ALG_INTERNAL;
+ *mask = algt->mask & CRYPTO_ALG_INTERNAL;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return;
+ /* No point in cryptd wrapping an algorithm that's already async. */
+ *mask |= CRYPTO_ALG_ASYNC;
- *type |= algt->type & CRYPTO_ALG_INTERNAL;
- *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
+ *mask |= crypto_algt_inherited_mask(algt);
}
static int cryptd_init_instance(struct crypto_instance *inst,
@@ -364,6 +367,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst)
static int cryptd_create_skcipher(struct crypto_template *tmpl,
struct rtattr **tb,
+ struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct skcipherd_instance_ctx *ctx;
@@ -373,10 +377,7 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
u32 mask;
int err;
- type = 0;
- mask = CRYPTO_ALG_ASYNC;
-
- cryptd_check_internal(tb, &type, &mask);
+ cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -395,9 +396,8 @@ static int cryptd_create_skcipher(struct crypto_template *tmpl,
if (err)
goto err_free_inst;
- inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
-
+ inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
@@ -633,16 +633,17 @@ static void cryptd_hash_free(struct ahash_instance *inst)
}
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct hashd_instance_ctx *ctx;
struct ahash_instance *inst;
struct shash_alg *alg;
- u32 type = 0;
- u32 mask = 0;
+ u32 type;
+ u32 mask;
int err;
- cryptd_check_internal(tb, &type, &mask);
+ cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -661,10 +662,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto err_free_inst;
- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
+ inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
CRYPTO_ALG_OPTIONAL_KEY));
-
inst->alg.halg.digestsize = alg->digestsize;
inst->alg.halg.statesize = alg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
@@ -820,16 +820,17 @@ static void cryptd_aead_free(struct aead_instance *inst)
static int cryptd_create_aead(struct crypto_template *tmpl,
struct rtattr **tb,
+ struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct aead_instance_ctx *ctx;
struct aead_instance *inst;
struct aead_alg *alg;
- u32 type = 0;
- u32 mask = CRYPTO_ALG_ASYNC;
+ u32 type;
+ u32 mask;
int err;
- cryptd_check_internal(tb, &type, &mask);
+ cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -848,8 +849,8 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
if (err)
goto err_free_inst;
- inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+ inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
@@ -884,11 +885,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- return cryptd_create_skcipher(tmpl, tb, &queue);
+ return cryptd_create_skcipher(tmpl, tb, algt, &queue);
case CRYPTO_ALG_TYPE_HASH:
- return cryptd_create_hash(tmpl, tb, &queue);
+ return cryptd_create_hash(tmpl, tb, algt, &queue);
case CRYPTO_ALG_TYPE_AEAD:
- return cryptd_create_aead(tmpl, tb, &queue);
+ return cryptd_create_aead(tmpl, tb, algt, &queue);
}
return -EINVAL;
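cryptd_type_and_mask() now derives the lookup constraints in one place: the INTERNAL bit is passed through from the caller, ASYNC is put in the mask but not the type (so only synchronous base algorithms match), and the user's inheritable bits are appended. A standalone sketch of that computation, again with placeholder flag values:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative placeholder values, not the kernel's definitions. */
    #define ALG_ASYNC    0x00000080
    #define ALG_INTERNAL 0x00002000

    /* Sketch of cryptd_type_and_mask(): INTERNAL passes through, ASYNC goes
     * into the mask only (type has it clear), so the lookup matches only
     * synchronous base algorithms, plus whatever the user may inherit. */
    static void type_and_mask(uint32_t algt_type, uint32_t algt_mask,
                              uint32_t inherited,
                              uint32_t *type, uint32_t *mask)
    {
        *type = algt_type & ALG_INTERNAL;
        *mask = algt_mask & ALG_INTERNAL;
        *mask |= ALG_ASYNC;   /* no point wrapping an already-async alg */
        *mask |= inherited;   /* crypto_algt_inherited_mask(algt) */
    }

    int main(void)
    {
        uint32_t t, m;

        type_and_mask(ALG_INTERNAL, ALG_INTERNAL, 0, &t, &m);
        printf("type=%#x mask=%#x\n", t, m); /* type=0x2000 mask=0x2080 */
        return 0;
    }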
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 31ac4ae598e1..c39fcffba27f 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -256,29 +256,20 @@ static void crypto_rfc3686_free(struct skcipher_instance *inst)
static int crypto_rfc3686_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
struct skcipher_instance *inst;
struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
u32 mask;
-
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
- mask = crypto_requires_sync(algt->type, algt->mask) |
- crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
-
spawn = skcipher_instance_ctx(inst);
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
@@ -310,8 +301,6 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
-
inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
diff --git a/crypto/cts.c b/crypto/cts.c
index 5e005c4f0221..3766d47ebcc0 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -325,19 +325,13 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
struct skcipher_alg *alg;
u32 mask;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -364,7 +358,6 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
if (err)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
diff --git a/crypto/dh.c b/crypto/dh.c
index 566f624a2de2..cd4f32092e5c 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -9,6 +9,7 @@
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/dh.h>
+#include <linux/fips.h>
#include <linux/mpi.h>
struct dh_ctx {
@@ -179,6 +180,43 @@ static int dh_compute_value(struct kpp_request *req)
if (ret)
goto err_free_base;
+ if (fips_enabled) {
+ /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */
+ if (req->src) {
+ MPI pone;
+
+ /* z <= 1 */
+ if (mpi_cmp_ui(val, 1) < 1) {
+ ret = -EBADMSG;
+ goto err_free_base;
+ }
+
+ /* z == p - 1 */
+ pone = mpi_alloc(0);
+
+ if (!pone) {
+ ret = -ENOMEM;
+ goto err_free_base;
+ }
+
+ ret = mpi_sub_ui(pone, ctx->p, 1);
+ if (!ret && !mpi_cmp(pone, val))
+ ret = -EBADMSG;
+
+ mpi_free(pone);
+
+ if (ret)
+ goto err_free_base;
+
+ /* SP800-56A rev 3 5.6.2.1.3 key check */
+ } else {
+ if (dh_is_pubkey_valid(ctx, val)) {
+ ret = -EAGAIN;
+ goto err_free_val;
+ }
+ }
+ }
+
ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_base;
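The fips_enabled branch above enforces SP800-56A rev3 5.7.1.1: a shared secret z is rejected unless 1 < z < p - 1. A self-contained restatement of the range check with machine integers standing in for MPIs (the toy modulus and error values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Sketch of the SP800-56A rev3 5.7.1.1 shared-secret check: reject
     * z <= 1 and z == p - 1, mirroring the mpi_cmp_ui()/mpi_sub_ui()
     * logic above with plain integers. */
    static int validate_shared_secret(uint64_t z, uint64_t p)
    {
        if (z <= 1)
            return -1;     /* -EBADMSG in the kernel */
        if (z == p - 1)
            return -1;     /* -EBADMSG in the kernel */
        return 0;
    }

    int main(void)
    {
        const uint64_t p = 23; /* toy modulus */

        assert(validate_shared_secret(0, p) < 0);
        assert(validate_shared_secret(1, p) < 0);
        assert(validate_shared_secret(22, p) < 0); /* p - 1 */
        assert(validate_shared_secret(9, p) == 0);
        return 0;
    }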
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 02d35be7702b..8acf8433ca29 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -940,7 +940,7 @@ static bool ecc_point_is_zero(const struct ecc_point *point)
}
/* Point multiplication algorithm using Montgomery's ladder with co-Z
- * coordinates. From http://eprint.iacr.org/2011/338.pdf
+ * coordinates. From https://eprint.iacr.org/2011/338.pdf
*/
/* Double in place */
@@ -1404,7 +1404,9 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
}
ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
- if (ecc_point_is_zero(pk)) {
+
+ /* SP800-56A rev 3 5.6.2.1.3 key check */
+ if (ecc_is_pubkey_valid_full(curve, pk)) {
ret = -EAGAIN;
goto err_free_point;
}
@@ -1452,6 +1454,33 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
}
EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
+/* SP800-56A section 5.6.2.3.3 full verification */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ struct ecc_point *nQ;
+
+ /* Checks 1 through 3 */
+ int ret = ecc_is_pubkey_valid_partial(curve, pk);
+
+ if (ret)
+ return ret;
+
+ /* Check 4: Verify that nQ is the zero point. */
+ nQ = ecc_alloc_point(pk->ndigits);
+ if (!nQ)
+ return -ENOMEM;
+
+ ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits);
+ if (!ecc_point_is_zero(nQ))
+ ret = -EINVAL;
+
+ ecc_free_point(nQ);
+
+ return ret;
+}
+EXPORT_SYMBOL(ecc_is_pubkey_valid_full);
+
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret)
@@ -1495,11 +1524,16 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
- ecc_swap_digits(product->x, secret, ndigits);
-
- if (ecc_point_is_zero(product))
+ if (ecc_point_is_zero(product)) {
ret = -EFAULT;
+ goto err_validity;
+ }
+
+ ecc_swap_digits(product->x, secret, ndigits);
+err_validity:
+ memzero_explicit(priv, sizeof(priv));
+ memzero_explicit(rand_z, sizeof(rand_z));
ecc_free_point(product);
err_alloc_product:
ecc_free_point(pk);
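ecc_is_pubkey_valid_full() adds SP800-56A check 4 on top of the partial validation: multiplying the candidate key Q by the curve order n must yield the point at infinity. A runnable toy replica on the textbook curve y^2 = x^3 + 2x + 2 over GF(17), whose group has prime order n = 19; none of this is kernel code, it only mirrors the two validation layers:

    #include <assert.h>
    #include <stdio.h>

    /* Toy model: y^2 = x^3 + 2x + 2 over GF(17), group order N = 19,
     * generator (5,1). Affine arithmetic only. */
    #define P 17
    #define A 2
    #define B 2
    #define N 19

    struct pt { int x, y, inf; };

    static int mod(int v) { return ((v % P) + P) % P; }

    static int inv(int a) /* a^(P-2) mod P, Fermat's little theorem */
    {
        int r = 1, e = P - 2;

        a = mod(a);
        while (e) {
            if (e & 1)
                r = mod(r * a);
            a = mod(a * a);
            e >>= 1;
        }
        return r;
    }

    static struct pt add(struct pt p, struct pt q)
    {
        struct pt o = { 0, 0, 1 };
        int l;

        if (p.inf) return q;
        if (q.inf) return p;
        if (p.x == q.x && mod(p.y + q.y) == 0)
            return o;                 /* Q + (-Q) = infinity */
        if (p.x == q.x)
            l = mod(mod(3 * p.x * p.x + A) * inv(2 * p.y)); /* double */
        else
            l = mod(mod(q.y - p.y) * inv(q.x - p.x));
        o.x = mod(l * l - p.x - q.x);
        o.y = mod(l * (p.x - o.x) - p.y);
        o.inf = 0;
        return o;
    }

    static struct pt mul(int k, struct pt p) /* double-and-add */
    {
        struct pt r = { 0, 0, 1 };

        while (k) {
            if (k & 1)
                r = add(r, p);
            p = add(p, p);
            k >>= 1;
        }
        return r;
    }

    static int on_curve(struct pt p) /* the partial-validation analogue */
    {
        return !p.inf &&
               mod(p.y * p.y) == mod(p.x * p.x * p.x + A * p.x + B);
    }

    int main(void)
    {
        struct pt g = { 5, 1, 0 };     /* valid public key */
        struct pt bogus = { 5, 2, 0 }; /* fails the on-curve check */

        assert(on_curve(g));
        assert(mul(N, g).inf);         /* check 4: n*Q == infinity */
        assert(!on_curve(bogus));      /* caught by checks 1-3 */
        printf("toy full validation passed\n");
        return 0;
    }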
diff --git a/crypto/ecc.h b/crypto/ecc.h
index ab0eb70b9c09..d4e546b9ad79 100644
--- a/crypto/ecc.h
+++ b/crypto/ecc.h
@@ -148,6 +148,20 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
struct ecc_point *pk);
/**
+ * ecc_is_pubkey_valid_full() - Full public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full
+ * Public-Key Validation Routine.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
* vli_is_zero() - Determine if vli is zero
*
* @vli: vli to check.
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 4a2f02baba14..69686668625e 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -115,7 +115,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
struct aead_instance *inst;
int err;
- inst = aead_geniv_alloc(tmpl, tb, 0, 0);
+ inst = aead_geniv_alloc(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
diff --git a/crypto/essiv.c b/crypto/essiv.c
index a7f45dbc4ee2..d012be23d496 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -466,7 +466,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(shash_name);
type = algt->type & algt->mask;
- mask = crypto_requires_sync(algt->type, algt->mask);
+ mask = crypto_algt_inherited_mask(algt);
switch (type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
@@ -525,7 +525,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Synchronous hash, e.g., "sha256" */
_hash_alg = crypto_alg_mod_lookup(shash_name,
CRYPTO_ALG_TYPE_SHASH,
- CRYPTO_ALG_TYPE_MASK);
+ CRYPTO_ALG_TYPE_MASK | mask);
if (IS_ERR(_hash_alg)) {
err = PTR_ERR(_hash_alg);
goto out_drop_skcipher;
@@ -557,7 +557,12 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_hash;
- base->cra_flags = block_base->cra_flags & CRYPTO_ALG_ASYNC;
+ /*
+ * hash_alg wasn't obtained via crypto_grab*(), so we need to inherit its
+ * flags manually.
+ */
+ base->cra_flags |= (hash_alg->base.cra_flags &
+ CRYPTO_ALG_INHERITED_FLAGS);
base->cra_blocksize = block_base->cra_blocksize;
base->cra_ctxsize = sizeof(struct essiv_tfm_ctx);
base->cra_alignmask = block_base->cra_alignmask;
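essiv is the one template in this series that cannot rely on crypto_grab_*() to propagate flags, since it looks the hash up via crypto_alg_mod_lookup(); hence the manual OR shown above. The propagation pattern in isolation, with an illustrative flag set (this hunk does not show the full definition of CRYPTO_ALG_INHERITED_FLAGS):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag values; ALG_INHERITED stands in for
     * CRYPTO_ALG_INHERITED_FLAGS, whose definition is not shown here. */
    #define ALG_ASYNC     0x00000080
    #define ALG_INHERITED ALG_ASYNC

    int main(void)
    {
        uint32_t hash_flags = ALG_ASYNC; /* e.g. an async hash was found */
        uint32_t inst_flags = 0;

        /* As in essiv_create(): OR in only the inheritable subset, rather
         * than assigning, so bits inherited earlier are preserved. */
        inst_flags |= hash_flags & ALG_INHERITED;

        printf("instance flags: %#x\n", inst_flags); /* 0x80 */
        return 0;
    }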
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 0103d28c541e..3a36a9533c96 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -578,7 +578,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
const char *ctr_name,
const char *ghash_name)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct gcm_instance_ctx *ctx;
@@ -586,14 +585,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct hash_alg_common *ghash;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -635,8 +629,6 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = (ghash->base.cra_flags |
- ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
@@ -835,21 +827,15 @@ static void crypto_rfc4106_free(struct aead_instance *inst)
static int crypto_rfc4106_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -882,7 +868,6 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
@@ -1057,21 +1042,15 @@ static void crypto_rfc4543_free(struct aead_instance *inst)
static int crypto_rfc4543_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct aead_instance *inst;
struct aead_alg *alg;
struct crypto_rfc4543_instance_ctx *ctx;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -1104,7 +1083,6 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
diff --git a/crypto/geniv.c b/crypto/geniv.c
index 6a90c52d49ad..bee4621b4f12 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -39,22 +39,19 @@ static void aead_geniv_free(struct aead_instance *inst)
}
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type, u32 mask)
+ struct rtattr **tb)
{
struct crypto_aead_spawn *spawn;
- struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
unsigned int ivsize;
unsigned int maxauthsize;
+ u32 mask;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return ERR_PTR(-EINVAL);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err)
+ return ERR_PTR(err);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
@@ -62,11 +59,8 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
spawn = aead_instance_ctx(inst);
- /* Ignore async algorithms if necessary. */
- mask |= crypto_requires_sync(algt->type, algt->mask);
-
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), type, mask);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
@@ -89,7 +83,6 @@ struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e38bfb948278..25856aa7ccbf 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -168,11 +168,12 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_shash_spawn *spawn;
struct crypto_alg *alg;
struct shash_alg *salg;
+ u32 mask;
int err;
int ds;
int ss;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -182,7 +183,7 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
salg = crypto_spawn_shash_alg(spawn);
diff --git a/crypto/internal.h b/crypto/internal.h
index ff06a3bd1ca1..1b92a5a61852 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -68,13 +68,28 @@ void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
-void *crypto_create_tfm(struct crypto_alg *alg,
- const struct crypto_type *frontend);
+void *crypto_create_tfm_node(struct crypto_alg *alg,
+ const struct crypto_type *frontend, int node);
+
+static inline void *crypto_create_tfm(struct crypto_alg *alg,
+ const struct crypto_type *frontend)
+{
+ return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE);
+}
+
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask);
-void *crypto_alloc_tfm(const char *alg_name,
- const struct crypto_type *frontend, u32 type, u32 mask);
+
+void *crypto_alloc_tfm_node(const char *alg_name,
+ const struct crypto_type *frontend, u32 type, u32 mask,
+ int node);
+
+static inline void *crypto_alloc_tfm(const char *alg_name,
+ const struct crypto_type *frontend, u32 type, u32 mask)
+{
+ return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE);
+}
int crypto_probing_notify(unsigned long val, void *v);
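The internal.h change keeps the old allocator names as inline shims over new _node variants, so NUMA-aware callers can request memory on a specific node while every existing caller compiles unchanged via NUMA_NO_NODE. The wrapper-with-default pattern, sketched as a standalone program (the stub names are hypothetical, not kernel API):

    #include <stdio.h>

    #define NUMA_NO_NODE (-1)

    /* Node-aware worker: the real crypto_alloc_tfm_node() would pass the
     * node down to the underlying allocator (e.g. kzalloc_node()). */
    static void *alloc_tfm_node(const char *name, int node)
    {
        printf("alloc %s on node %d\n", name, node);
        return (void *)name; /* stand-in for a tfm */
    }

    /* Old entry point kept as an inline wrapper, exactly like the
     * crypto_alloc_tfm() shim above: existing callers stay unchanged. */
    static inline void *alloc_tfm(const char *name)
    {
        return alloc_tfm_node(name, NUMA_NO_NODE);
    }

    int main(void)
    {
        alloc_tfm("cbc(aes)");          /* legacy path, no preference */
        alloc_tfm_node("cbc(aes)", 0);  /* NUMA-aware caller */
        return 0;
    }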
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index 57f4a1ac738b..6e147c43fc18 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -7,7 +7,7 @@
* Design
* ======
*
- * See http://www.chronox.de/jent.html
+ * See https://www.chronox.de/jent.html
*
* License
* =======
@@ -47,7 +47,7 @@
/*
* This Jitterentropy RNG is based on the jitterentropy library
- * version 2.2.0 provided at http://www.chronox.de/jent.html
+ * version 2.2.0 provided at https://www.chronox.de/jent.html
*/
#ifdef __OPTIMIZE__
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 5b07a7c09296..bcf09fbc750a 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -9,7 +9,7 @@
*/
/* This implementation is checked against the test vectors in the above
* document and by a test vector provided by Ken Buchanan at
- * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
+ * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
*
* The test vectors are included in the testing module tcrypt.[ch] */
@@ -27,7 +27,7 @@
#define LRW_BLOCK_SIZE 16
-struct priv {
+struct lrw_tfm_ctx {
struct crypto_skcipher *child;
/*
@@ -49,12 +49,12 @@ struct priv {
be128 mulinc[128];
};
-struct rctx {
+struct lrw_request_ctx {
be128 t;
struct skcipher_request subreq;
};
-static inline void setbit128_bbe(void *b, int bit)
+static inline void lrw_setbit128_bbe(void *b, int bit)
{
__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
@@ -65,10 +65,10 @@ static inline void setbit128_bbe(void *b, int bit)
), b);
}
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
+static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
{
- struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child = ctx->child;
int err, bsize = LRW_BLOCK_SIZE;
const u8 *tweak = key + keylen - bsize;
@@ -92,7 +92,7 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
/* initialize optimization table */
for (i = 0; i < 128; i++) {
- setbit128_bbe(&tmp, i);
+ lrw_setbit128_bbe(&tmp, i);
ctx->mulinc[i] = tmp;
gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
}
@@ -108,10 +108,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
* For example:
*
* u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
- * int i = next_index(&counter);
+ * int i = lrw_next_index(&counter);
* // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
*/
-static int next_index(u32 *counter)
+static int lrw_next_index(u32 *counter)
{
int i, res = 0;
@@ -135,14 +135,14 @@ static int next_index(u32 *counter)
* We compute the tweak masks twice (both before and after the ECB encryption or
* decryption) to avoid having to allocate a temporary buffer and/or make
* multiple calls to the 'ecb(..)' instance, which usually would be slower than
- * just doing the next_index() calls again.
+ * just doing the lrw_next_index() calls again.
*/
-static int xor_tweak(struct skcipher_request *req, bool second_pass)
+static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
const int bs = LRW_BLOCK_SIZE;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct priv *ctx = crypto_skcipher_ctx(tfm);
- struct rctx *rctx = skcipher_request_ctx(req);
+ const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
be128 t = rctx->t;
struct skcipher_walk w;
__be32 *iv;
@@ -178,7 +178,8 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
/* T <- I*Key2, using the optimization
* discussed in the specification */
- be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
+ be128_xor(&t, &t,
+ &ctx->mulinc[lrw_next_index(counter)]);
} while ((avail -= bs) >= bs);
if (second_pass && w.nbytes == w.total) {
@@ -194,38 +195,40 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
return err;
}
-static int xor_tweak_pre(struct skcipher_request *req)
+static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
- return xor_tweak(req, false);
+ return lrw_xor_tweak(req, false);
}
-static int xor_tweak_post(struct skcipher_request *req)
+static int lrw_xor_tweak_post(struct skcipher_request *req)
{
- return xor_tweak(req, true);
+ return lrw_xor_tweak(req, true);
}
-static void crypt_done(struct crypto_async_request *areq, int err)
+static void lrw_crypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
if (!err) {
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- err = xor_tweak_post(req);
+ err = lrw_xor_tweak_post(req);
}
skcipher_request_complete(req, err);
}
-static void init_crypt(struct skcipher_request *req)
+static void lrw_init_crypt(struct skcipher_request *req)
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct rctx *rctx = skcipher_request_ctx(req);
+ const struct lrw_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
skcipher_request_set_tfm(subreq, ctx->child);
- skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+ skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
+ req);
/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
skcipher_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, req->iv);
@@ -237,33 +240,33 @@ static void init_crypt(struct skcipher_request *req)
gf128mul_64k_bbe(&rctx->t, ctx->table);
}
-static int encrypt(struct skcipher_request *req)
+static int lrw_encrypt(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
- init_crypt(req);
- return xor_tweak_pre(req) ?:
+ lrw_init_crypt(req);
+ return lrw_xor_tweak_pre(req) ?:
crypto_skcipher_encrypt(subreq) ?:
- xor_tweak_post(req);
+ lrw_xor_tweak_post(req);
}
-static int decrypt(struct skcipher_request *req)
+static int lrw_decrypt(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
- init_crypt(req);
- return xor_tweak_pre(req) ?:
+ lrw_init_crypt(req);
+ return lrw_xor_tweak_pre(req) ?:
crypto_skcipher_decrypt(subreq) ?:
- xor_tweak_post(req);
+ lrw_xor_tweak_post(req);
}
-static int init_tfm(struct crypto_skcipher *tfm)
+static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
- struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *cipher;
cipher = crypto_spawn_skcipher(spawn);
@@ -273,45 +276,39 @@ static int init_tfm(struct crypto_skcipher *tfm)
ctx->child = cipher;
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
- sizeof(struct rctx));
+ sizeof(struct lrw_request_ctx));
return 0;
}
-static void exit_tfm(struct crypto_skcipher *tfm)
+static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
- struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->table)
gf128mul_free_64k(ctx->table);
crypto_free_skcipher(ctx->child);
}
-static void crypto_lrw_free(struct skcipher_instance *inst)
+static void lrw_free_instance(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
}
-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
struct skcipher_alg *alg;
const char *cipher_name;
char ecb_name[CRYPTO_MAX_ALG_NAME];
u32 mask;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -379,7 +376,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
} else
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
@@ -391,43 +387,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
LRW_BLOCK_SIZE;
- inst->alg.base.cra_ctxsize = sizeof(struct priv);
+ inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);
- inst->alg.init = init_tfm;
- inst->alg.exit = exit_tfm;
+ inst->alg.init = lrw_init_tfm;
+ inst->alg.exit = lrw_exit_tfm;
- inst->alg.setkey = setkey;
- inst->alg.encrypt = encrypt;
- inst->alg.decrypt = decrypt;
+ inst->alg.setkey = lrw_setkey;
+ inst->alg.encrypt = lrw_encrypt;
+ inst->alg.decrypt = lrw_decrypt;
- inst->free = crypto_lrw_free;
+ inst->free = lrw_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
- crypto_lrw_free(inst);
+ lrw_free_instance(inst);
}
return err;
}
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template lrw_tmpl = {
.name = "lrw",
- .create = create,
+ .create = lrw_create,
.module = THIS_MODULE,
};
-static int __init crypto_module_init(void)
+static int __init lrw_module_init(void)
{
- return crypto_register_template(&crypto_tmpl);
+ return crypto_register_template(&lrw_tmpl);
}
-static void __exit crypto_module_exit(void)
+static void __exit lrw_module_exit(void)
{
- crypto_unregister_template(&crypto_tmpl);
+ crypto_unregister_template(&lrw_tmpl);
}
-subsys_initcall(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(lrw_module_init);
+module_exit(lrw_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 8bddc65cd509..d569c7ed6c80 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -226,18 +226,14 @@ static int pcrypt_init_instance(struct crypto_instance *inst,
}
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
- u32 type, u32 mask)
+ struct crypto_attr_type *algt)
{
struct pcrypt_instance_ctx *ctx;
- struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
+ u32 mask = crypto_algt_inherited_mask(algt);
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
@@ -254,7 +250,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
goto err_free_inst;
err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
@@ -263,7 +259,7 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto err_free_inst;
- inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC;
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
@@ -298,7 +294,7 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
+ return pcrypt_create_aead(tmpl, tb, algt);
}
return -EINVAL;
@@ -320,7 +316,7 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
{
int ret = -ENOMEM;
- *pinst = padata_alloc_possible(name);
+ *pinst = padata_alloc(name);
if (!*pinst)
return ret;
@@ -331,12 +327,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
return ret;
}
-static void pcrypt_fini_padata(struct padata_instance *pinst)
-{
- padata_stop(pinst);
- padata_free(pinst);
-}
-
static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
.create = pcrypt_create,
@@ -359,13 +349,10 @@ static int __init pcrypt_init(void)
if (err)
goto err_deinit_pencrypt;
- padata_start(pencrypt);
- padata_start(pdecrypt);
-
return crypto_register_template(&pcrypt_tmpl);
err_deinit_pencrypt:
- pcrypt_fini_padata(pencrypt);
+ padata_free(pencrypt);
err_unreg_kset:
kset_unregister(pcrypt_kset);
err:
@@ -376,8 +363,8 @@ static void __exit pcrypt_exit(void)
{
crypto_unregister_template(&pcrypt_tmpl);
- pcrypt_fini_padata(pencrypt);
- pcrypt_fini_padata(pdecrypt);
+ padata_free(pencrypt);
+ padata_free(pdecrypt);
kset_unregister(pcrypt_kset);
}
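pcrypt's setup collapses to a symmetric alloc/free pair: padata_alloc() replaces padata_alloc_possible() and returns an instance that is immediately usable, while padata_free() subsumes the former padata_stop(). A stubbed standalone sketch of the simplified lifecycle (the _stub functions are stand-ins, not the padata API):

    #include <stdlib.h>

    /* Stub shims, only to show the simplified pairing; the real
     * functions live in kernel/padata.c. */
    struct padata_instance { const char *name; };

    static struct padata_instance *padata_alloc_stub(const char *name)
    {
        struct padata_instance *p = malloc(sizeof(*p));

        if (p)
            p->name = name; /* a real instance now starts ready to use */
        return p;
    }

    static void padata_free_stub(struct padata_instance *p)
    {
        free(p); /* stop and teardown in one call */
    }

    int main(void)
    {
        struct padata_instance *pencrypt = padata_alloc_stub("pencrypt");

        if (!pencrypt)
            return 1;
        /* no separate padata_start()/padata_stop() steps anymore */
        padata_free_stub(pencrypt);
        return 0;
    }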
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index d31031de51bc..4983b2b4a223 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -596,7 +596,6 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct akcipher_instance *inst;
struct pkcs1pad_inst_ctx *ctx;
@@ -604,14 +603,9 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
const char *hash_name;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
- return -EINVAL;
-
- mask = crypto_requires_sync(algt->type, algt->mask);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask);
+ if (err)
+ return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
@@ -658,7 +652,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
goto err_free_inst;
}
- inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index c81a44404086..3418869dabef 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -9,8 +9,8 @@
* Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream
* Cipher Project. It is designed by Daniel J. Bernstein <djb@cr.yp.to>.
* More information about eSTREAM and Salsa20 can be found here:
- * http://www.ecrypt.eu.org/stream/
- * http://cr.yp.to/snuffle.html
+ * https://www.ecrypt.eu.org/stream/
+ * https://cr.yp.to/snuffle.html
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index f124b9b54e15..23e22d8b63e6 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -138,7 +138,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
struct aead_instance *inst;
int err;
- inst = aead_geniv_alloc(tmpl, tb, 0, 0);
+ inst = aead_geniv_alloc(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
@@ -164,23 +164,9 @@ free_inst:
return err;
}
-static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
- return -EINVAL;
-
- return seqiv_aead_create(tmpl, tb);
-}
-
static struct crypto_template seqiv_tmpl = {
.name = "seqiv",
- .create = seqiv_create,
+ .create = seqiv_aead_create,
.module = THIS_MODULE,
};
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 44e263e25599..3e4069935b53 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -3,7 +3,7 @@
* Cryptographic API.
*
* SHA-3, as specified in
- * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+ * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
*
* SHA-3 code by Jeff Garzik <jeff@garzik.org>
* Ard Biesheuvel <ard.biesheuvel@linaro.org>
diff --git a/crypto/simd.c b/crypto/simd.c
index 56885af49c24..edaa479a1ec5 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -171,7 +171,8 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
drvname) >= CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
alg->base.cra_priority = ialg->base.cra_priority;
alg->base.cra_blocksize = ialg->base.cra_blocksize;
alg->base.cra_alignmask = ialg->base.cra_alignmask;
@@ -417,7 +418,8 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname,
drvname) >= CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
- alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
alg->base.cra_priority = ialg->base.cra_priority;
alg->base.cra_blocksize = ialg->base.cra_blocksize;
alg->base.cra_alignmask = ialg->base.cra_alignmask;
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 7221def7b9a7..467af525848a 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -934,22 +934,15 @@ static void skcipher_free_instance_simple(struct skcipher_instance *inst)
struct skcipher_instance *skcipher_alloc_instance_simple(
struct crypto_template *tmpl, struct rtattr **tb)
{
- struct crypto_attr_type *algt;
u32 mask;
struct skcipher_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *cipher_alg;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return ERR_PTR(-EINVAL);
-
- mask = crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return ERR_PTR(err);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d29983908c38..b9a2d73d9f8d 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -3916,7 +3916,7 @@ static const struct hash_testvec hmac_sm3_tv_template[] = {
};
/*
- * SHA1 test vectors from from FIPS PUB 180-1
+ * SHA1 test vectors from FIPS PUB 180-1
* Long vector from CAVS 5.0
*/
static const struct hash_testvec sha1_tv_template[] = {
@@ -4103,7 +4103,7 @@ static const struct hash_testvec sha1_tv_template[] = {
/*
- * SHA224 test vectors from from FIPS PUB 180-2
+ * SHA224 test vectors from FIPS PUB 180-2
*/
static const struct hash_testvec sha224_tv_template[] = {
{
@@ -4273,7 +4273,7 @@ static const struct hash_testvec sha224_tv_template[] = {
};
/*
- * SHA256 test vectors from from NIST
+ * SHA256 test vectors from NIST
*/
static const struct hash_testvec sha256_tv_template[] = {
{
@@ -4442,7 +4442,7 @@ static const struct hash_testvec sha256_tv_template[] = {
};
/*
- * SHA384 test vectors from from NIST and kerneli
+ * SHA384 test vectors from NIST and kerneli
*/
static const struct hash_testvec sha384_tv_template[] = {
{
@@ -4632,7 +4632,7 @@ static const struct hash_testvec sha384_tv_template[] = {
};
/*
- * SHA512 test vectors from from NIST and kerneli
+ * SHA512 test vectors from NIST and kerneli
*/
static const struct hash_testvec sha512_tv_template[] = {
{
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 2d906830df96..9b565d1040d6 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -620,9 +620,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
+ u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -632,7 +633,7 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 598ec88abf0f..af3b7eb5d7c7 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -191,9 +191,10 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
unsigned long alignmask;
+ u32 mask;
int err;
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
@@ -203,7 +204,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
- crypto_attr_alg_name(tb[1]), 0, 0);
+ crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
diff --git a/crypto/xts.c b/crypto/xts.c
index 3565f3b863a6..ad45b009774b 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -20,7 +20,7 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
-struct priv {
+struct xts_tfm_ctx {
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
};
@@ -30,17 +30,17 @@ struct xts_instance_ctx {
char name[CRYPTO_MAX_ALG_NAME];
};
-struct rctx {
+struct xts_request_ctx {
le128 t;
struct scatterlist *tail;
struct scatterlist sg[2];
struct skcipher_request subreq;
};
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
+static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
{
- struct priv *ctx = crypto_skcipher_ctx(parent);
+ struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
int err;
@@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
* multiple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the gf128mul_x_ble() calls again.
*/
-static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
+static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
+ bool enc)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
const int bs = XTS_BLOCK_SIZE;
@@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
return err;
}
-static int xor_tweak_pre(struct skcipher_request *req, bool enc)
+static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
- return xor_tweak(req, false, enc);
+ return xts_xor_tweak(req, false, enc);
}
-static int xor_tweak_post(struct skcipher_request *req, bool enc)
+static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
- return xor_tweak(req, true, enc);
+ return xts_xor_tweak(req, true, enc);
}
-static void cts_done(struct crypto_async_request *areq, int err)
+static void xts_cts_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
le128 b;
if (!err) {
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
le128_xor(&b, &rctx->t, &b);
@@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err)
skcipher_request_complete(req, err);
}
-static int cts_final(struct skcipher_request *req,
- int (*crypt)(struct skcipher_request *req))
+static int xts_cts_final(struct skcipher_request *req,
+ int (*crypt)(struct skcipher_request *req))
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ const struct xts_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int tail = req->cryptlen % XTS_BLOCK_SIZE;
le128 b[2];
@@ -169,7 +171,7 @@ static int cts_final(struct skcipher_request *req,
offset - XTS_BLOCK_SIZE);
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
- memcpy(b + 1, b, tail);
+ b[1] = b[0];
scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
le128_xor(b, &rctx->t, b);
@@ -177,7 +179,8 @@ static int cts_final(struct skcipher_request *req,
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
skcipher_request_set_tfm(subreq, ctx->child);
- skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
+ skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
+ req);
skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
XTS_BLOCK_SIZE, NULL);
@@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req,
return 0;
}
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void xts_encrypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
if (!err) {
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- err = xor_tweak_post(req, true);
+ err = xts_xor_tweak_post(req, true);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
- err = cts_final(req, crypto_skcipher_encrypt);
+ err = xts_cts_final(req, crypto_skcipher_encrypt);
if (err == -EINPROGRESS)
return;
}
@@ -212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
skcipher_request_complete(req, err);
}
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static void xts_decrypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
if (!err) {
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- err = xor_tweak_post(req, false);
+ err = xts_xor_tweak_post(req, false);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
- err = cts_final(req, crypto_skcipher_decrypt);
+ err = xts_cts_final(req, crypto_skcipher_decrypt);
if (err == -EINPROGRESS)
return;
}
@@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
skcipher_request_complete(req, err);
}
-static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
+static int xts_init_crypt(struct skcipher_request *req,
+ crypto_completion_t compl)
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct rctx *rctx = skcipher_request_ctx(req);
+ const struct xts_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
if (req->cryptlen < XTS_BLOCK_SIZE)
@@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
return 0;
}
-static int encrypt(struct skcipher_request *req)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int err;
- err = init_crypt(req, encrypt_done) ?:
- xor_tweak_pre(req, true) ?:
+ err = xts_init_crypt(req, xts_encrypt_done) ?:
+ xts_xor_tweak_pre(req, true) ?:
crypto_skcipher_encrypt(subreq) ?:
- xor_tweak_post(req, true);
+ xts_xor_tweak_post(req, true);
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
return err;
- return cts_final(req, crypto_skcipher_encrypt);
+ return xts_cts_final(req, crypto_skcipher_encrypt);
}
-static int decrypt(struct skcipher_request *req)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
+ struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int err;
- err = init_crypt(req, decrypt_done) ?:
- xor_tweak_pre(req, false) ?:
+ err = xts_init_crypt(req, xts_decrypt_done) ?:
+ xts_xor_tweak_pre(req, false) ?:
crypto_skcipher_decrypt(subreq) ?:
- xor_tweak_post(req, false);
+ xts_xor_tweak_post(req, false);
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
return err;
- return cts_final(req, crypto_skcipher_decrypt);
+ return xts_cts_final(req, crypto_skcipher_decrypt);
}
-static int init_tfm(struct crypto_skcipher *tfm)
+static int xts_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
- struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
@@ -309,41 +314,39 @@ static int init_tfm(struct crypto_skcipher *tfm)
ctx->tweak = tweak;
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
- sizeof(struct rctx));
+ sizeof(struct xts_request_ctx));
return 0;
}
-static void exit_tfm(struct crypto_skcipher *tfm)
+static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
- struct priv *ctx = crypto_skcipher_ctx(tfm);
+ struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(ctx->child);
crypto_free_cipher(ctx->tweak);
}
-static void crypto_xts_free(struct skcipher_instance *inst)
+static void xts_free_instance(struct skcipher_instance *inst)
{
- crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ictx->spawn);
kfree(inst);
}
-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
struct xts_instance_ctx *ctx;
struct skcipher_alg *alg;
const char *cipher_name;
u32 mask;
int err;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
- return -EINVAL;
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ if (err)
+ return err;
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -355,10 +358,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ctx = skcipher_instance_ctx(inst);
- mask = crypto_requires_off(algt->type, algt->mask,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
-
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
if (err == -ENOENT) {
@@ -415,7 +414,6 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
} else
goto err_free_inst;
- inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
@@ -425,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
- inst->alg.base.cra_ctxsize = sizeof(struct priv);
+ inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
- inst->alg.init = init_tfm;
- inst->alg.exit = exit_tfm;
+ inst->alg.init = xts_init_tfm;
+ inst->alg.exit = xts_exit_tfm;
- inst->alg.setkey = setkey;
- inst->alg.encrypt = encrypt;
- inst->alg.decrypt = decrypt;
+ inst->alg.setkey = xts_setkey;
+ inst->alg.encrypt = xts_encrypt;
+ inst->alg.decrypt = xts_decrypt;
- inst->free = crypto_xts_free;
+ inst->free = xts_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
- crypto_xts_free(inst);
+ xts_free_instance(inst);
}
return err;
}
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template xts_tmpl = {
.name = "xts",
- .create = create,
+ .create = xts_create,
.module = THIS_MODULE,
};
-static int __init crypto_module_init(void)
+static int __init xts_module_init(void)
{
- return crypto_register_template(&crypto_tmpl);
+ return crypto_register_template(&xts_tmpl);
}
-static void __exit crypto_module_exit(void)
+static void __exit xts_module_exit(void)
{
- crypto_unregister_template(&crypto_tmpl);
+ crypto_unregister_template(&xts_tmpl);
}
-subsys_initcall(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(xts_module_init);
+module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
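Editor's note: the rename above is purely mechanical; the template is still reached through the same lookup path. As a minimal sketch (not part of the patch; the kernel-module context and function name are assumed), this is how a crypto API user would end up instantiating xts_create() above:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/random.h>

static int xts_usage_sketch(void)
{
	/* Triggers xts_create() if no "xts(aes)" instance exists yet. */
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	u8 key[2 * 32];	/* two AES-256 keys, matching min/max_keysize * 2 */
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	crypto_free_skcipher(tfm);
	return err;
}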
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 0ad17efc96df..f976a49e1fb5 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -74,6 +74,16 @@ config HW_RANDOM_ATMEL
If unsure, say Y.
+config HW_RANDOM_BA431
+ tristate "Silex Insight BA431 Random Number Generator support"
+ depends on HAS_IOMEM
+ help
+ This driver provides kernel-side support for the Random Number
+	  Generator hardware based on the Silex Insight BA431 IP.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ba431-rng.
+
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
@@ -245,7 +255,7 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_IMX_RNGC
tristate "Freescale i.MX RNGC Random Number Generator"
depends on HAS_IOMEM && HAVE_CLK
- depends on SOC_IMX25 || COMPILE_TEST
+ depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -257,6 +267,21 @@ config HW_RANDOM_IMX_RNGC
If unsure, say Y.
+config HW_RANDOM_INGENIC_RNG
+ tristate "Ingenic Random Number Generator support"
+ depends on HW_RANDOM
+ depends on MACH_JZ4780 || MACH_X1000
+ default HW_RANDOM
+ help
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found in the Ingenic JZ4780 and X1000 SoCs. The
+	  MIPS Creator CI20 uses the JZ4780 SoC, and the YSH & ATIL
+	  CU1000-Neo uses the X1000 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ingenic-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 2c6724735345..26ae06844f09 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
+obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
+obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c
new file mode 100644
index 000000000000..410b50b05e21
--- /dev/null
+++ b/drivers/char/hw_random/ba431-rng.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Silex Insight
+
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#define BA431_RESET_DELAY 1 /* usec */
+#define BA431_RESET_READ_STATUS_TIMEOUT 1000 /* usec */
+#define BA431_RESET_READ_STATUS_INTERVAL 10 /* usec */
+#define BA431_READ_RETRY_INTERVAL 1 /* usec */
+
+#define BA431_REG_CTRL 0x00
+#define BA431_REG_FIFO_LEVEL 0x04
+#define BA431_REG_STATUS 0x30
+#define BA431_REG_FIFODATA 0x80
+
+#define BA431_CTRL_ENABLE BIT(0)
+#define BA431_CTRL_SOFTRESET BIT(8)
+
+#define BA431_STATUS_STATE_MASK (BIT(1) | BIT(2) | BIT(3))
+#define BA431_STATUS_STATE_OFFSET 1
+
+enum ba431_state {
+ BA431_STATE_RESET,
+ BA431_STATE_STARTUP,
+ BA431_STATE_FIFOFULLON,
+ BA431_STATE_FIFOFULLOFF,
+ BA431_STATE_RUNNING,
+ BA431_STATE_ERROR
+};
+
+struct ba431_trng {
+ struct device *dev;
+ void __iomem *base;
+ struct hwrng rng;
+ atomic_t reset_pending;
+ struct work_struct reset_work;
+};
+
+static inline u32 ba431_trng_read_reg(struct ba431_trng *ba431, u32 reg)
+{
+ return ioread32(ba431->base + reg);
+}
+
+static inline void ba431_trng_write_reg(struct ba431_trng *ba431, u32 reg,
+ u32 val)
+{
+ iowrite32(val, ba431->base + reg);
+}
+
+static inline enum ba431_state ba431_trng_get_state(struct ba431_trng *ba431)
+{
+ u32 status = ba431_trng_read_reg(ba431, BA431_REG_STATUS);
+
+ return (status & BA431_STATUS_STATE_MASK) >> BA431_STATUS_STATE_OFFSET;
+}
+
+static int ba431_trng_is_in_error(struct ba431_trng *ba431)
+{
+ enum ba431_state state = ba431_trng_get_state(ba431);
+
+ if ((state < BA431_STATE_STARTUP) ||
+ (state >= BA431_STATE_ERROR))
+ return 1;
+
+ return 0;
+}
+
+static int ba431_trng_reset(struct ba431_trng *ba431)
+{
+ int ret;
+
+	/* Disable interrupts and random generation, then trigger a soft reset */
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_SOFTRESET);
+ udelay(BA431_RESET_DELAY);
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, BA431_CTRL_ENABLE);
+
+	/* Wait until the TRNG has left the reset/error state */
+ if (readx_poll_timeout(ba431_trng_is_in_error, ba431, ret, !ret,
+ BA431_RESET_READ_STATUS_INTERVAL,
+ BA431_RESET_READ_STATUS_TIMEOUT)) {
+ dev_err(ba431->dev, "reset failed (state: %d)\n",
+ ba431_trng_get_state(ba431));
+ return -ETIMEDOUT;
+ }
+
+ dev_info(ba431->dev, "reset done\n");
+
+ return 0;
+}
+
+static void ba431_trng_reset_work(struct work_struct *work)
+{
+ struct ba431_trng *ba431 = container_of(work, struct ba431_trng,
+ reset_work);
+ ba431_trng_reset(ba431);
+ atomic_set(&ba431->reset_pending, 0);
+}
+
+static void ba431_trng_schedule_reset(struct ba431_trng *ba431)
+{
+ if (atomic_cmpxchg(&ba431->reset_pending, 0, 1))
+ return;
+
+ schedule_work(&ba431->reset_work);
+}
+
+static int ba431_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+ u32 *data = buf;
+ unsigned int level, i;
+ int n = 0;
+
+ while (max > 0) {
+ level = ba431_trng_read_reg(ba431, BA431_REG_FIFO_LEVEL);
+ if (!level) {
+ if (ba431_trng_is_in_error(ba431)) {
+ ba431_trng_schedule_reset(ba431);
+ break;
+ }
+
+ if (!wait)
+ break;
+
+ udelay(BA431_READ_RETRY_INTERVAL);
+ continue;
+ }
+
+ i = level;
+ do {
+ data[n++] = ba431_trng_read_reg(ba431,
+ BA431_REG_FIFODATA);
+ max -= sizeof(*data);
+ } while (--i && (max > 0));
+
+ if (ba431_trng_is_in_error(ba431)) {
+ n -= (level - i);
+ ba431_trng_schedule_reset(ba431);
+ break;
+ }
+ }
+
+	n *= sizeof(*data); /* words read -> bytes returned */
+ return (n || !wait) ? n : -EIO;
+}
+
+static void ba431_trng_cleanup(struct hwrng *rng)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+
+ ba431_trng_write_reg(ba431, BA431_REG_CTRL, 0);
+ cancel_work_sync(&ba431->reset_work);
+}
+
+static int ba431_trng_init(struct hwrng *rng)
+{
+ struct ba431_trng *ba431 = container_of(rng, struct ba431_trng, rng);
+
+ return ba431_trng_reset(ba431);
+}
+
+static int ba431_trng_probe(struct platform_device *pdev)
+{
+ struct ba431_trng *ba431;
+ struct resource *res;
+ int ret;
+
+ ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
+ if (!ba431)
+ return -ENOMEM;
+
+ ba431->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ba431->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ba431->base))
+ return PTR_ERR(ba431->base);
+
+ atomic_set(&ba431->reset_pending, 0);
+ INIT_WORK(&ba431->reset_work, ba431_trng_reset_work);
+ ba431->rng.name = pdev->name;
+ ba431->rng.init = ba431_trng_init;
+ ba431->rng.cleanup = ba431_trng_cleanup;
+ ba431->rng.read = ba431_trng_read;
+
+ platform_set_drvdata(pdev, ba431);
+
+ ret = hwrng_register(&ba431->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "BA431 TRNG registered\n");
+
+ return 0;
+}
+
+static int ba431_trng_remove(struct platform_device *pdev)
+{
+ struct ba431_trng *ba431 = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&ba431->rng);
+
+ return 0;
+}
+
+static const struct of_device_id ba431_trng_dt_ids[] = {
+ { .compatible = "silex-insight,ba431-rng", .data = NULL },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids);
+
+static struct platform_driver ba431_trng_driver = {
+ .driver = {
+ .name = "ba431-rng",
+ .of_match_table = ba431_trng_dt_ids,
+ },
+ .probe = ba431_trng_probe,
+ .remove = ba431_trng_remove,
+};
+
+module_platform_driver(ba431_trng_driver);
+
+MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
+MODULE_DESCRIPTION("TRNG driver for Silex Insight BA431");
+MODULE_LICENSE("GPL");
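Editor's note: once hwrng_register() succeeds, entropy from the BA431 is consumed through the hwrng core's character device. A minimal userspace sketch (assumes the standard /dev/hwrng node; not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);

	/* Short-circuit: read() is only attempted if open() succeeded. */
	if (fd < 0 || read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("hwrng");
		return 1;
	}
	for (unsigned int i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	putchar('\n');
	close(fd);
	return 0;
}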
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index cbf5eaea662c..1a7c43b43c6b 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -139,7 +139,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
{
const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
const struct of_device_id *rng_id;
struct bcm2835_rng_priv *priv;
int err;
@@ -166,7 +165,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
priv->rng.cleanup = bcm2835_rng_cleanup;
if (dev_of_node(dev)) {
- rng_id = of_match_node(bcm2835_rng_of_match, np);
+ rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
if (!rng_id)
return -EINVAL;
@@ -188,7 +187,7 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
-static struct platform_device_id bcm2835_rng_devtype[] = {
+static const struct platform_device_id bcm2835_rng_devtype[] = {
{ .name = "bcm2835-rng" },
{ .name = "bcm63xx-rng" },
{ /* sentinel */ }
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index d2d7a42d7e0d..8c1c47dd9f46 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
static int __init hwrng_modinit(void)
{
- int ret = -ENOMEM;
+ int ret;
/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
index 6815e17a9834..96438f85cafa 100644
--- a/drivers/char/hw_random/hisi-rng.c
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -99,7 +99,7 @@ static int hisi_rng_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id hisi_rng_dt_ids[] = {
+static const struct of_device_id hisi_rng_dt_ids[] __maybe_unused = {
{ .compatible = "hisilicon,hip04-rng" },
{ .compatible = "hisilicon,hip05-rng" },
{ }
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
new file mode 100644
index 000000000000..d704cef64b64
--- /dev/null
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ingenic Random Number Generator driver
+ * Copyright (c) 2017 PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>
+ * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* RNG register offsets */
+#define RNG_REG_ERNG_OFFSET 0x0
+#define RNG_REG_RNG_OFFSET 0x4
+
+/* bits within the ERNG register */
+#define ERNG_READY BIT(31)
+#define ERNG_ENABLE BIT(0)
+
+enum ingenic_rng_version {
+ ID_JZ4780,
+ ID_X1000,
+};
+
+/* Device associated memory */
+struct ingenic_rng {
+ enum ingenic_rng_version version;
+
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int ingenic_rng_init(struct hwrng *rng)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+
+ writel(ERNG_ENABLE, priv->base + RNG_REG_ERNG_OFFSET);
+
+ return 0;
+}
+
+static void ingenic_rng_cleanup(struct hwrng *rng)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+
+ writel(0, priv->base + RNG_REG_ERNG_OFFSET);
+}
+
+static int ingenic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct ingenic_rng *priv = container_of(rng, struct ingenic_rng, rng);
+ u32 *data = buf;
+ u32 status;
+ int ret;
+
+ if (priv->version >= ID_X1000) {
+ ret = readl_poll_timeout(priv->base + RNG_REG_ERNG_OFFSET, status,
+ status & ERNG_READY, 10, 1000);
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: Wait for RNG data ready timeout\n", __func__);
+ return ret;
+ }
+ } else {
+		/*
+		 * A delay is required so that the current RNG data is not a
+		 * bit-shifted version of the previous RNG data, which could
+		 * happen if random data is read continuously from this device.
+		 */
+ udelay(20);
+ }
+
+ *data = readl(priv->base + RNG_REG_RNG_OFFSET);
+
+ return 4;
+}
+
+static int ingenic_rng_probe(struct platform_device *pdev)
+{
+ struct ingenic_rng *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ pr_err("%s: Failed to map RNG registers\n", __func__);
+		return PTR_ERR(priv->base);
+ }
+
+ priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = ingenic_rng_init;
+ priv->rng.cleanup = ingenic_rng_cleanup;
+ priv->rng.read = ingenic_rng_read;
+
+ ret = hwrng_register(&priv->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hwrng\n");
+		return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(&pdev->dev, "Ingenic RNG driver registered\n");
+ return 0;
+}
+
+static int ingenic_rng_remove(struct platform_device *pdev)
+{
+ struct ingenic_rng *priv = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&priv->rng);
+
+ writel(0, priv->base + RNG_REG_ERNG_OFFSET);
+
+ return 0;
+}
+
+static const struct of_device_id ingenic_rng_of_match[] = {
+ { .compatible = "ingenic,jz4780-rng", .data = (void *) ID_JZ4780 },
+ { .compatible = "ingenic,x1000-rng", .data = (void *) ID_X1000 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
+
+static struct platform_driver ingenic_rng_driver = {
+ .probe = ingenic_rng_probe,
+ .remove = ingenic_rng_remove,
+ .driver = {
+ .name = "ingenic-rng",
+ .of_match_table = ingenic_rng_of_match,
+ },
+};
+
+module_platform_driver(ingenic_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("PrasannaKumar Muralidharan <prasannatsmkumar@gmail.com>");
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_DESCRIPTION("Ingenic Random Number Generator driver");
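Editor's note: the probe above recovers the SoC variant from the matched of_device_id. A condensed sketch of that idiom with hypothetical names (only of_device_get_match_data() and the .data field are real API):

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum demo_version { DEMO_V1, DEMO_V2 };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-v1", .data = (void *)DEMO_V1 },
	{ .compatible = "vendor,demo-v2", .data = (void *)DEMO_V2 },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	/* The enum rides along in the .data pointer; cast it back out. */
	enum demo_version version =
		(enum demo_version)(uintptr_t)of_device_get_match_data(&pdev->dev);

	if (version == DEMO_V2)
		dev_info(&pdev->dev, "newer variant detected\n");

	return 0;
}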
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index 001617033d6a..8f1d47ff9799 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -2,7 +2,7 @@
/*
* Random Number Generator driver for the Keystone SOC
*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Sandeep Nair
* Vitaly Andrianov
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 74ed29f42e4f..b0ded41eb865 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -76,7 +76,7 @@ static int nmk_rng_remove(struct amba_device *dev)
return 0;
}
-static struct amba_id nmk_rng_ids[] = {
+static const struct amba_id nmk_rng_ids[] = {
{
.id = 0x000805e1,
.mask = 0x000fffff, /* top bits are rev and cfg: accept all */
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 01d04404d8c0..5d0d13f891b7 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -161,7 +161,7 @@ static const struct dev_pm_ops npcm_rng_pm_ops = {
pm_runtime_force_resume)
};
-static const struct of_device_id rng_dt_id[] = {
+static const struct of_device_id rng_dt_id[] __maybe_unused = {
{ .compatible = "nuvoton,npcm750-rng", },
{},
};
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 7be8067ac4e8..8561a09b4681 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -33,7 +33,7 @@ static int octeon_rng_init(struct hwrng *rng)
ctl.u64 = 0;
ctl.s.ent_en = 1; /* Enable the entropy source. */
ctl.s.rng_en = 1; /* Enable the RNG hardware. */
- cvmx_write_csr((u64)p->control_status, ctl.u64);
+ cvmx_write_csr((__force u64)p->control_status, ctl.u64);
return 0;
}
@@ -44,14 +44,14 @@ static void octeon_rng_cleanup(struct hwrng *rng)
ctl.u64 = 0;
/* Disable everything. */
- cvmx_write_csr((u64)p->control_status, ctl.u64);
+ cvmx_write_csr((__force u64)p->control_status, ctl.u64);
}
static int octeon_rng_data_read(struct hwrng *rng, u32 *data)
{
struct octeon_rng *p = container_of(rng, struct octeon_rng, ops);
- *data = cvmx_read64_uint32((u64)p->result);
+ *data = cvmx_read64_uint32((__force u64)p->result);
return sizeof(u32);
}
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 7290c603fcb8..5cc5fc504968 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
+#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
@@ -243,7 +244,6 @@ static struct omap_rng_pdata omap2_rng_pdata = {
.cleanup = omap2_rng_cleanup,
};
-#if defined(CONFIG_OF)
static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
{
return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
@@ -358,7 +358,7 @@ static struct omap_rng_pdata eip76_rng_pdata = {
.cleanup = omap4_rng_cleanup,
};
-static const struct of_device_id omap_rng_of_match[] = {
+static const struct of_device_id omap_rng_of_match[] __maybe_unused = {
{
.compatible = "ti,omap2-rng",
.data = &omap2_rng_pdata,
@@ -418,13 +418,6 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
}
return 0;
}
-#else
-static int of_get_omap_rng_device_details(struct omap_rng_dev *omap_rng,
- struct platform_device *pdev)
-{
- return -EINVAL;
-}
-#endif
static int get_omap_rng_device_details(struct omap_rng_dev *omap_rng)
{
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 81080cb2294e..e8210c1715cf 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -119,7 +119,7 @@ static int pic32_rng_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id pic32_rng_of_match[] = {
+static const struct of_device_id pic32_rng_of_match[] __maybe_unused = {
{ .compatible = "microchip,pic32mzda-rng", },
{ /* sentinel */ }
};
diff --git a/drivers/char/hw_random/st-rng.c b/drivers/char/hw_random/st-rng.c
index 783c24e3f8b7..15ba1e6fae4d 100644
--- a/drivers/char/hw_random/st-rng.c
+++ b/drivers/char/hw_random/st-rng.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -121,7 +122,7 @@ static int st_rng_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id st_rng_match[] = {
+static const struct of_device_id st_rng_match[] __maybe_unused = {
{ .compatible = "st,rng" },
{},
};
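Editor's note: the __maybe_unused annotations added in the hunks above exist because these drivers reference their match tables through of_match_ptr(), which expands to NULL when CONFIG_OF=n and would otherwise leave the table unreferenced and warned about. A hypothetical driver showing the pairing:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_match[] __maybe_unused = {
	{ .compatible = "vendor,demo" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_match);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		/* NULL with CONFIG_OF=n; the table is then "unused". */
		.of_match_table = of_match_ptr(demo_match),
	},
	/* .probe/.remove omitted in this sketch */
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");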
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 79a6e47b5fbc..a90001e02bf7 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -195,7 +195,7 @@ static int virtrng_restore(struct virtio_device *vdev)
}
#endif
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 802b9ada4e9e..aa3a4ed07a66 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -624,6 +624,8 @@ config CRYPTO_DEV_QCE_SKCIPHER
config CRYPTO_DEV_QCE_SHA
bool
depends on CRYPTO_DEV_QCE
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
choice
prompt "Algorithms enabled for QCE acceleration"
@@ -756,10 +758,9 @@ config CRYPTO_DEV_ZYNQMP_AES
config CRYPTO_DEV_MEDIATEK
tristate "MediaTek's EIP97 Cryptographic Engine driver"
depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
- select CRYPTO_AES
+ select CRYPTO_LIB_AES
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
- select CRYPTO_CTR
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
@@ -865,4 +866,18 @@ source "drivers/crypto/hisilicon/Kconfig"
source "drivers/crypto/amlogic/Kconfig"
+config CRYPTO_DEV_SA2UL
+ tristate "Support for TI security accelerator"
+ depends on ARCH_K3 || COMPILE_TEST
+ select ARM64_CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_AES_ARM64
+ select CRYPTO_ALGAPI
+ select HW_RANDOM
+ select SG_SPLIT
+ help
+ K3 devices include a security accelerator engine that may be
+ used for crypto offload. Select this if you want to use hardware
+ acceleration for cryptographic algorithms on these devices.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 944ed7226e37..53fc115cf459 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
+obj-$(CONFIG_CRYPTO_DEV_SA2UL) += sa2ul.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
index 7f22d305178e..b72de8939497 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
@@ -122,19 +122,17 @@ static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_requ
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
int err;
- skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
- skcipher_request_set_callback(subreq, areq->base.flags, NULL,
- NULL);
- skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (ctx->mode & SS_DECRYPTION)
- err = crypto_skcipher_decrypt(subreq);
+ err = crypto_skcipher_decrypt(&ctx->fallback_req);
else
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
+ err = crypto_skcipher_encrypt(&ctx->fallback_req);
return err;
}
@@ -494,23 +492,25 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
alg.crypto.base);
op->ss = algt->ss;
- crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
- sizeof(struct sun4i_cipher_req_ctx));
-
- op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct sun4i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
+
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
goto error_pm;
return 0;
error_pm:
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
return err;
}
@@ -518,7 +518,7 @@ void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put(op->ss->dev);
}
@@ -546,10 +546,10 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the DES key, prepare the mode to be used */
@@ -566,10 +566,10 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the 3DES key, prepare the mode to be used */
@@ -586,9 +586,9 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
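Editor's note: all of the sun4i/sun8i/amlogic conversions in this series follow the same shape — drop the on-stack SYNC_SKCIPHER_REQUEST_ON_STACK, embed the fallback's skcipher_request at the tail of the request context, and size the context accordingly. A condensed sketch of the pattern with illustrative names:

#include <crypto/internal/skcipher.h>

struct demo_req_ctx {
	u32 op_dir;
	struct skcipher_request fallback_req;	/* must stay last */
};

static void demo_set_reqsize(struct crypto_skcipher *tfm,
			     struct crypto_skcipher *fallback)
{
	/* Our context plus whatever the fallback needs for its own request. */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct demo_req_ctx) +
					 crypto_skcipher_reqsize(fallback));
}

static int demo_do_fallback(struct skcipher_request *areq,
			    struct crypto_skcipher *fallback, bool decrypt)
{
	struct demo_req_ctx *rctx = skcipher_request_ctx(areq);

	skcipher_request_set_tfm(&rctx->fallback_req, fallback);
	/* Forward the original completion so async fallbacks work too. */
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	return decrypt ? crypto_skcipher_decrypt(&rctx->fallback_req)
		       : crypto_skcipher_encrypt(&rctx->fallback_req);
}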
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
index 2b4c6333eb67..163962f9e284 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h
@@ -170,11 +170,12 @@ struct sun4i_tfm_ctx {
u32 keylen;
u32 keymode;
struct sun4i_ss_ctx *ss;
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
};
struct sun4i_cipher_req_ctx {
u32 mode;
+ struct skcipher_request fallback_req; // keep at the end
};
struct sun4i_req_ctx {
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index a6abb701bfc6..1e4f9a58bb24 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -58,23 +58,20 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ce_alg_template *algt;
-#endif
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
algt->stat_fb++;
#endif
- skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
- skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (rctx->op_dir & CE_DECRYPTION)
- err = crypto_skcipher_decrypt(subreq);
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
return err;
}
@@ -335,18 +332,20 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
op->ce = algt->ce;
- sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
-
- op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm);
+
dev_info(op->ce->dev, "Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&sktfm->base),
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
@@ -358,7 +357,8 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
return 0;
error_pm:
- crypto_free_sync_skcipher(op->fallback_tfm);
+ pm_runtime_put_noidle(op->ce->dev);
+ crypto_free_skcipher(op->fallback_tfm);
return err;
}
@@ -370,7 +370,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
memzero_explicit(op->key, op->keylen);
kfree(op->key);
}
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync_suspend(op->ce->dev);
}
@@ -400,10 +400,10 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -425,8 +425,8 @@ int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index b957061424a1..138759dc8190 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -185,7 +185,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -211,7 +212,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -236,7 +238,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -262,7 +265,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
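Editor's note: CRYPTO_ALG_ALLOCATES_MEMORY, newly set on these algorithms, matters to users that must avoid allocations on the I/O path: passing the flag in the mask (with a zero type) restricts the lookup to implementations that keep the flag clear. A minimal consumer-side sketch (the call and flag are real API; the function name is illustrative):

#include <crypto/skcipher.h>

static struct crypto_skcipher *demo_alloc_noalloc_path_tfm(void)
{
	/*
	 * type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: only algorithms
	 * with the flag clear (no datapath allocations) can match, so the
	 * sun8i-ce implementations above are skipped.
	 */
	return crypto_alloc_skcipher("cbc(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}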
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 0e9eac397e1b..963645fe4adb 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -181,12 +181,14 @@ struct sun8i_ce_dev {
/*
* struct sun8i_cipher_req_ctx - context for a skcipher request
- * @op_dir: direction (encrypt vs decrypt) for this request
- * @flow: the flow to use for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
u32 op_dir;
int flow;
+ struct skcipher_request fallback_req; // keep at the end
};
/*
@@ -202,7 +204,7 @@ struct sun8i_cipher_tfm_ctx {
u32 *key;
u32 keylen;
struct sun8i_ce_dev *ce;
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
};
/*
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index c89cb2ee2496..7a131675a41c 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -73,7 +73,6 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt;
@@ -81,15 +80,15 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
algt->stat_fb++;
#endif
- skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
- skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
if (rctx->op_dir & SS_DECRYPTION)
- err = crypto_skcipher_decrypt(subreq);
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
return err;
}
@@ -334,18 +333,20 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
op->ss = algt->ss;
- sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);
-
- op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
+ sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm);
+
dev_info(op->ss->dev, "Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&sktfm->base),
- crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));
+ crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
@@ -359,7 +360,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
return 0;
error_pm:
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
return err;
}
@@ -371,7 +372,7 @@ void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
memzero_explicit(op->key, op->keylen);
kfree(op->key);
}
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
pm_runtime_put_sync(op->ss->dev);
}
@@ -401,10 +402,10 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -427,8 +428,8 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 5d9d0fedcb06..9a23515783a6 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -169,7 +169,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -195,7 +196,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -220,7 +222,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -246,7 +249,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index 29c44f279112..0405767f1f7e 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -135,17 +135,18 @@ struct sun8i_ss_dev {
/*
* struct sun8i_cipher_req_ctx - context for a skcipher request
- * @t_src: list of mapped SGs with their size
- * @t_dst: list of mapped SGs with their size
- * @p_key: DMA address of the key
- * @p_iv: DMA address of the IV
- * @method: current algorithm for this request
- * @op_mode: op_mode for this request
- * @op_dir: direction (encrypt vs decrypt) for this request
- * @flow: the flow to use for this request
- * @ivlen: size of biv
- * @keylen: keylen for this request
- * @biv: buffer which contain the IV
+ * @t_src: list of mapped SGs with their size
+ * @t_dst: list of mapped SGs with their size
+ * @p_key: DMA address of the key
+ * @p_iv: DMA address of the IV
+ * @method: current algorithm for this request
+ * @op_mode: op_mode for this request
+ * @op_dir: direction (encrypt vs decrypt) for this request
+ * @flow: the flow to use for this request
+ * @ivlen: size of biv
+ * @keylen: keylen for this request
+ * @biv:	buffer which contains the IV
+ * @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
struct sginfo t_src[MAX_SG];
@@ -159,6 +160,7 @@ struct sun8i_cipher_req_ctx {
unsigned int ivlen;
unsigned int keylen;
void *biv;
+ struct skcipher_request fallback_req; // keep at the end
};
/*
@@ -174,7 +176,7 @@ struct sun8i_cipher_tfm_ctx {
u32 *key;
u32 keylen;
struct sun8i_ss_dev *ss;
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
};
/*
diff --git a/drivers/crypto/amlogic/Kconfig b/drivers/crypto/amlogic/Kconfig
index cf9547602670..cf2c676a7093 100644
--- a/drivers/crypto/amlogic/Kconfig
+++ b/drivers/crypto/amlogic/Kconfig
@@ -1,7 +1,7 @@
config CRYPTO_DEV_AMLOGIC_GXL
tristate "Support for amlogic cryptographic offloader"
depends on HAS_IOMEM
- default y if ARCH_MESON
+ default m if ARCH_MESON
select CRYPTO_SKCIPHER
select CRYPTO_ENGINE
select CRYPTO_ECB
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index 9819dd50fbad..5880b94dcb32 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -64,22 +64,20 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq)
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct meson_alg_template *algt;
-#endif
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);
-#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
algt->stat_fb++;
#endif
- skcipher_request_set_sync_tfm(req, op->fallback_tfm);
- skcipher_request_set_callback(req, areq->base.flags, NULL, NULL);
- skcipher_request_set_crypt(req, areq->src, areq->dst,
+ skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
areq->cryptlen, areq->iv);
+
if (rctx->op_dir == MESON_DECRYPT)
- err = crypto_skcipher_decrypt(req);
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
else
- err = crypto_skcipher_encrypt(req);
- skcipher_request_zero(req);
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
return err;
}
@@ -321,15 +319,16 @@ int meson_cipher_init(struct crypto_tfm *tfm)
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
op->mc = algt->mc;
- sktfm->reqsize = sizeof(struct meson_cipher_req_ctx);
-
- op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
name, PTR_ERR(op->fallback_tfm));
return PTR_ERR(op->fallback_tfm);
}
+ sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm);
+
op->enginectx.op.do_one_request = meson_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
@@ -345,7 +344,7 @@ void meson_cipher_exit(struct crypto_tfm *tfm)
memzero_explicit(op->key, op->keylen);
kfree(op->key);
}
- crypto_free_sync_skcipher(op->fallback_tfm);
+ crypto_free_skcipher(op->fallback_tfm);
}
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
@@ -377,5 +376,5 @@ int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (!op->key)
return -ENOMEM;
- return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+ return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 411857fad8ba..466552acbbbb 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -54,7 +54,8 @@ static struct meson_alg_template mc_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
@@ -79,7 +80,8 @@ static struct meson_alg_template mc_algs[] = {
.cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct meson_cipher_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 0xf,
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
index b7f2de91ab76..dc0f142324a3 100644
--- a/drivers/crypto/amlogic/amlogic-gxl.h
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -109,6 +109,7 @@ struct meson_dev {
struct meson_cipher_req_ctx {
u32 op_dir;
int flow;
+ struct skcipher_request fallback_req; // keep at the end
};
/*
@@ -126,7 +127,7 @@ struct meson_cipher_tfm_ctx {
u32 keylen;
u32 keymode;
struct meson_dev *mc;
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
};
/*
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 62ba0325a618..1a46eeddf082 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2630,7 +2630,8 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha1",
.cra_driver_name = "artpec-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2653,7 +2654,8 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "sha256",
.cra_driver_name = "artpec-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2677,7 +2679,8 @@ static struct ahash_alg hash_algos[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "artpec-hmac-sha256",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
.cra_alignmask = 3,
@@ -2696,7 +2699,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "artpec6-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2717,6 +2721,7 @@ static struct skcipher_alg crypto_algos[] = {
.cra_driver_name = "artpec6-ctr-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
@@ -2738,7 +2743,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "artpec6-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2759,7 +2765,8 @@ static struct skcipher_alg crypto_algos[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "artpec6-xts-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
.cra_alignmask = 3,
@@ -2790,6 +2797,7 @@ static struct aead_alg aead_algos[] = {
.cra_driver_name = "artpec-gcm-aes",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index a353217a0d33..8a7fa1ae1ade 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3233,7 +3233,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3256,7 +3258,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3279,7 +3283,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = AES_BLOCK_SIZE,
@@ -3302,7 +3308,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3325,7 +3333,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3348,7 +3358,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha224),cbc(des))",
.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3371,7 +3383,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3394,7 +3408,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha384),cbc(des))",
.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3417,7 +3433,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha512),cbc(des))",
.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES_BLOCK_SIZE,
@@ -3440,7 +3458,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3463,7 +3483,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3486,7 +3508,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3509,7 +3533,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3532,7 +3558,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3555,7 +3583,9 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY
},
.setkey = aead_authenc_setkey,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -3811,7 +3841,8 @@ static struct iproc_alg_s driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-iproc",
.cra_blocksize = MD5_BLOCK_WORDS * 4,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.cipher_info = {
@@ -4508,7 +4539,9 @@ static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
crypto->base.cra_priority = cipher_pri;
crypto->base.cra_alignmask = 0;
crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
- crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
crypto->init = skcipher_init_tfm;
crypto->exit = skcipher_exit_tfm;
@@ -4547,7 +4580,8 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
hash->halg.base.cra_init = ahash_cra_init;
hash->halg.base.cra_exit = generic_cra_exit;
- hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
@@ -4591,7 +4625,7 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_alignmask = 0;
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
- aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
+ aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
/* setkey set in alg initialization */
aead->setauthsize = aead_setauthsize;
aead->encrypt = aead_encrypt;
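
The CRYPTO_ALG_ALLOCATES_MEMORY additions throughout this series mark drivers that allocate memory while processing a request. A hedged usage sketch, not part of this patch: a caller that must not see allocations in its I/O path (dm-crypt is the motivating user) can put the flag in the lookup mask with the type bit clear, so only non-allocating implementations match.

	#include <crypto/skcipher.h>

	/* Sketch: request an xts(aes) implementation guaranteed not to
	 * allocate memory per request; drivers setting the flag are
	 * excluded from the lookup.
	 */
	static struct crypto_skcipher *alloc_nonallocating_xts(void)
	{
		return crypto_alloc_skcipher("xts(aes)", 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	}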
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b2f9882bc010..91feda5b63f6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -810,12 +810,6 @@ static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}
-static int arc4_skcipher_setkey(struct crypto_skcipher *skcipher,
- const u8 *key, unsigned int keylen)
-{
- return skcipher_setkey(skcipher, key, keylen, 0);
-}
-
static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
@@ -838,7 +832,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- dev_err(jrdev, "key size mismatch\n");
+ dev_dbg(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -1967,21 +1961,6 @@ static struct caam_skcipher_alg driver_algs[] = {
},
.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
},
- {
- .skcipher = {
- .base = {
- .cra_name = "ecb(arc4)",
- .cra_driver_name = "ecb-arc4-caam",
- .cra_blocksize = ARC4_BLOCK_SIZE,
- },
- .setkey = arc4_skcipher_setkey,
- .encrypt = skcipher_encrypt,
- .decrypt = skcipher_decrypt,
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- },
- .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
- },
};
static struct caam_aead_alg driver_aeads[] = {
@@ -3433,7 +3412,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
@@ -3446,7 +3426,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_aead_init;
alg->exit = caam_aead_exit;
@@ -3457,7 +3438,6 @@ int caam_algapi_init(struct device *ctrldev)
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
int i = 0, err = 0;
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
- u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false, gcm_support;
@@ -3477,8 +3457,6 @@ int caam_algapi_init(struct device *ctrldev)
CHA_ID_LS_DES_SHIFT;
aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
- arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
- CHA_ID_LS_ARC4_SHIFT;
ccha_inst = 0;
ptha_inst = 0;
@@ -3499,7 +3477,6 @@ int caam_algapi_init(struct device *ctrldev)
md_inst = mdha & CHA_VER_NUM_MASK;
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
- arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}
@@ -3522,10 +3499,6 @@ int caam_algapi_init(struct device *ctrldev)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
- /* Skip ARC4 algorithms if not supported by device */
- if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
- continue;
-
/*
* Check support for AES modes not available
* on LP devices.
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 27e36bdf6163..bb1c0106a95c 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -728,7 +728,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
int ret = 0;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- dev_err(jrdev, "key size mismatch\n");
+ dev_dbg(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -2502,7 +2502,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
@@ -2515,7 +2516,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_aead_init;
alg->exit = caam_aead_exit;
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 28669cbecf77..66ae1d581168 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1058,7 +1058,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- dev_err(dev, "key size mismatch\n");
+ dev_dbg(dev, "key size mismatch\n");
return -EINVAL;
}
@@ -2912,7 +2912,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init_skcipher;
alg->exit = caam_cra_exit;
@@ -2925,7 +2926,8 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init_aead;
alg->exit = caam_cra_exit_aead;
@@ -4004,7 +4006,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
struct dpaa2_sg_entry *sg_table;
- int ret;
+ int ret = -ENOMEM;
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
@@ -4017,7 +4019,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
DMA_TO_DEVICE);
if (!mapped_nents) {
dev_err(ctx->dev, "unable to DMA map source\n");
- return -ENOMEM;
+ return ret;
}
} else {
mapped_nents = 0;
@@ -4027,7 +4029,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
edesc = qi_cache_zalloc(GFP_DMA | flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
- return -ENOMEM;
+ return ret;
}
edesc->src_nents = src_nents;
@@ -4082,7 +4084,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
unmap:
ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
- return -ENOMEM;
+ return ret;
}
static int ahash_update_first(struct ahash_request *req)
@@ -4498,7 +4500,11 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct caam_hash_state));
- return ahash_set_sh_desc(ahash);
+ /*
+ * For keyed hash algorithms, shared descriptors
+ * will be created later in the setkey() callback
+ */
+ return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -4547,7 +4553,7 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
t_alg->dev = dev;
@@ -4697,6 +4703,13 @@ static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
struct device *dev = priv->dev;
struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int err;
+
+ if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
+ err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err)
+ dev_err(dev, "dpseci_reset() failed\n");
+ }
dpaa2_dpseci_congestion_free(priv);
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
@@ -4894,6 +4907,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
+ if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
+ err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_reset() failed\n");
+ goto err_get_vers;
+ }
+ }
+
err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
&priv->dpseci_attr);
if (err) {
@@ -5221,7 +5242,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
dev_warn(dev, "%s hash alg allocation failed: %d\n",
- alg->driver_name, err);
+ alg->hmac_driver_name, err);
continue;
}
@@ -5384,6 +5405,7 @@ static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
},
{ .vendor = 0x0 }
};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
static struct fsl_mc_driver dpaa2_caam_driver = {
.driver = {
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 27ff4a3d037e..e8a6d8bc43b5 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1927,7 +1927,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 60e2a54c19f1..c3c22a8de4c0 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -43,7 +43,6 @@
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
-#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index f3d20b7645e0..94502f1d4b48 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -469,7 +469,7 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
 * pipeline to a depth of 1 (from its default of 4) to preclude this situation
* from occurring.
*/
-static void handle_imx6_err005766(u32 *mcr)
+static void handle_imx6_err005766(u32 __iomem *mcr)
{
if (of_machine_is_compatible("fsl,imx6q") ||
of_machine_is_compatible("fsl,imx6dl") ||
@@ -527,11 +527,21 @@ static const struct caam_imx_data caam_imx6ul_data = {
.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
};
+static const struct clk_bulk_data caam_vf610_clks[] = {
+ { .id = "ipg" },
+};
+
+static const struct caam_imx_data caam_vf610_data = {
+ .clks = caam_vf610_clks,
+ .num_clks = ARRAY_SIZE(caam_vf610_clks),
+};
+
static const struct soc_device_attribute caam_imx_soc_table[] = {
{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
{ .soc_id = "i.MX6*", .data = &caam_imx6_data },
{ .soc_id = "i.MX7*", .data = &caam_imx7_data },
{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
+ { .soc_id = "VF*", .data = &caam_vf610_data },
{ .family = "Freescale i.MX" },
{ /* sentinel */ }
};
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
index 8a68531ded0b..039df6c5790c 100644
--- a/drivers/crypto/caam/dpseci.c
+++ b/drivers/crypto/caam/dpseci.c
@@ -104,6 +104,24 @@ int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
}
/**
+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpseci_is_enabled() - Check if the DPSECI is enabled.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
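
For reference, dpseci_reset() follows the existing MC command pattern: encode a header carrying the command ID and token, then send it through the MC portal. A minimal caller sketch using identifiers from this patch; the version gate mirrors the qi2 driver above, since firmware at or below DPSECI 5.3 does not implement the command:

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err)
			dev_err(dev, "dpseci_reset() failed\n");
	}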
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
index 4550e134d166..6dcd9be8144b 100644
--- a/drivers/crypto/caam/dpseci.h
+++ b/drivers/crypto/caam/dpseci.h
@@ -59,6 +59,8 @@ int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
int *en);
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
index 6ab77ead6e3d..71a007c85adb 100644
--- a/drivers/crypto/caam/dpseci_cmd.h
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -33,6 +33,7 @@
#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 17c6108b6d41..72db90176b1a 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -212,6 +212,9 @@ static const char * const rng_err_id_list[] = {
"Prediction resistance and test request",
"Uninstantiate",
"Secure key generation",
+ "",
+ "Hardware error",
+ "Continuous check"
};
static int report_ccb_status(struct device *jrdev, const u32 status,
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 4af22e7ceb4f..bf6b03b17251 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -339,8 +339,7 @@ EXPORT_SYMBOL(caam_jr_free);
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
* if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
* descriptor.
- * @dev: device of the job ring to be used. This device should have
- * been assigned prior by caam_jr_register().
+ * @dev: struct device of the job ring to be used
 * @desc: points to a job descriptor that executes our request. All
* descriptors (and all referenced data) must be in a DMAable
* region, and all data references must be physical addresses
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 0f810bc13b2b..af61f3a2c0d4 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -173,9 +173,14 @@ static inline u64 rd_reg64(void __iomem *reg)
static inline u64 cpu_to_caam_dma64(dma_addr_t value)
{
- if (caam_imx)
- return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
- (u64)cpu_to_caam32(upper_32_bits(value)));
+ if (caam_imx) {
+ u64 ret_val = (u64)cpu_to_caam32(lower_32_bits(value)) << 32;
+
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ ret_val |= (u64)cpu_to_caam32(upper_32_bits(value));
+
+ return ret_val;
+ }
return cpu_to_caam64(value);
}
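
A hedged illustration of the i.MX branch above (little-endian CPU, caam_imx set): the two 32-bit halves of the DMA address end up swapped relative to a plain cpu_to_caam64() store, and with CONFIG_ARCH_DMA_ADDR_T_64BIT disabled only the low word is written at all.

	/* value = 0x0000000123456789ULL on caam_imx:
	 *   ret_val = ((u64)cpu_to_caam32(0x23456789) << 32)
	 *           |  (u64)cpu_to_caam32(0x00000001);
	 * without ARCH_DMA_ADDR_T_64BIT the OR with the upper word is
	 * compiled out, which is what the IS_ENABLED() guard is for.
	 */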
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 1be1adffff1d..5af0dc2a8909 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -99,10 +99,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
struct fc_context *fctx = &rctx->fctx;
- u64 *offset_control = &rctx->control_word;
u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
struct cpt_request_info *req_info = &rctx->cpt_req;
- u64 *ctrl_flags = NULL;
+ __be64 *ctrl_flags = NULL;
+ __be64 *offset_control;
req_info->ctrl.s.grp = 0;
req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
@@ -126,9 +126,10 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
else
memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
- ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
- *ctrl_flags = cpu_to_be64(*ctrl_flags);
+ ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags;
+ *ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);
+ offset_control = (__be64 *)&rctx->control_word;
*offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
/* Storing Packet Data Information in offset
* Control Word First 8 bytes
@@ -200,6 +201,7 @@ static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
int status;
memset(req_info, 0, sizeof(struct cpt_request_info));
+ req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
memset(fctx, 0, sizeof(struct fc_context));
create_input_list(req, enc, enc_iv_len);
create_output_list(req, enc_iv_len);
@@ -339,7 +341,8 @@ static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
}
static struct skcipher_alg algs[] = { {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
@@ -356,7 +359,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
@@ -373,7 +377,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
@@ -389,7 +394,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
.base.cra_alignmask = 7,
@@ -406,7 +412,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
.base.cra_alignmask = 7,
@@ -423,7 +430,8 @@ static struct skcipher_alg algs[] = { {
.decrypt = cvm_decrypt,
.init = cvm_enc_dec_init,
}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
.base.cra_alignmask = 7,
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 7a24019356b5..3878b01e19e1 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -4,6 +4,7 @@
*/
#include "cptvf.h"
+#include "cptvf_algs.h"
#include "request_manager.h"
/**
@@ -133,7 +134,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Setup gather (input) components */
g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
- info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
+ info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->gather_components) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -150,7 +151,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Setup scatter (output) components */
s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
- info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
+ info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->scatter_components) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -167,17 +168,16 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
/* Create and initialize DPTR */
info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
+ info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->in_buffer) {
ret = -ENOMEM;
goto scatter_gather_clean;
}
- ((u16 *)info->in_buffer)[0] = req->outcnt;
- ((u16 *)info->in_buffer)[1] = req->incnt;
- ((u16 *)info->in_buffer)[2] = 0;
- ((u16 *)info->in_buffer)[3] = 0;
- *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);
+ ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
+ ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
+ ((__be16 *)info->in_buffer)[2] = 0;
+ ((__be16 *)info->in_buffer)[3] = 0;
memcpy(&info->in_buffer[8], info->gather_components,
g_sz_bytes);
@@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
}
/* Create and initialize RPTR */
- info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
+ info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (!info->out_buffer) {
ret = -ENOMEM;
goto scatter_gather_clean;
@@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
struct cpt_vq_command vq_cmd;
union cpt_inst_s cptinst;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
return -ENOMEM;
@@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
* Get buffer for union cpt_res_s response
* structure and its physical address
*/
- info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
+ info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
ret = -ENOMEM;
@@ -470,8 +470,6 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
- /* 64-bit swap for microcode data reads, not needed for addresses*/
- vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64);
vq_cmd.dptr = info->dptr_baddr;
vq_cmd.rptr = info->rptr_baddr;
vq_cmd.cptr.u64 = 0;
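
The req->may_sleep plumbing above replaces unconditional GFP_KERNEL allocations, which were unsafe when the skcipher entry point runs in atomic context. A sketch of the pattern, with gfp_for() as a hypothetical helper that is not in this patch:

	#include <linux/gfp.h>

	static inline gfp_t gfp_for(const struct cpt_request_info *req)
	{
		/* hypothetical helper: pick allocation flags from the
		 * recorded CRYPTO_TFM_REQ_MAY_SLEEP state */
		return req->may_sleep ? GFP_KERNEL : GFP_ATOMIC;
	}

	/* usage: info = kzalloc(sizeof(*info), gfp_for(req)); */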
diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h
index 3514b082eca7..8d40e4ba3af1 100644
--- a/drivers/crypto/cavium/cpt/request_manager.h
+++ b/drivers/crypto/cavium/cpt/request_manager.h
@@ -62,6 +62,8 @@ struct cpt_request_info {
union ctrl_info ctrl; /* User control information */
struct cptvf_request req; /* Request Information (Core specific) */
+ bool may_sleep;
+
struct buf_ptr in[MAX_BUF_CNT];
struct buf_ptr out[MAX_BUF_CNT];
@@ -73,16 +75,16 @@ struct sglist_component {
union {
u64 len;
struct {
- u16 len0;
- u16 len1;
- u16 len2;
- u16 len3;
+ __be16 len0;
+ __be16 len1;
+ __be16 len2;
+ __be16 len3;
} s;
} u;
- u64 ptr0;
- u64 ptr1;
- u64 ptr2;
- u64 ptr3;
+ __be64 ptr0;
+ __be64 ptr1;
+ __be64 ptr2;
+ __be64 ptr3;
};
struct cpt_info_buffer {
@@ -112,10 +114,10 @@ struct cpt_info_buffer {
union vq_cmd_word0 {
u64 u64;
struct {
- u16 opcode;
- u16 param1;
- u16 param2;
- u16 dlen;
+ __be16 opcode;
+ __be16 param1;
+ __be16 param2;
+ __be16 dlen;
} s;
};
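
Annotating these members as __be16/__be64 makes every store an explicit conversion that sparse can check. The command words are filled exactly as the context lines in process_request() above already show:

	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
	vq_cmd.cmd.s.dlen   = cpu_to_be16(cpt_req->dlen);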
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index dce5423a5883..1be2571363fe 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -522,7 +522,7 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_name = "gcm(aes)",
.cra_driver_name = "n5_aes_gcm",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -541,7 +541,7 @@ static struct aead_alg nitrox_aeads[] = { {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "n5_rfc4106",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 18088b0a2257..a553ac65f324 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -388,7 +388,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "cbc(aes)",
.cra_driver_name = "n5_cbc(aes)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -407,7 +407,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "ecb(aes)",
.cra_driver_name = "n5_ecb(aes)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -426,7 +426,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "cfb(aes)",
.cra_driver_name = "n5_cfb(aes)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -445,7 +445,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "xts(aes)",
.cra_driver_name = "n5_xts(aes)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -464,7 +464,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "n5_rfc3686(ctr(aes))",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -483,7 +483,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "cts(cbc(aes))",
.cra_driver_name = "n5_cts(cbc(aes))",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -502,7 +502,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "n5_cbc(des3_ede)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
@@ -521,7 +521,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "n5_ecb(des3_ede)",
.cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
.cra_alignmask = 0,
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 5eba7ee49e81..11a305fa19e6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -378,6 +378,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
base->cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = AES_BLOCK_SIZE;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 9e8f07c1afac..1c1c939f5c39 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -172,6 +172,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 04b2517df955..6849261ca47d 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -98,7 +98,7 @@ static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
- return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
+ return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
static int ccp_aes_xts_crypt(struct skcipher_request *req,
@@ -145,20 +145,19 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req,
(ctx->u.aes.key_len != AES_KEYSIZE_256))
fallback = 1;
if (fallback) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
- ctx->u.aes.tfm_skcipher);
-
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
- skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- ret = encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+ skcipher_request_set_tfm(&rctx->fallback_req,
+ ctx->u.aes.tfm_skcipher);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
return ret;
}
@@ -198,13 +197,12 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req)
static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_sync_skcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
- fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
- CRYPTO_ALG_ASYNC |
+ fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
pr_warn("could not load fallback driver xts(aes)\n");
@@ -212,7 +210,8 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
}
ctx->u.aes.tfm_skcipher = fallback_tfm;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
+ crypto_skcipher_reqsize(fallback_tfm));
return 0;
}
@@ -221,7 +220,7 @@ static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
+ crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}
static int ccp_register_aes_xts_alg(struct list_head *head,
@@ -243,6 +242,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
alg->base.cra_blocksize = AES_BLOCK_SIZE;
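
The conversion above replaces an on-stack sync request with a fallback request embedded at the tail of the driver's request context, so crypto_skcipher_set_reqsize() must include the child's reqsize. The general shape, as a sketch rather than a drop-in (names abbreviated, fallback pointer storage elided):

	#include <crypto/internal/skcipher.h>
	#include <linux/err.h>

	struct drv_req_ctx {
		/* driver state ... */
		struct skcipher_request fallback_req;	/* must stay last */
	};

	static int drv_init_tfm(struct crypto_skcipher *tfm)
	{
		struct crypto_skcipher *fb =
			crypto_alloc_skcipher("xts(aes)", 0,
					      CRYPTO_ALG_NEED_FALLBACK);

		if (IS_ERR(fb))
			return PTR_ERR(fb);
		/* stash fb in the tfm context, then: */
		crypto_skcipher_set_reqsize(tfm, sizeof(struct drv_req_ctx) +
						 crypto_skcipher_reqsize(fb));
		return 0;
	}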
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 51e12fbd1159..e6dcd8cedd53 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -212,6 +212,7 @@ static const struct skcipher_alg ccp_aes_defaults = {
.init = ccp_aes_init_tfm,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
@@ -229,6 +230,7 @@ static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
.init = ccp_aes_rfc3686_init_tfm,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index 9c129defdb50..ec97daf0fcb7 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -136,6 +136,7 @@ static const struct skcipher_alg ccp_des3_defaults = {
.init = ccp_des3_init_tfm,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b0cc2bd73af8..8fbfdb9e8cd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -19,6 +19,7 @@
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
@@ -424,7 +425,7 @@ static int ccp_register_hmac_alg(struct list_head *head,
*ccp_alg = *base_alg;
INIT_LIST_HEAD(&ccp_alg->entry);
- strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);
+ strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);
alg = &ccp_alg->alg;
alg->setkey = ccp_sha_setkey;
@@ -486,6 +487,7 @@ static int ccp_register_sha_alg(struct list_head *head,
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
def->drv_name);
base->cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK;
base->cra_blocksize = def->block_size;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 90a009e6b5c1..aed3d2192d01 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -89,7 +89,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
- struct crypto_sync_skcipher *tfm_skcipher;
+ struct crypto_skcipher *tfm_skcipher;
enum ccp_engine engine;
enum ccp_aes_type type;
@@ -121,6 +121,8 @@ struct ccp_aes_req_ctx {
u8 rfc3686_iv[AES_BLOCK_SIZE];
struct ccp_cmd cmd;
+
+ struct skcipher_request fallback_req; // keep at the end
};
struct ccp_aes_cmac_req_ctx {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 82ac4c14c04c..7838f63bab32 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -221,8 +221,8 @@ static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
static int ccp5_do_cmd(struct ccp5_desc *desc,
struct ccp_cmd_queue *cmd_q)
{
- u32 *mP;
- __le32 *dP;
+ __le32 *mP;
+ u32 *dP;
u32 tail;
int i;
int ret = 0;
@@ -235,8 +235,8 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
}
mutex_lock(&cmd_q->q_mutex);
- mP = (u32 *) &cmd_q->qbase[cmd_q->qidx];
- dP = (__le32 *) desc;
+ mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];
+ dP = (u32 *)desc;
for (i = 0; i < 8; i++)
mP[i] = cpu_to_le32(dP[i]); /* handle endianness */
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 19ac509ed76e..0971ee60f840 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -531,7 +531,6 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return len;
}
-#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
unsigned int suspended = 0;
@@ -549,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
-int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+int ccp_dev_suspend(struct sp_device *sp)
{
struct ccp_device *ccp = sp->ccp_data;
unsigned long flags;
@@ -601,7 +600,6 @@ int ccp_dev_resume(struct sp_device *sp)
return 0;
}
-#endif
int ccp_dev_init(struct sp_device *sp)
{
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 3f68262d9ab4..a5d9123a22ea 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -469,6 +469,7 @@ struct ccp_sg_workarea {
unsigned int sg_used;
struct scatterlist *dma_sg;
+ struct scatterlist *dma_sg_head;
struct device *dma_dev;
unsigned int dma_count;
enum dma_data_direction dma_dir;
@@ -596,8 +597,8 @@ struct dword3 {
};
union dword4 {
- __le32 dst_lo; /* NON-SHA */
- __le32 sha_len_lo; /* SHA */
+ u32 dst_lo; /* NON-SHA */
+ u32 sha_len_lo; /* SHA */
};
union dword5 {
@@ -607,7 +608,7 @@ union dword5 {
unsigned int rsvd1:13;
unsigned int fixed:1;
} fields;
- __le32 sha_len_hi;
+ u32 sha_len_hi;
};
struct dword7 {
@@ -618,12 +619,12 @@ struct dword7 {
struct ccp5_desc {
struct dword0 dw0;
- __le32 length;
- __le32 src_lo;
+ u32 length;
+ u32 src_lo;
struct dword3 dw3;
union dword4 dw4;
union dword5 dw5;
- __le32 key_lo;
+ u32 key_lo;
struct dword7 dw7;
};
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 422193690fd4..bd270e66185e 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
if (wa->dma_count)
- dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
+ dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
wa->dma_count = 0;
}
@@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
return 0;
wa->dma_sg = sg;
+ wa->dma_sg_head = sg;
wa->dma_dev = dev;
wa->dma_dir = dma_dir;
wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
@@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
unsigned int nbytes = min_t(u64, len, wa->bytes_left);
+ unsigned int sg_combined_len = 0;
if (!wa->sg)
return;
wa->sg_used += nbytes;
wa->bytes_left -= nbytes;
- if (wa->sg_used == wa->sg->length) {
- wa->sg = sg_next(wa->sg);
+ if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
+ /* Advance to the next DMA scatterlist entry */
+ wa->dma_sg = sg_next(wa->dma_sg);
+
+ /* In the case that the DMA mapped scatterlist has entries
+ * that have been merged, the non-DMA mapped scatterlist
+ * must be advanced multiple times for each merged entry.
+ * This ensures that the current non-DMA mapped entry
+ * corresponds to the current DMA mapped entry.
+ */
+ do {
+ sg_combined_len += wa->sg->length;
+ wa->sg = sg_next(wa->sg);
+ } while (wa->sg_used > sg_combined_len);
+
wa->sg_used = 0;
}
}
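
A worked example of the merged-entry case the comment describes: if the caller's scatterlist has two 4 KiB entries that the IOMMU coalesced into one 8 KiB DMA segment, sg_used only reaches sg_dma_len() once 8 KiB have been consumed, at which point the inner loop advances the non-DMA list twice so both views stay in step.

	/* caller sg:  [4096] -> [4096]     (wa->sg walks these)
	 * DMA sg:     [8192]               (wa->dma_sg walks this)
	 * at sg_used == 8192: dma_sg advances once, sg advances twice
	 */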
@@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
/* Update the structures and generate the count */
buf_count = 0;
while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
- nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
+ nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
dm_wa->length - buf_count);
nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
@@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
* and destination. The resulting len values will always be <= UINT_MAX
* because the dma length is an unsigned int.
*/
- sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
+ sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
if (dst) {
- sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
+ sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
op_len = min(sg_src_len, sg_dst_len);
} else {
@@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
/* Enough data in the sg element, but we need to
* adjust for any previously copied data
*/
- op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
+ op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
op->src.u.dma.offset = src->sg_wa.sg_used;
op->src.u.dma.length = op_len & ~(block_size - 1);
@@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
/* Enough room in the sg element, but we need to
* adjust for any previously used area
*/
- op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
+ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
op->dst.u.dma.offset = dst->sg_wa.sg_used;
op->dst.u.dma.length = op->src.u.dma.length;
}
@@ -617,13 +632,12 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct ccp_data src, dst;
struct ccp_data aad;
struct ccp_op op;
-
- unsigned long long *final;
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
unsigned int ilen;
bool in_place = true; /* Default value */
+ __be64 *final;
int ret;
struct scatterlist *p_inp, sg_inp[2];
@@ -825,7 +839,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
- final = (unsigned long long *) final_wa.address;
+ final = (__be64 *)final_wa.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
@@ -1308,7 +1322,6 @@ ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
return -EINVAL;
}
- ret = -EIO;
/* Zero out all the fields of the command desc */
memset(&op, 0, sizeof(op));
@@ -2028,7 +2041,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
dst.sg_wa.sg_used = 0;
for (i = 1; i <= src.sg_wa.dma_count; i++) {
if (!dst.sg_wa.sg ||
- (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
+ (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
ret = -EINVAL;
goto e_dst;
}
@@ -2054,8 +2067,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_dst;
}
- dst.sg_wa.sg_used += src.sg_wa.sg->length;
- if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
+ dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
+ if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
dst.sg_wa.sg_used = 0;
}
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index ce42675d3274..6284a15e5047 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -211,13 +211,12 @@ void sp_destroy(struct sp_device *sp)
sp_del_device(sp);
}
-#ifdef CONFIG_PM
-int sp_suspend(struct sp_device *sp, pm_message_t state)
+int sp_suspend(struct sp_device *sp)
{
int ret;
if (sp->dev_vdata->ccp_vdata) {
- ret = ccp_dev_suspend(sp, state);
+ ret = ccp_dev_suspend(sp);
if (ret)
return ret;
}
@@ -237,7 +236,6 @@ int sp_resume(struct sp_device *sp)
return 0;
}
-#endif
struct sp_device *sp_get_psp_master_device(void)
{
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index f913f1494af9..0218d0670eee 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -119,7 +119,7 @@ int sp_init(struct sp_device *sp);
void sp_destroy(struct sp_device *sp);
struct sp_device *sp_get_master(void);
-int sp_suspend(struct sp_device *sp, pm_message_t state);
+int sp_suspend(struct sp_device *sp);
int sp_resume(struct sp_device *sp);
int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
const char *name, void *data);
@@ -134,7 +134,7 @@ struct sp_device *sp_get_psp_master_device(void);
int ccp_dev_init(struct sp_device *sp);
void ccp_dev_destroy(struct sp_device *sp);
-int ccp_dev_suspend(struct sp_device *sp, pm_message_t state);
+int ccp_dev_suspend(struct sp_device *sp);
int ccp_dev_resume(struct sp_device *sp);
#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
@@ -145,7 +145,7 @@ static inline int ccp_dev_init(struct sp_device *sp)
}
static inline void ccp_dev_destroy(struct sp_device *sp) { }
-static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+static inline int ccp_dev_suspend(struct sp_device *sp)
{
return 0;
}
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index cb6cb47053f4..f471dbaef1fb 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -252,23 +252,19 @@ static void sp_pci_remove(struct pci_dev *pdev)
sp_free_irqs(sp);
}
-#ifdef CONFIG_PM
-static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused sp_pci_suspend(struct device *dev)
{
- struct device *dev = &pdev->dev;
struct sp_device *sp = dev_get_drvdata(dev);
- return sp_suspend(sp, state);
+ return sp_suspend(sp);
}
-static int sp_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused sp_pci_resume(struct device *dev)
{
- struct device *dev = &pdev->dev;
struct sp_device *sp = dev_get_drvdata(dev);
return sp_resume(sp);
}
-#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
static const struct sev_vdata sevv1 = {
@@ -365,15 +361,14 @@ static const struct pci_device_id sp_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, sp_pci_table);
+static SIMPLE_DEV_PM_OPS(sp_pci_pm_ops, sp_pci_suspend, sp_pci_resume);
+
static struct pci_driver sp_pci_driver = {
.name = "ccp",
.id_table = sp_pci_table,
.probe = sp_pci_probe,
.remove = sp_pci_remove,
-#ifdef CONFIG_PM
- .suspend = sp_pci_suspend,
- .resume = sp_pci_resume,
-#endif
+ .driver.pm = &sp_pci_pm_ops,
};
int sp_pci_init(void)
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 831aac1393a2..9dba52fbee99 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -207,7 +207,7 @@ static int sp_platform_suspend(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct sp_device *sp = dev_get_drvdata(dev);
- return sp_suspend(sp, state);
+ return sp_suspend(sp);
}
static int sp_platform_resume(struct platform_device *pdev)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 872ea3ff1c6b..076669dc1035 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -45,7 +45,6 @@ enum cc_key_type {
struct cc_cipher_ctx {
struct cc_drvdata *drvdata;
int keylen;
- int key_round_number;
int cipher_mode;
int flow_mode;
unsigned int flags;
@@ -56,6 +55,8 @@ struct cc_cipher_ctx {
struct cc_cpp_key_info cpp;
};
struct crypto_shash *shash_tfm;
+ struct crypto_skcipher *fallback_tfm;
+ bool fallback_on;
};
static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
@@ -75,7 +76,6 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
- ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
return 0;
break;
@@ -159,22 +159,49 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
skcipher_alg.base);
struct device *dev = drvdata_to_dev(cc_alg->drvdata);
unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
- int rc = 0;
+ unsigned int fallback_req_size = 0;
dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
crypto_tfm_alg_name(tfm));
- crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
- sizeof(struct cipher_req_ctx));
-
ctx_p->cipher_mode = cc_alg->cipher_mode;
ctx_p->flow_mode = cc_alg->flow_mode;
ctx_p->drvdata = cc_alg->drvdata;
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ const char *name = crypto_tfm_alg_name(tfm);
+
+ /* Alloc hash tfm for essiv */
+ ctx_p->shash_tfm = crypto_alloc_shash("sha256", 0, 0);
+ if (IS_ERR(ctx_p->shash_tfm)) {
+ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+ return PTR_ERR(ctx_p->shash_tfm);
+ }
+ max_key_buf_size <<= 1;
+
+ /* Alloc fallback tfm for essiv when key size != 256 bit */
+ ctx_p->fallback_tfm =
+ crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
+
+ if (IS_ERR(ctx_p->fallback_tfm)) {
+ /* Note we're still allowing registration with no fallback since it's
+ * better to have most modes supported than none at all.
+ */
+ dev_warn(dev, "Error allocating fallback algo %s. Some modes may be available.\n",
+ name);
+ ctx_p->fallback_tfm = NULL;
+ } else {
+ fallback_req_size = crypto_skcipher_reqsize(ctx_p->fallback_tfm);
+ }
+ }
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct cipher_req_ctx) + fallback_req_size);
+
/* Allocate key buffer, cache line aligned */
- ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+ ctx_p->user.key = kzalloc(max_key_buf_size, GFP_KERNEL);
if (!ctx_p->user.key)
- return -ENOMEM;
+ goto free_fallback;
dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
ctx_p->user.key);
@@ -186,21 +213,20 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
max_key_buf_size, ctx_p->user.key);
- return -ENOMEM;
+ goto free_key;
}
dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
- if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
- /* Alloc hash tfm for essiv */
- ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
- if (IS_ERR(ctx_p->shash_tfm)) {
- dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
- return PTR_ERR(ctx_p->shash_tfm);
- }
- }
+ return 0;
- return rc;
+free_key:
+ kfree(ctx_p->user.key);
+free_fallback:
+ crypto_free_skcipher(ctx_p->fallback_tfm);
+ crypto_free_shash(ctx_p->shash_tfm);
+
+ return -ENOMEM;
}
static void cc_cipher_exit(struct crypto_tfm *tfm)
@@ -220,6 +246,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm)
/* Free hash tfm for essiv */
crypto_free_shash(ctx_p->shash_tfm);
ctx_p->shash_tfm = NULL;
+ crypto_free_skcipher(ctx_p->fallback_tfm);
+ ctx_p->fallback_tfm = NULL;
}
/* Unmap key buffer */
@@ -303,6 +331,7 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
}
ctx_p->keylen = keylen;
+ ctx_p->fallback_on = false;
switch (cc_slot_to_key_type(hki.hw_key1)) {
case CC_HW_PROTECTED_KEY:
@@ -388,10 +417,33 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
if (validate_keys_sizes(ctx_p, keylen)) {
- dev_dbg(dev, "Unsupported key size %d.\n", keylen);
+ dev_dbg(dev, "Invalid key size %d.\n", keylen);
return -EINVAL;
}
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+
+ /* We only support 256 bit ESSIV-CBC-AES keys */
+ if (keylen != AES_KEYSIZE_256) {
+ unsigned int flags = crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_MASK;
+
+ if (likely(ctx_p->fallback_tfm)) {
+ ctx_p->fallback_on = true;
+ crypto_skcipher_clear_flags(ctx_p->fallback_tfm,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(ctx_p->fallback_tfm, flags);
+ return crypto_skcipher_setkey(ctx_p->fallback_tfm, key, keylen);
+ }
+
+ dev_dbg(dev, "Unsupported key size %d and no fallback.\n", keylen);
+ return -EINVAL;
+ }
+
+ /* Internal ESSIV key buffer is double sized */
+ max_key_buf_size <<= 1;
+ }
+
+ ctx_p->fallback_on = false;
ctx_p->key_type = CC_UNPROTECTED_KEY;
/*
@@ -419,21 +471,20 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
max_key_buf_size, DMA_TO_DEVICE);
memcpy(ctx_p->user.key, key, keylen);
- if (keylen == 24)
- memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* sha256 for key2 - use sw implementation */
- int key_len = keylen >> 1;
int err;
err = crypto_shash_tfm_digest(ctx_p->shash_tfm,
- ctx_p->user.key, key_len,
- ctx_p->user.key + key_len);
+ ctx_p->user.key, keylen,
+ ctx_p->user.key + keylen);
if (err) {
dev_err(dev, "Failed to hash ESSIV key.\n");
return err;
}
+
+ keylen <<= 1;
}
dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
@@ -571,9 +622,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
- unsigned int key_len = ctx_p->keylen;
+ unsigned int key_len = (ctx_p->keylen / 2);
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
+ unsigned int key_offset = key_len;
struct cc_crypto_alg *cc_alg =
container_of(tfm->__crt_alg, struct cc_crypto_alg,
@@ -593,6 +645,10 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
+
+ if (cipher_mode == DRV_CIPHER_ESSIV)
+ key_len = SHA256_DIGEST_SIZE;
+
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -602,12 +658,12 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
ctx_p->hw.key2_slot);
} else {
set_din_type(&desc[*seq_size], DMA_DLLI,
- (key_dma_addr + (key_len / 2)),
- (key_len / 2), NS_BIT);
+ (key_dma_addr + key_offset),
+ key_len, NS_BIT);
}
set_xex_data_unit_size(&desc[*seq_size], du_size);
set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_key_size_aes(&desc[*seq_size], key_len);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
(*seq_size)++;
@@ -616,7 +672,7 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_key_size_aes(&desc[*seq_size], key_len);
set_flow_mode(&desc[*seq_size], flow_mode);
set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
CC_AES_BLOCK_SIZE, NS_BIT);
@@ -867,6 +923,17 @@ static int cc_cipher_process(struct skcipher_request *req,
goto exit_process;
}
+ if (ctx_p->fallback_on) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, ctx_p->fallback_tfm);
+ if (direction == DRV_CRYPTO_DIRECTION_ENCRYPT)
+ return crypto_skcipher_encrypt(subreq);
+ else
+ return crypto_skcipher_decrypt(subreq);
+ }
+
/* The IV we are handed may be allocated from the stack so
* we must copy it to a DMAable buffer before use.
*/
@@ -1010,7 +1077,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "essiv(paes)",
+ .name = "essiv(cbc(paes),sha256)",
.driver_name = "essiv-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
@@ -1028,7 +1095,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "essiv512(paes)",
+ .name = "essiv512(cbc(paes),sha256)",
.driver_name = "essiv-paes-du512-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
@@ -1047,7 +1114,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "essiv4096(paes)",
+ .name = "essiv4096(cbc(paes),sha256)",
.driver_name = "essiv-paes-du4096-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
@@ -1269,15 +1336,15 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "essiv(aes)",
+ .name = "essiv(cbc(aes),sha256)",
.driver_name = "essiv-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
.decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_mode = DRV_CIPHER_ESSIV,
@@ -1286,15 +1353,15 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "essiv512(aes)",
+ .name = "essiv512(cbc(aes),sha256)",
.driver_name = "essiv-aes-du512-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
.decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_mode = DRV_CIPHER_ESSIV,
@@ -1304,15 +1371,15 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "essiv4096(aes)",
+ .name = "essiv4096(cbc(aes),sha256)",
.driver_name = "essiv-aes-du4096-ccree",
.blocksize = AES_BLOCK_SIZE,
.template_skcipher = {
.setkey = cc_cipher_setkey,
.encrypt = cc_cipher_encrypt,
.decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE * 2,
- .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.cipher_mode = DRV_CIPHER_ESSIV,
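
For context: the cc_cipher_process() hunk above short-circuits into a pre-allocated software skcipher whenever fallback_on is set. A minimal sketch of that dispatch pattern follows; my_ctx and fallback_tfm are illustrative names, not ccree symbols, and the request context must have been sized for the fallback at init time (compare the chcr reqsize changes below).

    #include <crypto/skcipher.h>

    struct my_ctx {
        struct crypto_skcipher *fallback_tfm;   /* bound at init */
    };

    static int my_crypt_via_fallback(struct skcipher_request *req, bool enc)
    {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
        /* lives in the tail of req's context area */
        struct skcipher_request *subreq = skcipher_request_ctx(req);

        /* copy src/dst/cryptlen/iv/callback, then retarget the tfm */
        *subreq = *req;
        skcipher_request_set_tfm(subreq, ctx->fallback_tfm);

        return enc ? crypto_skcipher_encrypt(subreq) :
                     crypto_skcipher_decrypt(subreq);
    }
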
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4c2553672b6f..13b908ea4873 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -690,26 +690,22 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
return min(srclen, dstlen);
}
-static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
- u32 flags,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int nbytes,
+static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+ struct skcipher_request *req,
u8 *iv,
unsigned short op_type)
{
+ struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
int err;
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
-
- skcipher_request_set_sync_tfm(subreq, cipher);
- skcipher_request_set_callback(subreq, flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, src, dst,
- nbytes, iv);
+ skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
+ skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
+ req->cryptlen, iv);
- err = op_type ? crypto_skcipher_decrypt(subreq) :
- crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
+ err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
+ crypto_skcipher_encrypt(&reqctx->fallback_req);
return err;
@@ -924,11 +920,11 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
{
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
- crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+ crypto_skcipher_clear_flags(ablkctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+ crypto_skcipher_set_flags(ablkctx->sw_cipher,
cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+ return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
@@ -1206,13 +1202,8 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
req);
memcpy(req->iv, reqctx->init_iv, IV);
atomic_inc(&adap->chcr_stats.fallback);
- err = chcr_cipher_fallback(ablkctx->sw_cipher,
- req->base.flags,
- req->src,
- req->dst,
- req->cryptlen,
- req->iv,
- reqctx->op);
+ err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
+ reqctx->op);
goto complete;
}
@@ -1224,7 +1215,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
wrparam.bytes = bytes;
skb = create_cipher_wr(&wrparam);
if (IS_ERR(skb)) {
- pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
+ pr_err("%s : Failed to form WR. No memory\n", __func__);
err = PTR_ERR(skb);
goto unmap;
}
@@ -1341,11 +1332,7 @@ static int process_cipher(struct skcipher_request *req,
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
req);
fallback: atomic_inc(&adap->chcr_stats.fallback);
- err = chcr_cipher_fallback(ablkctx->sw_cipher,
- req->base.flags,
- req->src,
- req->dst,
- req->cryptlen,
+ err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
subtype ==
CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
reqctx->iv : req->iv,
@@ -1486,14 +1473,15 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+ ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
init_completion(&ctx->cbc_aes_aio_done);
- crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(ablkctx->sw_cipher));
return chcr_device_init(ctx);
}
@@ -1507,13 +1495,14 @@ static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
* cannot be used as fallback in chcr_handle_cipher_response
*/
- ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+ ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
return PTR_ERR(ablkctx->sw_cipher);
}
- crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+ crypto_skcipher_reqsize(ablkctx->sw_cipher));
return chcr_device_init(ctx);
}
@@ -1523,7 +1512,7 @@ static void chcr_exit_tfm(struct crypto_skcipher *tfm)
struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- crypto_free_sync_skcipher(ablkctx->sw_cipher);
+ crypto_free_skcipher(ablkctx->sw_cipher);
}
static int get_alg_config(struct algo_param *params,
@@ -1556,7 +1545,7 @@ static int get_alg_config(struct algo_param *params,
params->result_size = SHA512_DIGEST_SIZE;
break;
default:
- pr_err("chcr : ERROR, unsupported digest size\n");
+ pr_err("ERROR, unsupported digest size\n");
return -EINVAL;
}
return 0;
@@ -3571,7 +3560,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
goto out;
if (get_alg_config(&param, max_authsize)) {
- pr_err("chcr : Unsupported digest size\n");
+ pr_err("Unsupported digest size\n");
goto out;
}
subtype = get_aead_subtype(authenc);
@@ -3590,7 +3579,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key\n");
+ pr_err("Unsupported cipher key\n");
goto out;
}
@@ -3608,10 +3597,8 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
}
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
- pr_err("chcr : Base driver cannot be loaded\n");
- aeadctx->enckey_len = 0;
- memzero_explicit(&keys, sizeof(keys));
- return -EINVAL;
+ pr_err("Base driver cannot be loaded\n");
+ goto out;
}
{
SHASH_DESC_ON_STACK(shash, base_hash);
@@ -3626,7 +3613,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
keys.authkeylen,
o_ptr);
if (err) {
- pr_err("chcr : Base driver cannot be loaded\n");
+ pr_err("Base driver cannot be loaded\n");
goto out;
}
keys.authkeylen = max_authsize;
@@ -3711,7 +3698,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
+ pr_err("Unsupported cipher key %d\n", keys.enckeylen);
goto out;
}
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
@@ -3747,7 +3734,7 @@ static int chcr_aead_op(struct aead_request *req,
cdev = a_ctx(tfm)->dev;
if (!cdev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
+ pr_err("%s : No crypto device.\n", __func__);
return -ENXIO;
}
@@ -4445,6 +4432,7 @@ static int chcr_register_alg(void)
driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
driver_algs[i].alg.skcipher.base.cra_flags =
CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK;
driver_algs[i].alg.skcipher.base.cra_ctxsize =
sizeof(struct chcr_context) +
@@ -4456,7 +4444,8 @@ static int chcr_register_alg(void)
break;
case CRYPTO_ALG_TYPE_AEAD:
driver_algs[i].alg.aead.base.cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
driver_algs[i].alg.aead.init = chcr_aead_cra_init;
@@ -4476,7 +4465,8 @@ static int chcr_register_alg(void)
a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
a_hash->halg.base.cra_module = THIS_MODULE;
- a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
+ a_hash->halg.base.cra_flags =
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
a_hash->halg.base.cra_alignmask = 0;
a_hash->halg.base.cra_exit = NULL;
@@ -4497,8 +4487,7 @@ static int chcr_register_alg(void)
break;
}
if (err) {
- pr_err("chcr : %s : Algorithm registration failed\n",
- name);
+ pr_err("%s : Algorithm registration failed\n", name);
goto register_err;
} else {
driver_algs[i].is_registered = 1;
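
CRYPTO_ALG_ALLOCATES_MEMORY, added to the chcr algorithms above (and to several more drivers later in this series), advertises that the driver may allocate memory while processing a request. Callers that cannot tolerate request-path allocation, such as storage targets on the writeback path, exclude such drivers by putting the flag in the allocation mask with the type bit clear; a sketch of that usage:

    #include <crypto/skcipher.h>

    static struct crypto_skcipher *alloc_nomem_xts(void)
    {
        /* type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: only match
         * implementations with the flag clear */
        return crypto_alloc_skcipher("xts(aes)", 0,
                                     CRYPTO_ALG_ALLOCATES_MEMORY);
    }
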
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 31e427e273f8..e89f9e0094b4 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -171,7 +171,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
}
struct ablk_ctx {
- struct crypto_sync_skcipher *sw_cipher;
+ struct crypto_skcipher *sw_cipher;
__be32 key_ctx_hdr;
unsigned int enckey_len;
unsigned char ciph_mode;
@@ -305,6 +305,7 @@ struct chcr_skcipher_req_ctx {
u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
u16 txqidx;
u16 rxqidx;
+ struct skcipher_request fallback_req; /* keep at the end */
};
struct chcr_alg_template {
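
The "keep at the end" comment above is load-bearing: fallback_req is effectively variable-sized, since the memory backing it is whatever crypto_skcipher_reqsize() reports for the fallback bound at init. A sketch of the matching init, mirroring the chcr_init_tfm() hunk (my_ctx and my_req_ctx are illustrative names):

    struct my_req_ctx {
        u8 iv[16];
        struct skcipher_request fallback_req;   /* keep at the end */
    };

    static int my_init_tfm(struct crypto_skcipher *tfm)
    {
        struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->sw_cipher = crypto_alloc_skcipher(
                        crypto_tfm_alg_name(&tfm->base), 0,
                        CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->sw_cipher))
                return PTR_ERR(ctx->sw_cipher);

        /* our own state, then the fallback's request trailing it */
        crypto_skcipher_set_reqsize(tfm, sizeof(struct my_req_ctx) +
                        crypto_skcipher_reqsize(ctx->sw_cipher));
        return 0;
    }
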
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index a3ee127a70e3..b135c74fb619 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -12,7 +12,6 @@
#include <linux/topology.h>
#include "hpre.h"
-#define HPRE_VF_NUM 63
#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
@@ -46,9 +45,9 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE 0x3f
+#define HPRE_HAC_RAS_CE_ENABLE 0x1
#define HPRE_RAS_NFE_ENB 0x301414
-#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0
+#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_HAC_RAS_FE_ENABLE 0
@@ -83,6 +82,10 @@
#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
+#define HPRE_QM_BME_FLR BIT(7)
+#define HPRE_QM_PM_FLR BIT(11)
+#define HPRE_QM_SRIOV_FLR BIT(12)
+
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
@@ -231,6 +234,22 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
return 0;
}
+/*
+ * For Hi1620, we should disable FLR triggered by hardware (BME/PM/SRIOV).
+ * Otherwise the device may stay in D3 state when we bind and unbind hpre
+ * quickly, as the hardware-triggered FLR kicks in.
+ */
+static void disable_flr_of_bme(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
+ val |= HPRE_QM_PM_FLR;
+ writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
+ writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
+}
+
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
@@ -242,10 +261,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
- /* disable FLR triggered by BME(bus master enable) */
- writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
- writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
-
/* HPRE need more time, we close this interrupt */
val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
@@ -264,7 +279,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
- val & BIT(0),
+ val & BIT(0),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret) {
@@ -296,6 +311,8 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
if (ret)
dev_err(dev, "acpi_evaluate_dsm err.\n");
+ disable_flr_of_bme(qm);
+
return ret;
}
@@ -372,7 +389,6 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
-
if (val > num_vfs)
return -EINVAL;
@@ -449,7 +465,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos)
+ size_t count, loff_t *pos)
{
struct hpre_debugfs_file *file = filp->private_data;
char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
@@ -477,7 +493,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
}
static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+ size_t count, loff_t *pos)
{
struct hpre_debugfs_file *file = filp->private_data;
char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
@@ -548,13 +564,15 @@ static int hpre_debugfs_atomic64_get(void *data, u64 *val)
static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
struct hpre_dfx *dfx_item = data;
- struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+ struct hpre_dfx *hpre_dfx = NULL;
- if (val)
+ if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
+ hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+ atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+ } else if (val) {
return -EINVAL;
+ }
- if (dfx_item->type == HPRE_OVERTIME_THRHLD)
- atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
atomic64_set(&dfx_item->value, val);
return 0;
@@ -563,15 +581,17 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
hpre_debugfs_atomic64_set, "%llu\n");
-static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
+static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
+ struct hpre *hpre = container_of(qm, struct hpre, qm);
+ struct hpre_debug *dbg = &hpre->debug;
struct dentry *file_dir;
if (dir)
file_dir = dir;
else
- file_dir = dbg->debug_root;
+ file_dir = qm->debug.debug_root;
if (type >= HPRE_DEBUG_FILE_NUM)
return -EINVAL;
@@ -586,10 +606,8 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
return 0;
}
-static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
+static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
@@ -601,14 +619,12 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
+ debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
return 0;
}
-static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
+static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
@@ -619,7 +635,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret < 0)
return -EINVAL;
- tmp_d = debugfs_create_dir(buf, debug->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -630,7 +646,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->base = qm->io_base + hpre_cluster_offsets[i];
debugfs_create_regset32("regs", 0444, tmp_d, regset);
- ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
+ ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
return ret;
@@ -639,32 +655,31 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
return 0;
}
-static int hpre_ctrl_debug_init(struct hpre_debug *debug)
+static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
int ret;
- ret = hpre_create_debugfs_file(debug, NULL, HPRE_CURRENT_QM,
+ ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
HPRE_CURRENT_QM);
if (ret)
return ret;
- ret = hpre_create_debugfs_file(debug, NULL, HPRE_CLEAR_ENABLE,
+ ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
HPRE_CLEAR_ENABLE);
if (ret)
return ret;
- ret = hpre_pf_comm_regs_debugfs_init(debug);
+ ret = hpre_pf_comm_regs_debugfs_init(qm);
if (ret)
return ret;
- return hpre_cluster_debugfs_init(debug);
+ return hpre_cluster_debugfs_init(qm);
}
-static void hpre_dfx_debug_init(struct hpre_debug *debug)
+static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
- struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hpre *hpre = container_of(qm, struct hpre, qm);
struct hpre_dfx *dfx = hpre->debug.dfx;
- struct hisi_qm *qm = &hpre->qm;
struct dentry *parent;
int i;
@@ -676,30 +691,27 @@ static void hpre_dfx_debug_init(struct hpre_debug *debug)
}
}
-static int hpre_debugfs_init(struct hpre *hpre)
+static int hpre_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
- struct dentry *dir;
int ret;
- dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- qm->debug.debug_root = dir;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hpre_debugfs_root);
+
qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
-
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
- hpre->debug.debug_root = dir;
- ret = hpre_ctrl_debug_init(&hpre->debug);
+ ret = hpre_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
- hpre_dfx_debug_init(&hpre->debug);
+ hpre_dfx_debug_init(qm);
return 0;
@@ -708,10 +720,8 @@ failed_to_create:
return ret;
}
-static void hpre_debugfs_exit(struct hpre *hpre)
+static void hpre_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
}
@@ -732,6 +742,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF) {
qm->qp_base = HPRE_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &hpre_devices;
}
@@ -849,7 +860,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_with_err_init;
- ret = hpre_debugfs_init(hpre);
+ ret = hpre_debugfs_init(qm);
if (ret)
dev_warn(&pdev->dev, "init debugfs fail!\n");
@@ -874,6 +885,7 @@ err_with_crypto_register:
err_with_qm_start:
hisi_qm_del_from_list(qm, &hpre_devices);
+ hpre_debugfs_exit(qm);
hisi_qm_stop(qm);
err_with_err_init:
@@ -905,7 +917,7 @@ static void hpre_remove(struct pci_dev *pdev)
qm->debug.curr_qm_qp_num = 0;
}
- hpre_debugfs_exit(hpre);
+ hpre_debugfs_exit(qm);
hisi_qm_stop(qm);
hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
@@ -924,7 +936,8 @@ static struct pci_driver hpre_pci_driver = {
.id_table = hpre_dev_ids,
.probe = hpre_probe,
.remove = hpre_remove,
- .sriov_configure = hisi_qm_sriov_configure,
+ .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
+ hisi_qm_sriov_configure : NULL,
.err_handler = &hpre_err_handler,
};
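
The hpre debugfs refactor above standardizes on passing struct hisi_qm and recovers the enclosing device with container_of(). That only works because the qm is embedded by value in struct hpre, so member and wrapper share one allocation; a minimal sketch of the idiom:

    #include <linux/kernel.h>

    struct my_qm { int id; };

    struct my_dev {
        struct my_qm qm;        /* embedded, not a pointer */
        int dfx;
    };

    static struct my_dev *qm_to_dev(struct my_qm *qm)
    {
        /* walk back from the member to the enclosing struct */
        return container_of(qm, struct my_dev, qm);
    }
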
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 9bb263cec6c3..6527c53b073f 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1064,19 +1064,10 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
char buf[QM_DBG_READ_LEN];
int len;
- if (*pos)
- return 0;
-
- if (count < QM_DBG_READ_LEN)
- return -ENOSPC;
-
- len = snprintf(buf, QM_DBG_READ_LEN, "%s\n",
- "Please echo help to cmd to get help information");
+ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
- if (copy_to_user(buffer, buf, len))
- return -EFAULT;
-
- return (*pos = len);
+ return simple_read_from_buffer(buffer, count, pos, buf, len);
}
static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
@@ -1741,7 +1732,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
-static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
+static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
@@ -1813,7 +1804,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
int qp_id = qp->qp_id;
- int pasid = arg;
+ u32 pasid = arg;
int ret;
if (!qm_qp_avail_state(qm, qp, QP_START))
@@ -2179,8 +2170,12 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
.flags = UACCE_DEV_SVA,
.ops = &uacce_qm_ops,
};
+ int ret;
- strncpy(interface.name, pdev->driver->name, sizeof(interface.name));
+ ret = strscpy(interface.name, pdev->driver->name,
+ sizeof(interface.name));
+ if (ret < 0)
+ return -ENAMETOOLONG;
uacce = uacce_alloc(&pdev->dev, &interface);
if (IS_ERR(uacce))
@@ -2691,24 +2686,12 @@ static ssize_t qm_status_read(struct file *filp, char __user *buffer,
{
struct hisi_qm *qm = filp->private_data;
char buf[QM_DBG_READ_LEN];
- int val, cp_len, len;
-
- if (*pos)
- return 0;
-
- if (count < QM_DBG_READ_LEN)
- return -ENOSPC;
+ int val, len;
val = atomic_read(&qm->status.flags);
- len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
- if (!len)
- return -EFAULT;
-
- cp_len = copy_to_user(buffer, buf, len);
- if (cp_len)
- return -EFAULT;
+ len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
- return (*pos = len);
+ return simple_read_from_buffer(buffer, count, pos, buf, len);
}
static const struct file_operations qm_status_fops = {
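
The qm.c hunks above replace open-coded *pos bookkeeping and copy_to_user() with simple_read_from_buffer(), and strncpy() with strscpy(), which guarantees NUL termination and returns -E2BIG on truncation. The resulting debugfs read idiom, sketched:

    #include <linux/fs.h>
    #include <linux/kernel.h>

    static ssize_t my_dbg_read(struct file *filp, char __user *ubuf,
                               size_t count, loff_t *pos)
    {
        char buf[32];
        int len = scnprintf(buf, sizeof(buf), "state: %d\n", 1);

        /* handles short reads, EOF once *pos reaches len, and the
         * copy_to_user() failure path in one call */
        return simple_read_from_buffer(ubuf, count, pos, buf, len);
    }
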
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 0a351de8d838..6c1d3c7d64ee 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -44,6 +44,7 @@
#define QM_AXI_M_CFG 0x1000ac
#define AXI_M_CFG 0xffff
#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
#define AXI_M_CFG_ENABLE 0xffffffff
#define QM_PEH_AXUSER_CFG 0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index c27e7160d2df..8ca945ac297e 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
dma_addr_t *psec_sgl,
struct scatterlist *sgl,
int count,
- struct sec_dev_info *info)
+ struct sec_dev_info *info,
+ gfp_t gfp)
{
struct sec_hw_sgl *sgl_current = NULL;
struct sec_hw_sgl *sgl_next;
@@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
sge_index = i % SEC_MAX_SGE_NUM;
if (sge_index == 0) {
sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
- GFP_KERNEL, &sgl_next_dma);
+ gfp, &sgl_next_dma);
if (!sgl_next) {
ret = -ENOMEM;
goto err_free_hw_sgls;
@@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
}
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
- int *steps)
+ int *steps, gfp_t gfp)
{
size_t *sizes;
int i;
/* Split into suitable sized blocks */
*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
- sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
+ sizes = kcalloc(*steps, sizeof(*sizes), gfp);
if (!sizes)
return -ENOMEM;
@@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
int steps, struct scatterlist ***splits,
int **splits_nents,
int sgl_len_in,
- struct device *dev)
+ struct device *dev, gfp_t gfp)
{
int ret, count;
@@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
if (!count)
return -EINVAL;
- *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
+ *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
if (!*splits) {
ret = -ENOMEM;
goto err_unmap_sg;
}
- *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
+ *splits_nents = kcalloc(steps, sizeof(int), gfp);
if (!*splits_nents) {
ret = -ENOMEM;
goto err_free_splits;
@@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
/* output the scatter list before and after this */
ret = sg_split(sgl, count, 0, steps, split_sizes,
- *splits, *splits_nents, GFP_KERNEL);
+ *splits, *splits_nents, gfp);
if (ret) {
ret = -ENOMEM;
goto err_free_splits_nents;
@@ -630,13 +631,13 @@ static struct sec_request_el
int el_size, bool different_dest,
struct scatterlist *sgl_in, int n_ents_in,
struct scatterlist *sgl_out, int n_ents_out,
- struct sec_dev_info *info)
+ struct sec_dev_info *info, gfp_t gfp)
{
struct sec_request_el *el;
struct sec_bd_info *req;
int ret;
- el = kzalloc(sizeof(*el), GFP_KERNEL);
+ el = kzalloc(sizeof(*el), gfp);
if (!el)
return ERR_PTR(-ENOMEM);
el->el_length = el_size;
@@ -668,7 +669,7 @@ static struct sec_request_el
el->sgl_in = sgl_in;
ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
- n_ents_in, info);
+ n_ents_in, info, gfp);
if (ret)
goto err_free_el;
@@ -679,7 +680,7 @@ static struct sec_request_el
el->sgl_out = sgl_out;
ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
el->sgl_out,
- n_ents_out, info);
+ n_ents_out, info, gfp);
if (ret)
goto err_free_hw_sgl_in;
@@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
int *splits_out_nents = NULL;
struct sec_request_el *el, *temp;
bool split = skreq->src != skreq->dst;
+ gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
mutex_init(&sec_req->lock);
sec_req->req_base = &skreq->base;
@@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
sec_req->len_in = sg_nents(skreq->src);
ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
- &steps);
+ &steps, gfp);
if (ret)
return ret;
sec_req->num_elements = steps;
ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
&splits_in_nents, sec_req->len_in,
- info->dev);
+ info->dev, gfp);
if (ret)
goto err_free_split_sizes;
@@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
sec_req->len_out = sg_nents(skreq->dst);
ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
&splits_out, &splits_out_nents,
- sec_req->len_out, info->dev);
+ sec_req->len_out, info->dev, gfp);
if (ret)
goto err_unmap_in_sg;
}
@@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
splits_in[i], splits_in_nents[i],
split ? splits_out[i] : NULL,
split ? splits_out_nents[i] : 0,
- info);
+ info, gfp);
if (IS_ERR(el)) {
ret = PTR_ERR(el);
goto err_free_elements;
@@ -932,7 +934,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "ecb(aes)",
.cra_driver_name = "hisi_sec_aes_ecb",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -951,7 +954,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "cbc(aes)",
.cra_driver_name = "hisi_sec_aes_cbc",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -970,7 +974,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "ctr(aes)",
.cra_driver_name = "hisi_sec_aes_ctr",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -989,7 +994,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "xts(aes)",
.cra_driver_name = "hisi_sec_aes_xts",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -1009,7 +1015,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "ecb(des)",
.cra_driver_name = "hisi_sec_des_ecb",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -1028,7 +1035,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "cbc(des)",
.cra_driver_name = "hisi_sec_des_cbc",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -1047,7 +1055,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "hisi_sec_3des_cbc",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
@@ -1066,7 +1075,8 @@ static struct skcipher_alg sec_algs[] = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "hisi_sec_3des_ecb",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
.cra_alignmask = 0,
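
The sec_algs.c changes above thread a gfp_t taken from the request through every allocation, so callers that must not sleep (CRYPTO_TFM_REQ_MAY_SLEEP cleared, e.g. IPsec in softirq context) get GFP_ATOMIC instead of a sleeping GFP_KERNEL allocation. The selection reduces to a one-liner; a helper sketch:

    #include <crypto/skcipher.h>
    #include <linux/gfp.h>

    static gfp_t req_gfp(struct skcipher_request *req)
    {
        /* may this request's allocations sleep? */
        return (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
               GFP_KERNEL : GFP_ATOMIC;
    }
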
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 7b64aca704d6..037762b531e2 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -46,9 +46,11 @@ struct sec_req {
struct sec_cipher_req c_req;
struct sec_aead_req aead_req;
+ struct list_head backlog_head;
int err_type;
int req_id;
+ int flag;
/* Status of the SEC request */
bool fake_busy;
@@ -104,6 +106,7 @@ struct sec_qp_ctx {
struct sec_alg_res res[QM_Q_DEPTH];
struct sec_ctx *ctx;
struct mutex req_lock;
+ struct list_head backlog;
struct hisi_acc_sgl_pool *c_in_pool;
struct hisi_acc_sgl_pool *c_out_pool;
atomic_t pending_reqs;
@@ -161,6 +164,7 @@ struct sec_dfx {
atomic64_t send_cnt;
atomic64_t recv_cnt;
atomic64_t send_busy_cnt;
+ atomic64_t recv_busy_cnt;
atomic64_t err_bd_cnt;
atomic64_t invalid_req_cnt;
atomic64_t done_flag_cnt;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 64614a9bdf21..497969ae8b23 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -166,6 +166,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
if (unlikely(!req)) {
atomic64_inc(&dfx->invalid_req_cnt);
+ atomic_inc(&qp->qp_status.used);
return;
}
req->err_type = bd->type2.error_type;
@@ -198,21 +199,30 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int ret;
+ if (ctx->fake_req_limit <=
+ atomic_read(&qp_ctx->qp->qp_status.used) &&
+ !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+
mutex_lock(&qp_ctx->req_lock);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
+
+ if (ctx->fake_req_limit <=
+ atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
+ list_add_tail(&req->backlog_head, &qp_ctx->backlog);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
+ atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
+ mutex_unlock(&qp_ctx->req_lock);
+ return -EBUSY;
+ }
mutex_unlock(&qp_ctx->req_lock);
- atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
if (unlikely(ret == -EBUSY))
return -ENOBUFS;
- if (!ret) {
- if (req->fake_busy) {
- atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
- ret = -EBUSY;
- } else {
- ret = -EINPROGRESS;
- }
+ if (likely(!ret)) {
+ ret = -EINPROGRESS;
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
}
return ret;
@@ -373,8 +383,8 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp_ctx->ctx = ctx;
mutex_init(&qp_ctx->req_lock);
- atomic_set(&qp_ctx->pending_reqs, 0);
idr_init(&qp_ctx->req_idr);
+ INIT_LIST_HEAD(&qp_ctx->backlog);
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
@@ -1048,21 +1058,49 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
+static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct sec_req *backlog_req = NULL;
+
+ mutex_lock(&qp_ctx->req_lock);
+ if (ctx->fake_req_limit >=
+ atomic_read(&qp_ctx->qp->qp_status.used) &&
+ !list_empty(&qp_ctx->backlog)) {
+ backlog_req = list_first_entry(&qp_ctx->backlog,
+ typeof(*backlog_req), backlog_head);
+ list_del(&backlog_req->backlog_head);
+ }
+ mutex_unlock(&qp_ctx->req_lock);
+
+ return backlog_req;
+}
+
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
int err)
{
struct skcipher_request *sk_req = req->c_req.sk_req;
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct skcipher_request *backlog_sk_req;
+ struct sec_req *backlog_req;
- atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
/* IV output at encrypto of CBC mode */
if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
sec_update_iv(req, SEC_SKCIPHER);
- if (req->fake_busy)
- sk_req->base.complete(&sk_req->base, -EINPROGRESS);
+ while (1) {
+ backlog_req = sec_back_req_clear(ctx, qp_ctx);
+ if (!backlog_req)
+ break;
+
+ backlog_sk_req = backlog_req->c_req.sk_req;
+ backlog_sk_req->base.complete(&backlog_sk_req->base,
+ -EINPROGRESS);
+ atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
+ }
+
sk_req->base.complete(&sk_req->base, err);
}
@@ -1133,10 +1171,10 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
struct sec_cipher_req *c_req = &req->c_req;
size_t authsize = crypto_aead_authsize(tfm);
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct aead_request *backlog_aead_req;
+ struct sec_req *backlog_req;
size_t sz;
- atomic_dec(&qp_ctx->pending_reqs);
-
if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
sec_update_iv(req, SEC_AEAD);
@@ -1157,17 +1195,22 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
sec_free_req_id(req);
- if (req->fake_busy)
- a_req->base.complete(&a_req->base, -EINPROGRESS);
+ while (1) {
+ backlog_req = sec_back_req_clear(c, qp_ctx);
+ if (!backlog_req)
+ break;
+
+ backlog_aead_req = backlog_req->aead_req.aead_req;
+ backlog_aead_req->base.complete(&backlog_aead_req->base,
+ -EINPROGRESS);
+ atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
+ }
a_req->base.complete(&a_req->base, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
- struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-
- atomic_dec(&qp_ctx->pending_reqs);
sec_free_req_id(req);
sec_free_queue_id(ctx, req);
}
@@ -1187,11 +1230,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
return req->req_id;
}
- if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
- req->fake_busy = true;
- else
- req->fake_busy = false;
-
return 0;
}
@@ -1213,7 +1251,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
sec_update_iv(req, ctx->alg_type);
ret = ctx->req_op->bd_send(ctx, req);
- if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
+ (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
goto err_send_req;
}
@@ -1407,6 +1446,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
if (!sk_req->cryptlen)
return 0;
+ req->flag = sk_req->base.flags;
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
@@ -1435,7 +1475,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
- .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
@@ -1530,6 +1570,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
struct sec_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
+ req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
@@ -1558,7 +1599,7 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
- .cra_flags = CRYPTO_ALG_ASYNC,\
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
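
With the sec2 backlog support above, -EBUSY now means "queued on the backlog" when the caller set CRYPTO_TFM_REQ_MAY_BACKLOG: the completion path later invokes the callback with -EINPROGRESS as a slot frees up, and only afterwards with the final status. Synchronous callers usually hide that two-step dance behind crypto_wait_req(); a minimal sketch using the stock helpers:

    #include <crypto/skcipher.h>
    #include <linux/crypto.h>

    static int encrypt_sync(struct skcipher_request *req)
    {
        DECLARE_CRYPTO_WAIT(wait);

        skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG |
                        CRYPTO_TFM_REQ_MAY_SLEEP,
                        crypto_req_done, &wait);

        /* absorbs -EINPROGRESS, and for backlogged (-EBUSY)
         * requests waits for the second, final completion too */
        return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    }
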
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index a4cb58b54b25..2297425486cb 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -22,17 +22,15 @@
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
-#define SEC_XTS_MIV_ENABLE_REG 0x301384
-#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF
-#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0xfffff7fd
-#define SEC_BD_ERR_CHK_EN2 0xffffbfff
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd
+#define SEC_BD_ERR_CHK_EN3 0xffffbfff
#define SEC_SQE_SIZE 128
#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
-#define SEC_PF_DEF_Q_NUM 64
+#define SEC_PF_DEF_Q_NUM 256
#define SEC_PF_DEF_Q_BASE 0
-#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_DEF 2
#define SEC_CTX_Q_NUM_MAX 32
#define SEC_CTRL_CNT_CLR_CE 0x301120
@@ -47,17 +45,18 @@
#define SEC_ECC_ADDR(err) ((err) >> 0)
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
+#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_SAA_ENABLE 0x17f
-#define SEC_RAS_CE_REG 0x50
-#define SEC_RAS_FE_REG 0x54
-#define SEC_RAS_NFE_REG 0x58
+#define SEC_RAS_CE_REG 0x301050
+#define SEC_RAS_FE_REG 0x301054
+#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x177
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x0100
#define SEC_MEM_INIT_DONE_REG 0x0104
-#define SEC_QM_ABNORMAL_INT_MASK 0x100004
#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
@@ -68,8 +67,10 @@
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
+#define SEC_SAA_EN_REG 0x0270
+#define SEC_BD_ERR_CHK_EN_REG0 0x0380
#define SEC_BD_ERR_CHK_EN_REG1 0x0384
-#define SEC_BD_ERR_CHK_EN_REG2 0x038c
+#define SEC_BD_ERR_CHK_EN_REG3 0x038c
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
@@ -77,8 +78,8 @@
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
-#define SEC_VF_CNT_MASK 0xffffffc0
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_SQE_MASK_OFFSET 64
#define SEC_SQE_MASK_LEN 48
@@ -122,6 +123,7 @@ static struct sec_dfx_item sec_dfx_labels[] = {
{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
+ {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
@@ -191,7 +193,7 @@ static const struct kernel_param_ops sec_ctx_q_num_ops = {
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
+MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (default 2, even values 2..32)");
static const struct kernel_param_ops vfs_num_ops = {
.set = vfs_num_set,
@@ -280,7 +282,7 @@ static int sec_engine_init(struct hisi_qm *qm)
reg, reg & 0x1, SEC_DELAY_10_US,
SEC_POLL_TIMEOUT_US);
if (ret) {
- dev_err(&qm->pdev->dev, "fail to init sec mem\n");
+ pci_err(qm->pdev, "failed to init sec mem\n");
return ret;
}
@@ -296,25 +298,25 @@ static int sec_engine_init(struct hisi_qm *qm)
reg |= SEC_USER1_SMMU_NORMAL;
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+ writel(SEC_SINGLE_PORT_MAX_TRANS,
+ qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
+
+ writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
+
+ /* Enable SM4 extra modes such as CTR/ECB */
+ writel_relaxed(SEC_BD_ERR_CHK_EN0,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
+ /* Enable SM4 XTS multiple-IV mode */
writel_relaxed(SEC_BD_ERR_CHK_EN1,
SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
- writel_relaxed(SEC_BD_ERR_CHK_EN2,
- SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG2));
-
- /* enable clock gate control */
- reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
- reg |= SEC_CLK_GATE_ENABLE;
- writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel_relaxed(SEC_BD_ERR_CHK_EN3,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
/* config endian */
reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
reg |= sec_get_endian(qm);
writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
- /* Enable sm4 xts mode multiple iv */
- writel_relaxed(SEC_XTS_MIV_ENABLE_MSK,
- qm->io_base + SEC_XTS_MIV_ENABLE_REG);
-
return 0;
}
@@ -346,10 +348,17 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
+ int i;
+
/* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
+ /* clear sec dfx regs */
+ writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
+ for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
+ readl(qm->io_base + sec_dfx_regs[i].offset);
+
/* clear rdclr_en */
writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);
@@ -362,14 +371,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
- dev_info(&qm->pdev->dev, "V1 not support hw error handle\n");
+ pci_info(qm->pdev, "V1 does not support hw error handling\n");
return;
}
- val = readl(qm->io_base + SEC_CONTROL_REG);
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
/* clear SEC hw error source if having */
- writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable SEC hw error interrupts */
writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
@@ -382,14 +391,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
/* enable SEC block master OOO when m-bit error occur */
val = val | SEC_AXI_SHUTDOWN_ENABLE;
- writel(val, qm->io_base + SEC_CONTROL_REG);
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
static void sec_hw_error_disable(struct hisi_qm *qm)
{
u32 val;
- val = readl(qm->io_base + SEC_CONTROL_REG);
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
/* disable RAS int */
writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
@@ -402,7 +411,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
/* disable SEC block master OOO when m-bit error occur */
val = val & SEC_AXI_SHUTDOWN_DISABLE;
- writel(val, qm->io_base + SEC_CONTROL_REG);
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
static u32 sec_current_qm_read(struct sec_debug_file *file)
@@ -577,20 +586,20 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
sec_debugfs_atomic64_set, "%lld\n");
-static int sec_core_debug_init(struct sec_dev *sec)
+static int sec_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
struct device *dev = &qm->pdev->dev;
struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
int i;
- tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root);
+ tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
- return -ENOENT;
+ return -ENOMEM;
regset->regs = sec_dfx_regs;
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
@@ -609,44 +618,44 @@ static int sec_core_debug_init(struct sec_dev *sec)
return 0;
}
-static int sec_debug_init(struct sec_dev *sec)
+static int sec_debug_init(struct hisi_qm *qm)
{
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
int i;
- for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&sec->debug.files[i].lock);
- sec->debug.files[i].index = i;
- sec->debug.files[i].qm = &sec->qm;
-
- debugfs_create_file(sec_dbg_file_name[i], 0600,
- sec->qm.debug.debug_root,
- sec->debug.files + i,
- &sec_dbg_fops);
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = qm;
+
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
+ qm->debug.debug_root,
+ sec->debug.files + i,
+ &sec_dbg_fops);
+ }
}
- return sec_core_debug_init(sec);
+ return sec_core_debug_init(qm);
}
-static int sec_debugfs_init(struct sec_dev *sec)
+static int sec_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &sec->qm;
struct device *dev = &qm->pdev->dev;
int ret;
qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
sec_debugfs_root);
-
qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
- if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- ret = sec_debug_init(sec);
- if (ret)
- goto failed_to_create;
- }
+ ret = sec_debug_init(qm);
+ if (ret)
+ goto failed_to_create;
+
return 0;
@@ -656,9 +665,9 @@ failed_to_create:
return ret;
}
-static void sec_debugfs_exit(struct sec_dev *sec)
+static void sec_debugfs_exit(struct hisi_qm *qm)
{
- debugfs_remove_recursive(sec->qm.debug.debug_root);
+ debugfs_remove_recursive(qm->debug.debug_root);
}
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
@@ -677,8 +686,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
SEC_ECC_NUM(err_val));
- dev_err(dev, "multi ecc sram addr=0x%x\n",
- SEC_ECC_ADDR(err_val));
}
}
errs++;
@@ -868,7 +875,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_probe_uninit;
}
- ret = sec_debugfs_init(sec);
+ ret = sec_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
@@ -893,7 +900,7 @@ err_crypto_unregister:
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
- sec_debugfs_exit(sec);
+ sec_debugfs_exit(qm);
hisi_qm_stop(qm);
err_probe_uninit:
@@ -917,7 +924,7 @@ static void sec_remove(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev);
- sec_debugfs_exit(sec);
+ sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm);
@@ -987,5 +994,6 @@ module_exit(sec_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
+MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index f3ed4c0e5493..4484be13812b 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -76,7 +76,7 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
-int zip_create_qps(struct hisi_qp **qps, int ctx_num);
+int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
int hisi_zip_register_to_crypto(void);
void hisi_zip_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index c73707c2e539..01fd6a78111d 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -158,13 +158,13 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
hisi_qm_release_qp(ctx->qp);
}
-static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
+static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip *hisi_zip;
int ret, i, j;
- ret = zip_create_qps(qps, HZIP_CTX_Q_NUM);
+ ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
if (ret) {
pr_err("Can not create zip qps!\n");
return -ENODEV;
@@ -379,7 +379,7 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
int ret;
- ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name));
+ ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
if (ret)
return ret;
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 2229a21ae7c8..e2845b2c963d 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -234,9 +234,10 @@ static const struct pci_device_id hisi_zip_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
-int zip_create_qps(struct hisi_qp **qps, int qp_num)
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
{
- int node = cpu_to_node(smp_processor_id());
+ if (node == NUMA_NO_NODE)
+ node = cpu_to_node(smp_processor_id());
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 0e25fc3087f3..87226b7c2795 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -330,7 +330,7 @@ static int img_hash_write_via_dma(struct img_hash_dev *hdev)
static int img_hash_dma_init(struct img_hash_dev *hdev)
{
struct dma_slave_config dma_conf;
- int err = -EINVAL;
+ int err;
hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
if (IS_ERR(hdev->dma_lch)) {
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 2cb53fbae841..fa7398e68858 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1135,11 +1135,12 @@ static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
static int safexcel_request_ring_irq(void *pdev, int irqid,
int is_pci_dev,
+ int ring_id,
irq_handler_t handler,
irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
- int ret, irq;
+ int ret, irq, cpu;
struct device *dev;
if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
@@ -1177,6 +1178,10 @@ static int safexcel_request_ring_irq(void *pdev, int irqid,
return ret;
}
+ /* Set affinity */
+ cpu = cpumask_local_spread(ring_id, NUMA_NO_NODE);
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
return irq;
}
@@ -1611,6 +1616,7 @@ static int safexcel_probe_generic(void *pdev,
irq = safexcel_request_ring_irq(pdev,
EIP197_IRQ_NUMBER(i, is_pci_dev),
is_pci_dev,
+ i,
safexcel_irq_ring,
safexcel_irq_ring_thread,
ring_irq);
@@ -1619,6 +1625,7 @@ static int safexcel_probe_generic(void *pdev,
return irq;
}
+ priv->ring[i].irq = irq;
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
INIT_WORK(&priv->ring[i].work_data.work,
@@ -1756,8 +1763,10 @@ static int safexcel_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->reg_clk);
clk_disable_unprepare(priv->clk);
- for (i = 0; i < priv->config.rings; i++)
+ for (i = 0; i < priv->config.rings; i++) {
+ irq_set_affinity_hint(priv->ring[i].irq, NULL);
destroy_workqueue(priv->ring[i].workqueue);
+ }
return 0;
}
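
The safexcel change above spreads each ring's IRQ over the CPUs with cpumask_local_spread() and records the IRQ number so the affinity hint can be cleared on remove (an IRQ should not be freed with a hint still set). The spreading idiom, sketched with illustrative names:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>
    #include <linux/numa.h>

    static void spread_ring_irqs(const int *irqs, int nr_rings)
    {
        int i, cpu;

        for (i = 0; i < nr_rings; i++) {
                /* i-th online CPU, preferring the local NUMA node */
                cpu = cpumask_local_spread(i, NUMA_NO_NODE);
                irq_set_affinity_hint(irqs[i], get_cpu_mask(cpu));
        }
    }
    /* teardown: irq_set_affinity_hint(irq, NULL) before free_irq() */
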
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 94016c505abb..7c5fe382d272 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -707,6 +707,9 @@ struct safexcel_ring {
*/
struct crypto_async_request *req;
struct crypto_async_request *backlog;
+
+ /* irq of this ring */
+ int irq;
};
/* EIP integration context flags */
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 0c5e80c3f6e3..1ac3253b7903 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1300,6 +1300,7 @@ struct safexcel_alg_template safexcel_alg_ecb_aes = {
.cra_driver_name = "safexcel-ecb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1337,6 +1338,7 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
.cra_driver_name = "safexcel-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1374,6 +1376,7 @@ struct safexcel_alg_template safexcel_alg_cfb_aes = {
.cra_driver_name = "safexcel-cfb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1411,6 +1414,7 @@ struct safexcel_alg_template safexcel_alg_ofb_aes = {
.cra_driver_name = "safexcel-ofb-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1485,6 +1489,7 @@ struct safexcel_alg_template safexcel_alg_ctr_aes = {
.cra_driver_name = "safexcel-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1545,6 +1550,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = {
.cra_driver_name = "safexcel-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1582,6 +1588,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = {
.cra_driver_name = "safexcel-ecb-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1642,6 +1649,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
.cra_driver_name = "safexcel-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1679,6 +1687,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
.cra_driver_name = "safexcel-ecb-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1751,6 +1760,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1786,6 +1796,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1821,6 +1832,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1856,6 +1868,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1891,6 +1904,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1927,6 +1941,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1963,6 +1978,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1999,6 +2015,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2035,6 +2052,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2071,6 +2089,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2107,6 +2126,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2143,6 +2163,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2179,6 +2200,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2215,6 +2237,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2251,6 +2274,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
.cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2285,6 +2309,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2319,6 +2344,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha256-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2353,6 +2379,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha224-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2387,6 +2414,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha512-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2421,6 +2449,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_ctr_aes = {
.cra_driver_name = "safexcel-authenc-hmac-sha384-ctr-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2534,6 +2563,7 @@ struct safexcel_alg_template safexcel_alg_xts_aes = {
.cra_driver_name = "safexcel-xts-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = XTS_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2646,6 +2676,7 @@ struct safexcel_alg_template safexcel_alg_gcm = {
.cra_driver_name = "safexcel-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2769,6 +2800,7 @@ struct safexcel_alg_template safexcel_alg_ccm = {
.cra_driver_name = "safexcel-ccm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2832,6 +2864,7 @@ struct safexcel_alg_template safexcel_alg_chacha20 = {
.cra_driver_name = "safexcel-chacha20",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -2993,6 +3026,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly = {
/* +1 to put it above HW chacha + SW poly */
.cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
@@ -3032,6 +3066,7 @@ struct safexcel_alg_template safexcel_alg_chachapoly_esp = {
/* +1 to put it above HW chacha + SW poly */
.cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
@@ -3110,6 +3145,7 @@ struct safexcel_alg_template safexcel_alg_ecb_sm4 = {
.cra_driver_name = "safexcel-ecb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3147,6 +3183,7 @@ struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
.cra_driver_name = "safexcel-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3184,6 +3221,7 @@ struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
.cra_driver_name = "safexcel-ofb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3221,6 +3259,7 @@ struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
.cra_driver_name = "safexcel-cfb-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3273,6 +3312,7 @@ struct safexcel_alg_template safexcel_alg_ctr_sm4 = {
.cra_driver_name = "safexcel-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3332,6 +3372,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3441,6 +3482,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = {
.cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SM4_BLOCK_SIZE,
@@ -3476,6 +3518,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = {
.cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3510,6 +3553,7 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = {
.cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3578,6 +3622,7 @@ struct safexcel_alg_template safexcel_alg_rfc4106_gcm = {
.cra_driver_name = "safexcel-rfc4106-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3622,6 +3667,7 @@ struct safexcel_alg_template safexcel_alg_rfc4543_gcm = {
.cra_driver_name = "safexcel-rfc4543-gcm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -3713,6 +3759,7 @@ struct safexcel_alg_template safexcel_alg_rfc4309_ccm = {
.cra_driver_name = "safexcel-rfc4309-ccm-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
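For context: every hunk in this file ORs CRYPTO_ALG_ALLOCATES_MEMORY into cra_flags because the safexcel driver can allocate memory while servicing a request. Callers on no-allocation I/O paths can then steer around such implementations by putting the flag into the allocation mask. A minimal consumer-side sketch, assuming a caller that wants cbc(aes); the helper name is illustrative and not part of this patch:

	#include <crypto/skcipher.h>

	/*
	 * Ask for a cbc(aes) implementation that never allocates memory per
	 * request: the flag is set in the mask and clear in the type, so only
	 * algorithms without CRYPTO_ALG_ALLOCATES_MEMORY match.
	 */
	static struct crypto_skcipher *alloc_noalloc_cbc_aes(void)
	{
		return crypto_alloc_skcipher("cbc(aes)", 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	}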
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 43962bc709c6..16a467969d8e 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -992,6 +992,7 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
.cra_driver_name = "safexcel-sha1",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1235,6 +1236,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
.cra_driver_name = "safexcel-hmac-sha1",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1291,6 +1293,7 @@ struct safexcel_alg_template safexcel_alg_sha256 = {
.cra_driver_name = "safexcel-sha256",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1347,6 +1350,7 @@ struct safexcel_alg_template safexcel_alg_sha224 = {
.cra_driver_name = "safexcel-sha224",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1418,6 +1422,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
.cra_driver_name = "safexcel-hmac-sha224",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1489,6 +1494,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
.cra_driver_name = "safexcel-hmac-sha256",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1545,6 +1551,7 @@ struct safexcel_alg_template safexcel_alg_sha512 = {
.cra_driver_name = "safexcel-sha512",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1601,6 +1608,7 @@ struct safexcel_alg_template safexcel_alg_sha384 = {
.cra_driver_name = "safexcel-sha384",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1672,6 +1680,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
.cra_driver_name = "safexcel-hmac-sha512",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1743,6 +1752,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
.cra_driver_name = "safexcel-hmac-sha384",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1799,6 +1809,7 @@ struct safexcel_alg_template safexcel_alg_md5 = {
.cra_driver_name = "safexcel-md5",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1871,6 +1882,7 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
.cra_driver_name = "safexcel-hmac-md5",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -1952,6 +1964,7 @@ struct safexcel_alg_template safexcel_alg_crc32 = {
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2041,6 +2054,7 @@ struct safexcel_alg_template safexcel_alg_cbcmac = {
.cra_driver_name = "safexcel-cbcmac-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2136,6 +2150,7 @@ struct safexcel_alg_template safexcel_alg_xcbcmac = {
.cra_driver_name = "safexcel-xcbc-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2232,6 +2247,7 @@ struct safexcel_alg_template safexcel_alg_cmac = {
.cra_driver_name = "safexcel-cmac-aes",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2288,6 +2304,7 @@ struct safexcel_alg_template safexcel_alg_sm3 = {
.cra_driver_name = "safexcel-sm3",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
@@ -2359,6 +2376,7 @@ struct safexcel_alg_template safexcel_alg_hmac_sm3 = {
.cra_driver_name = "safexcel-hmac-sm3",
.cra_priority = SAFEXCEL_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index ad73fc946682..f478bb0a566a 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1402,7 +1402,8 @@ static int __init ixp_module_init(void)
/* block ciphers */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
if (!cra->setkey)
cra->setkey = ablk_setkey;
if (!cra->encrypt)
@@ -1435,7 +1436,8 @@ static int __init ixp_module_init(void)
/* authenc */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC;
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
cra->setkey = cra->setkey ?: aead_setkey;
cra->setauthsize = aead_setauthsize;
cra->encrypt = aead_encrypt;
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 8a5f0b0bdf77..d63bca9718dc 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -438,7 +438,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
struct mv_cesa_dev *cesa;
struct mv_cesa_engine *engines;
struct resource *res;
- int irq, ret, i;
+ int irq, ret, i, cpu;
u32 sram_size;
if (cesa_dev) {
@@ -505,6 +505,8 @@ static int mv_cesa_probe(struct platform_device *pdev)
goto err_cleanup;
}
+ engine->irq = irq;
+
/*
* Not all platforms can gate the CESA clocks: do not complain
* if the clock does not exist.
@@ -548,6 +550,10 @@ static int mv_cesa_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup;
+ /* Set affinity */
+ cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+
crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
atomic_set(&engine->load, 0);
INIT_LIST_HEAD(&engine->complete_queue);
@@ -570,6 +576,8 @@ err_cleanup:
clk_disable_unprepare(cesa->engines[i].zclk);
clk_disable_unprepare(cesa->engines[i].clk);
mv_cesa_put_sram(pdev, i);
+ if (cesa->engines[i].irq > 0)
+ irq_set_affinity_hint(cesa->engines[i].irq, NULL);
}
return ret;
@@ -586,6 +594,7 @@ static int mv_cesa_remove(struct platform_device *pdev)
clk_disable_unprepare(cesa->engines[i].zclk);
clk_disable_unprepare(cesa->engines[i].clk);
mv_cesa_put_sram(pdev, i);
+ irq_set_affinity_hint(cesa->engines[i].irq, NULL);
}
return 0;
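For context: besides the flag additions elsewhere in this series, the cesa probe hunk records each engine's IRQ and spreads completion interrupts across CPUs with cpumask_local_spread(); both the probe error path and mv_cesa_remove() clear the hint again before the IRQ goes away. A condensed sketch of the pattern, with the engine/irq naming assumed from the driver:

	#include <linux/interrupt.h>
	#include <linux/cpumask.h>
	#include <linux/numa.h>

	/* Probe time: pin engine N's completion IRQ to a spread-out CPU. */
	static void engine_set_irq_affinity(unsigned int engine_id, int irq)
	{
		int cpu = cpumask_local_spread(engine_id, NUMA_NO_NODE);

		irq_set_affinity_hint(irq, get_cpu_mask(cpu));
	}

	/* Remove/error paths: the hint must be dropped again. */
	static void engine_clear_irq_affinity(int irq)
	{
		irq_set_affinity_hint(irq, NULL);
	}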
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index e8632d5f343f..0c9cbb681e49 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -457,6 +457,7 @@ struct mv_cesa_engine {
atomic_t load;
struct mv_cesa_tdma_chain chain;
struct list_head complete_queue;
+ int irq;
};
/**
diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index f133c2ccb5ae..45b4d7a29833 100644
--- a/drivers/crypto/marvell/cesa/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -508,7 +508,8 @@ struct skcipher_alg mv_cesa_ecb_des_alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "mv-ecb-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
@@ -558,7 +559,8 @@ struct skcipher_alg mv_cesa_cbc_des_alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "mv-cbc-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
.cra_alignmask = 0,
@@ -616,7 +618,8 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "mv-ecb-des3-ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
@@ -669,7 +672,8 @@ struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "mv-cbc-des3-ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
.cra_alignmask = 0,
@@ -741,7 +745,8 @@ struct skcipher_alg mv_cesa_ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "mv-ecb-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
.cra_alignmask = 0,
@@ -790,7 +795,8 @@ struct skcipher_alg mv_cesa_cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "mv-cbc-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
.cra_alignmask = 0,
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index b971284332b6..bd0bd9ffd6e9 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -921,6 +921,7 @@ struct ahash_alg mv_md5_alg = {
.cra_driver_name = "mv-md5",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
@@ -991,6 +992,7 @@ struct ahash_alg mv_sha1_alg = {
.cra_driver_name = "mv-sha1",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
@@ -1064,6 +1066,7 @@ struct ahash_alg mv_sha256_alg = {
.cra_driver_name = "mv-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
@@ -1298,6 +1301,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
.cra_driver_name = "mv-hmac-md5",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
@@ -1368,6 +1372,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
.cra_driver_name = "mv-hmac-sha1",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
@@ -1438,6 +1443,7 @@ struct ahash_alg mv_ahmac_sha256_alg = {
.cra_driver_name = "mv-hmac-sha256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index fec8f3b9b112..cc103b1bc224 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -878,11 +878,11 @@ static int copy_ucode_to_dma_mem(struct device *dev,
/* Byte swap 64-bit */
for (i = 0; i < (ucode->size / 8); i++)
- ((u64 *)ucode->align_va)[i] =
+ ((__be64 *)ucode->align_va)[i] =
cpu_to_be64(((u64 *)ucode->align_va)[i]);
/* Ucode needs 16-bit swap */
for (i = 0; i < (ucode->size / 2); i++)
- ((u16 *)ucode->align_va)[i] =
+ ((__be16 *)ucode->align_va)[i] =
cpu_to_be16(((u16 *)ucode->align_va)[i]);
return 0;
}
@@ -1463,8 +1463,8 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
struct otx_cpt_eng_grps *eng_grps,
int pf_type)
{
- struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = { 0 };
- struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
+ struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
+ struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
struct tar_arch_info_t *tar_arch = NULL;
char *tar_filename;
int i, ret = 0;
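For context: copy_ucode_to_dma_mem() now stores through __be64/__be16 pointers so that sparse's endianness checking accepts the in-place swap; the generated code is unchanged. The swap_data64() rewrite further down uses the same shape. A minimal sketch of the idiom:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Convert an array of native-endian u64 words to big endian in place. */
	static void swap64_inplace(void *buf, size_t len)
	{
		__be64 *dst = buf;
		u64 *src = buf;
		size_t i;

		for (i = 0; i < len / sizeof(u64); i++)
			dst[i] = cpu_to_be64(src[i]);
	}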
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
index 14f02b60d0c2..8620ac87a447 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
@@ -74,7 +74,7 @@ struct otx_cpt_ucode_ver_num {
struct otx_cpt_ucode_hdr {
struct otx_cpt_ucode_ver_num ver_num;
u8 ver_str[OTX_CPT_UCODE_VER_STR_SZ];
- u32 code_length;
+ __be32 code_length;
u32 padding[3];
};
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 1e0a1d70ebd3..90bb31329d4b 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -239,7 +239,6 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
int ivsize = crypto_skcipher_ivsize(stfm);
u32 start = req->cryptlen - ivsize;
- u64 *ctrl_flags = NULL;
gfp_t flags;
flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
@@ -280,8 +279,7 @@ static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
- ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
- *ctrl_flags = cpu_to_be64(*ctrl_flags);
+ fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
/*
* Storing Packet Data Information in offset
@@ -692,20 +690,17 @@ static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
static inline void swap_data32(void *buf, u32 len)
{
- u32 *store = (u32 *) buf;
- int i = 0;
-
- for (i = 0 ; i < len/sizeof(u32); i++, store++)
- *store = cpu_to_be32(*store);
+ cpu_to_be32_array(buf, buf, len / 4);
}
static inline void swap_data64(void *buf, u32 len)
{
- u64 *store = (u64 *) buf;
+ __be64 *dst = buf;
+ u64 *src = buf;
int i = 0;
- for (i = 0 ; i < len/sizeof(u64); i++, store++)
- *store = cpu_to_be64(*store);
+ for (i = 0 ; i < len / 8; i++, src++, dst++)
+ *dst = cpu_to_be64p(src);
}
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
@@ -1012,7 +1007,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
/* Unknown cipher type */
return -EINVAL;
}
- rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags);
+ rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);
req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
@@ -1032,7 +1027,7 @@ static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
fctx->enc.enc_ctrl.e.mac_len = mac_len;
- fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);
+ fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
/*
* Storing Packet Data Information in offset
@@ -1306,7 +1301,7 @@ static int otx_cpt_aead_null_decrypt(struct aead_request *req)
static struct skcipher_alg otx_cpt_skciphers[] = { {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "cpt_xts_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
@@ -1323,7 +1318,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cpt_cbc_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
@@ -1340,7 +1335,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "cpt_ecb_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
@@ -1357,7 +1352,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "cfb(aes)",
.base.cra_driver_name = "cpt_cfb_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
.base.cra_alignmask = 7,
@@ -1374,7 +1369,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cpt_cbc_des3_ede",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
.base.cra_alignmask = 7,
@@ -1391,7 +1386,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
}, {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "cpt_ecb_des3_ede",
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
.base.cra_alignmask = 7,
@@ -1412,7 +1407,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1431,7 +1426,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1450,7 +1445,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1469,7 +1464,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1488,7 +1483,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha1_ecb_null",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1507,7 +1502,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha256_ecb_null",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1526,7 +1521,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha384_ecb_null",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1545,7 +1540,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
.cra_driver_name = "cpt_hmac_sha512_ecb_null",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
@@ -1564,7 +1559,7 @@ static struct aead_alg otx_cpt_aeads[] = { {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "cpt_rfc4106_gcm_aes",
.cra_blocksize = 1,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
.cra_priority = 4001,
.cra_alignmask = 0,
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
index 67cc0025f5d5..4181b5c5c356 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.h
@@ -66,7 +66,8 @@ enum otx_cpt_aes_key_len {
};
union otx_cpt_encr_ctrl {
- u64 flags;
+ __be64 flags;
+ u64 cflags;
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 enc_cipher:4;
@@ -138,7 +139,8 @@ struct otx_cpt_des3_ctx {
};
union otx_cpt_offset_ctrl_word {
- u64 flags;
+ __be64 flags;
+ u64 cflags;
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
u64 reserved:32;
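For context: pairing a __be64 flags member with a CPU-endian u64 cflags in the same union gives the driver two correctly annotated views of one 64-bit word, so it can compose the value natively and swap it exactly once, without casts that sparse would reject. A minimal sketch of the idiom; the names here are illustrative:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* One storage location, two endianness views for sparse. */
	union ctrl_word {
		__be64 flags;	/* big-endian view handed to hardware */
		u64 cflags;	/* native view used while composing bits */
	};

	static void ctrl_word_finalize(union ctrl_word *w)
	{
		/* Swap once, just before the descriptor goes out. */
		w->flags = cpu_to_be64(w->cflags);
	}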
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
index 239195cccf93..cbc3d7869ebe 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
@@ -202,11 +202,10 @@ static inline int setup_sgio_list(struct pci_dev *pdev,
info->dlen = dlen;
info->in_buffer = (u8 *)info + info_len;
- ((u16 *)info->in_buffer)[0] = req->outcnt;
- ((u16 *)info->in_buffer)[1] = req->incnt;
+ ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
+ ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
((u16 *)info->in_buffer)[2] = 0;
((u16 *)info->in_buffer)[3] = 0;
- *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);
/* Setup gather (input) components */
if (setup_sgio_components(pdev, req->in, req->incnt,
@@ -367,8 +366,6 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
iq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
iq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);
- /* 64-bit swap for microcode data reads, not needed for addresses*/
- iq_cmd.cmd.u64 = cpu_to_be64(iq_cmd.cmd.u64);
iq_cmd.dptr = info->dptr_baddr;
iq_cmd.rptr = info->rptr_baddr;
iq_cmd.cptr.u64 = 0;
@@ -436,7 +433,7 @@ static int cpt_process_ccode(struct pci_dev *pdev,
u8 ccode = cpt_status->s.compcode;
union otx_cpt_error_code ecode;
- ecode.u = be64_to_cpu(*((u64 *) cpt_info->out_buffer));
+ ecode.u = be64_to_cpup((__be64 *)cpt_info->out_buffer);
switch (ccode) {
case CPT_COMP_E_FAULT:
dev_err(&pdev->dev,
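For context: the request-manager hunks switch to typed stores and the pointer-based conversion helpers (cpu_to_be16(), be64_to_cpup()) on properly annotated pointers, which lets the redundant whole-word swaps be dropped. A one-function sketch of reading a big-endian completion word; the helper name is illustrative:

	#include <asm/byteorder.h>
	#include <linux/types.h>

	/* Read a big-endian 64-bit status word out of a DMA buffer. */
	static u64 read_be64_field(const void *buf)
	{
		return be64_to_cpup((const __be64 *)buf);
	}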
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
index a4c9ff730b13..d912fe0c532d 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
@@ -92,10 +92,10 @@ union otx_cpt_ctrl_info {
union otx_cpt_iq_cmd_word0 {
u64 u64;
struct {
- u16 opcode;
- u16 param1;
- u16 param2;
- u16 dlen;
+ __be16 opcode;
+ __be16 param1;
+ __be16 param2;
+ __be16 dlen;
} s;
};
@@ -123,16 +123,16 @@ struct otx_cpt_sglist_component {
union {
u64 len;
struct {
- u16 len0;
- u16 len1;
- u16 len2;
- u16 len3;
+ __be16 len0;
+ __be16 len1;
+ __be16 len2;
+ __be16 len3;
} s;
} u;
- u64 ptr0;
- u64 ptr1;
- u64 ptr2;
- u64 ptr3;
+ __be64 ptr0;
+ __be64 ptr1;
+ __be64 ptr2;
+ __be64 ptr3;
};
struct otx_cpt_pending_entry {
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 78d660d963e2..4ad3571ab6af 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -137,8 +137,6 @@ struct mtk_aes_gcm_ctx {
u32 authsize;
size_t textlen;
-
- struct crypto_skcipher *ctr;
};
struct mtk_aes_drv {
@@ -996,17 +994,8 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
u32 keylen)
{
struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
- struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
- struct crypto_skcipher *ctr = gctx->ctr;
- struct {
- u32 hash[4];
- u8 iv[8];
-
- struct crypto_wait wait;
-
- struct scatterlist sg[1];
- struct skcipher_request req;
- } *data;
+ u8 hash[AES_BLOCK_SIZE] __aligned(4) = {};
+ struct crypto_aes_ctx aes_ctx;
int err;
switch (keylen) {
@@ -1026,39 +1015,18 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
ctx->keylen = SIZE_IN_WORDS(keylen);
- /* Same as crypto_gcm_setkey() from crypto/gcm.c */
- crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ctr, key, keylen);
+ err = aes_expandkey(&aes_ctx, key, keylen);
if (err)
return err;
- data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- crypto_init_wait(&data->wait);
- sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
- skcipher_request_set_tfm(&data->req, ctr);
- skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &data->wait);
- skcipher_request_set_crypt(&data->req, data->sg, data->sg,
- AES_BLOCK_SIZE, data->iv);
-
- err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
- &data->wait);
- if (err)
- goto out;
+ aes_encrypt(&aes_ctx, hash, hash);
+ memzero_explicit(&aes_ctx, sizeof(aes_ctx));
mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
- mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
+ mtk_aes_write_state_be(ctx->key + ctx->keylen, (const u32 *)hash,
AES_BLOCK_SIZE);
-out:
- kzfree(data);
- return err;
+
+ return 0;
}
static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
@@ -1095,32 +1063,17 @@ static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
- ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(ctx->ctr)) {
- pr_err("Error allocating ctr(aes)\n");
- return PTR_ERR(ctx->ctr);
- }
-
crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
ctx->base.start = mtk_aes_gcm_start;
return 0;
}
-static void mtk_aes_gcm_exit(struct crypto_aead *aead)
-{
- struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
-
- crypto_free_skcipher(ctx->ctr);
-}
-
static struct aead_alg aes_gcm_alg = {
.setkey = mtk_aes_gcm_setkey,
.setauthsize = mtk_aes_gcm_setauthsize,
.encrypt = mtk_aes_gcm_encrypt,
.decrypt = mtk_aes_gcm_decrypt,
.init = mtk_aes_gcm_init,
- .exit = mtk_aes_gcm_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
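For context: mtk_aes_gcm_setkey() used to allocate a whole ctr(aes) skcipher just to encrypt a block of zeroes for the GHASH key; with the AES library the hash key H = AES-ENC(key, 0^128) can be computed synchronously, and the ctr tfm plus the .exit handler disappear. A reduced sketch of the new derivation, assuming the generic AES library from <crypto/aes.h>; the function name is illustrative:

	#include <crypto/aes.h>
	#include <linux/string.h>

	/* Derive the GHASH key H = AES-ENC(key, 0^128) via the AES library. */
	static int derive_ghash_key(const u8 *key, unsigned int keylen,
				    u8 hash[AES_BLOCK_SIZE])
	{
		struct crypto_aes_ctx aes_ctx;
		int err;

		err = aes_expandkey(&aes_ctx, key, keylen);
		if (err)
			return err;

		memset(hash, 0, AES_BLOCK_SIZE);
		aes_encrypt(&aes_ctx, hash, hash);
		/* Round keys are secrets: wipe them before returning. */
		memzero_explicit(&aes_ctx, sizeof(aes_ctx));
		return 0;
	}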
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d84530293036..909a7eb748e3 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -97,7 +97,7 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
@@ -105,6 +105,7 @@ struct dcp_async_ctx {
struct dcp_aes_req_ctx {
unsigned int enc:1;
unsigned int ecb:1;
+ struct skcipher_request fallback_req; // keep at the end
};
struct dcp_sha_req_ctx {
@@ -426,21 +427,20 @@ static int dcp_chan_thread_aes(void *data)
static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
req->cryptlen, req->iv);
if (enc)
- ret = crypto_skcipher_encrypt(subreq);
+ ret = crypto_skcipher_encrypt(&rctx->fallback_req);
else
- ret = crypto_skcipher_decrypt(subreq);
-
- skcipher_request_zero(subreq);
+ ret = crypto_skcipher_decrypt(&rctx->fallback_req);
return ret;
}
@@ -510,24 +510,25 @@ static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
* but is supported by in-kernel software implementation, we use
* software fallback.
*/
- crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(actx->fallback,
+ crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(actx->fallback, key, len);
+ return crypto_skcipher_setkey(actx->fallback, key, len);
}
static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- struct crypto_sync_skcipher *blk;
+ struct crypto_skcipher *blk;
- blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
actx->fallback = blk;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
+ crypto_skcipher_reqsize(blk));
return 0;
}
@@ -535,7 +536,7 @@ static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(actx->fallback);
+ crypto_free_skcipher(actx->fallback);
}
/*
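For context: mxs-dcp (like the omap and picoxcell conversions below) moves from an on-stack sync skcipher to an async fallback whose request lives at the tail of the driver's request context, sized via crypto_skcipher_reqsize(); forwarding the original completion callback is what makes an asynchronous fallback legal here. The skeleton of the pattern, with driver-specific names replaced by illustrative ones:

	#include <crypto/skcipher.h>
	#include <crypto/internal/skcipher.h>

	struct drv_req_ctx {
		/* driver-private per-request state ... */
		struct skcipher_request fallback_req;	/* must stay last */
	};

	static int drv_init_tfm(struct crypto_skcipher *tfm,
				struct crypto_skcipher *fallback)
	{
		/* Reserve our ctx plus the fallback's own request tail. */
		crypto_skcipher_set_reqsize(tfm, sizeof(struct drv_req_ctx) +
						 crypto_skcipher_reqsize(fallback));
		return 0;
	}

	static int drv_do_fallback(struct skcipher_request *req,
				   struct crypto_skcipher *fallback, bool enc)
	{
		struct drv_req_ctx *rctx = skcipher_request_ctx(req);

		skcipher_request_set_tfm(&rctx->fallback_req, fallback);
		/* Forward the caller's completion so async fallbacks work. */
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
			     crypto_skcipher_decrypt(&rctx->fallback_req);
	}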
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 6a828bbecea4..d8aec5153b21 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1382,7 +1382,8 @@ static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
alg->base.cra_priority = N2_CRA_PRIORITY;
- alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
+ alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY;
alg->base.cra_blocksize = tmpl->block_size;
p->enc_type = tmpl->enc_type;
alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index b5aff20c5900..4fd14d90cc40 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -139,7 +139,7 @@ int omap_aes_write_ctrl(struct omap_aes_dev *dd)
for (i = 0; i < key32; i++) {
omap_aes_write(dd, AES_REG_KEY(dd, i),
- __le32_to_cpu(dd->ctx->key[i]));
+ (__force u32)cpu_to_le32(dd->ctx->key[i]));
}
if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
@@ -363,7 +363,7 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
int err;
- pr_debug("total: %d\n", dd->total);
+ pr_debug("total: %zu\n", dd->total);
if (!dd->pio_only) {
err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
@@ -409,7 +409,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
- pr_debug("total: %d\n", dd->total);
+ pr_debug("total: %zu\n", dd->total);
omap_aes_dma_stop(dd);
@@ -548,20 +548,18 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
!!(mode & FLAGS_CBC));
if (req->cryptlen < aes_fallback_sz) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags, NULL,
- NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
if (mode & FLAGS_ENCRYPT)
- ret = crypto_skcipher_encrypt(subreq);
+ ret = crypto_skcipher_encrypt(&rctx->fallback_req);
else
- ret = crypto_skcipher_decrypt(subreq);
-
- skcipher_request_zero(subreq);
+ ret = crypto_skcipher_decrypt(&rctx->fallback_req);
return ret;
}
dd = omap_aes_find_dev(rctx);
@@ -590,11 +588,11 @@ static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
return 0;
@@ -640,15 +638,16 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(&tfm->base);
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_sync_skcipher *blk;
+ struct crypto_skcipher *blk;
- blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
ctx->fallback = blk;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) +
+ crypto_skcipher_reqsize(blk));
ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
@@ -662,7 +661,7 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->fallback)
- crypto_free_sync_skcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
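For context: unlike mxs-dcp, omap-aes keeps the fallback only for requests shorter than aes_fallback_sz, where DMA setup costs more than the accelerator saves; everything larger still goes to the hardware. Condensed dispatch logic, reusing the drv_do_fallback() shape sketched above (helper names illustrative, threshold name from the driver):

	if (req->cryptlen < aes_fallback_sz)
		return drv_do_fallback(req, ctx->fallback,
				       mode & FLAGS_ENCRYPT);
	/* otherwise queue the request to the hardware engine */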
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 2d111bf906e1..23d073e87bb8 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -97,7 +97,7 @@ struct omap_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
};
struct omap_aes_gcm_ctx {
@@ -110,6 +110,7 @@ struct omap_aes_reqctx {
unsigned long mode;
u8 iv[AES_BLOCK_SIZE];
u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)];
+ struct skcipher_request fallback_req; // keep at the end
};
#define OMAP_AES_QUEUE_LENGTH 1
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 8eda43319204..c9d38bcfd1c7 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -87,7 +87,7 @@ struct omap_des_ctx {
struct omap_des_dev *dd;
int keylen;
- u32 key[(3 * DES_KEY_SIZE) / sizeof(u32)];
+ __le32 key[(3 * DES_KEY_SIZE) / sizeof(u32)];
unsigned long flags;
};
@@ -461,7 +461,7 @@ static int omap_des_crypt_dma_start(struct omap_des_dev *dd)
crypto_skcipher_reqtfm(dd->req));
int err;
- pr_debug("total: %d\n", dd->total);
+ pr_debug("total: %zd\n", dd->total);
if (!dd->pio_only) {
err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
@@ -504,7 +504,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
{
- pr_debug("total: %d\n", dd->total);
+ pr_debug("total: %zd\n", dd->total);
omap_des_dma_stop(dd);
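For context: dd->total is a size_t, so the old "%d" specifiers in these pr_debug() calls were genuine format mismatches; "%zu"/"%zd" are the size_t-width conversions. In miniature:

	#include <linux/printk.h>
	#include <linux/types.h>

	static void log_total(size_t total)
	{
		pr_debug("total: %zu\n", total);	/* %zu matches size_t */
	}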
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 82691a057d2a..954d703f2981 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -357,10 +357,10 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
if (big_endian)
for (i = 0; i < d; i++)
- hash[i] = be32_to_cpu(in[i]);
+ hash[i] = be32_to_cpup((__be32 *)in + i);
else
for (i = 0; i < d; i++)
- hash[i] = le32_to_cpu(in[i]);
+ hash[i] = le32_to_cpup((__le32 *)in + i);
}
static int omap_sham_hw_init(struct omap_sham_dev *dd)
@@ -522,7 +522,7 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
int mlen;
struct sg_mapping_iter mi;
- dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
ctx->digcnt, length, final);
dd->pdata->write_ctrl(dd, length, final, 0);
@@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
struct dma_slave_config cfg;
int ret;
- dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
+ dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
ctx->digcnt, length, final);
if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
@@ -871,7 +871,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
nbytes += req->nbytes - rctx->offset;
dev_dbg(rctx->dd->dev,
- "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
+ "%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
__func__, nbytes, bs, rctx->total, rctx->offset,
rctx->bufcnt);
@@ -932,7 +932,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
return 0;
}
-struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
+static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
{
struct omap_sham_dev *dd;
@@ -1023,7 +1023,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
!(dd->flags & BIT(FLAGS_HUGE));
- dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, final: %d",
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
ctx->total, ctx->digcnt, final);
if (ctx->total < get_block_size(ctx) ||
@@ -1036,7 +1036,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
err = omap_sham_xmit_dma(dd, ctx->total, final);
/* wait for dma completion before can take more data */
- dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
+ dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);
return err;
}
@@ -1097,7 +1097,7 @@ static int omap_sham_finish(struct ahash_request *req)
err = omap_sham_finish_hmac(req);
}
- dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
+ dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);
return err;
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 7384e91c8b32..dac6eb37fff9 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -86,6 +86,7 @@ struct spacc_req {
dma_addr_t src_addr, dst_addr;
struct spacc_ddt *src_ddt, *dst_ddt;
void (*complete)(struct spacc_req *req);
+ struct skcipher_request fallback_req; // keep at the end
};
struct spacc_aead {
@@ -158,7 +159,7 @@ struct spacc_ablk_ctx {
* The fallback cipher. If the operation can't be done in hardware,
* fallback to a software version.
*/
- struct crypto_sync_skcipher *sw_cipher;
+ struct crypto_skcipher *sw_cipher;
};
/* AEAD cipher context. */
@@ -792,13 +793,13 @@ static int spacc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
* Set the fallback transform to use the same request flags as
* the hardware transform.
*/
- crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
+ crypto_skcipher_clear_flags(ctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(ctx->sw_cipher,
+ crypto_skcipher_set_flags(ctx->sw_cipher,
cipher->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
+ err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
if (err)
goto sw_setkey_failed;
}
@@ -900,7 +901,7 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req,
struct crypto_tfm *old_tfm =
crypto_skcipher_tfm(crypto_skcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
+ struct spacc_req *dev_req = skcipher_request_ctx(req);
int err;
/*
@@ -908,13 +909,13 @@ static int spacc_ablk_do_fallback(struct skcipher_request *req,
* the ciphering has completed, put the old transform back into the
* request.
*/
- skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
- skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
+ skcipher_request_set_tfm(&dev_req->fallback_req, ctx->sw_cipher);
+ skcipher_request_set_callback(&dev_req->fallback_req, req->base.flags,
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(&dev_req->fallback_req, req->src, req->dst,
req->cryptlen, req->iv);
- err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+ err = is_encrypt ? crypto_skcipher_encrypt(&dev_req->fallback_req) :
+ crypto_skcipher_decrypt(&dev_req->fallback_req);
return err;
}
@@ -1007,19 +1008,24 @@ static int spacc_ablk_init_tfm(struct crypto_skcipher *tfm)
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_sync_skcipher(
- alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ ctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
alg->base.cra_name);
return PTR_ERR(ctx->sw_cipher);
}
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req) +
+ crypto_skcipher_reqsize(ctx->sw_cipher));
+ } else {
+ /* request context size without the trailing fallback skcipher_request */
+ crypto_skcipher_set_reqsize(tfm, offsetof(struct spacc_req,
+ fallback_req));
}
+
ctx->generic.key_offs = spacc_alg->key_offs;
ctx->generic.iv_offs = spacc_alg->iv_offs;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct spacc_req));
-
return 0;
}
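The reqsize hunk above is why fallback_req must stay the last member of struct spacc_req: the fallback transform's own request context is laid out immediately after the embedded skcipher_request, so the total size is sizeof(struct spacc_req) plus crypto_skcipher_reqsize(ctx->sw_cipher), while algorithms without a fallback can stop at offsetof(struct spacc_req, fallback_req). An assumption-level layout sketch:

	/*
	 * Request context when a fallback is present:
	 *
	 *   | spacc_req fields ... | fallback_req | fallback's reqsize bytes |
	 *                          ^ offsetof(struct spacc_req, fallback_req)
	 *
	 * Without a fallback the context ends at the offsetof() mark, so no
	 * per-request memory is spent on the unused subrequest.
	 */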
@@ -1027,7 +1033,7 @@ static void spacc_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
struct spacc_ablk_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->sw_cipher);
+ crypto_free_skcipher(ctx->sw_cipher);
}
static int spacc_ablk_encrypt(struct skcipher_request *req)
@@ -1226,6 +1232,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
@@ -1251,6 +1258,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
@@ -1274,7 +1282,8 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_driver_name = "cbc-des-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
.base.cra_module = THIS_MODULE,
@@ -1298,7 +1307,8 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_driver_name = "ecb-des-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
.base.cra_module = THIS_MODULE,
@@ -1321,6 +1331,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_driver_name = "cbc-des3-ede-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
@@ -1345,6 +1356,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.base.cra_driver_name = "ecb-des3-ede-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
@@ -1376,6 +1388,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-aes-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1406,6 +1419,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-aes-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1436,6 +1450,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-aes-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1466,6 +1481,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-3des-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1497,6 +1513,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-3des-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1527,6 +1544,7 @@ static struct spacc_aead ipsec_engine_aeads[] = {
"cbc-3des-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1556,6 +1574,7 @@ static struct spacc_alg l2_engine_algs[] = {
.base.cra_driver_name = "f8-kasumi-picoxcell",
.base.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = 8,
.base.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
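Every algorithm this driver registers now also advertises CRYPTO_ALG_ALLOCATES_MEMORY, declaring that the implementation may allocate memory while processing a request. Callers that cannot tolerate allocation in the datapath exclude such implementations by putting the flag in the mask at allocation time; a minimal sketch (the helper name is hypothetical):

	#include <crypto/skcipher.h>

	/* Request an implementation guaranteed not to allocate memory per
	 * request: with the flag set in the mask and the corresponding type
	 * bit clear, the crypto API matches only algorithms that do NOT
	 * carry CRYPTO_ALG_ALLOCATES_MEMORY. */
	static struct crypto_skcipher *alloc_nonalloc_cbc_aes(void)
	{
		return crypto_alloc_skcipher("cbc(aes)", 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	}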
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 6bc68bc00d76..aee494d3da52 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index afc9a0a86747..8b5dd2c94ebf 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_C3XXX_HW_DATA_H_
#define ADF_C3XXX_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index d937cc7248a5..020d099409e5 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index d2d0ae445fd8..d2fedbd7113c 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
index 934f216acf39..7945a9cd1c60 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#ifndef ADF_C3XXXVF_HW_DATA_H_
#define ADF_C3XXXVF_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1dc5ac859f7b..11039fe55f61 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 618cec360b39..844ad5ed33fc 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
index 17a8a32d5c63..88504d2bf30d 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_C62X_HW_DATA_H_
#define ADF_C62X_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 2bc06c89d2fe..4ba9c14383af 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 38e4bc04f407..29fd3f1091ab 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
index a28d83e77422..a6c04cf7a43c 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#ifndef ADF_C62XVF_HW_DATA_H_
#define ADF_C62XVF_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index a68358b31292..b8b021d54bb5 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 33f0a6251e38..c1db8c26afb6 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
@@ -103,8 +59,8 @@ struct adf_accel_pci {
struct pci_dev *pci_dev;
struct adf_accel_msix msix_entries;
struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
- uint8_t revid;
- uint8_t sku;
+ u8 revid;
+ u8 sku;
} __packed;
enum dev_state {
@@ -144,7 +100,7 @@ static inline const char *get_sku_info(enum dev_sku_info info)
struct adf_hw_device_class {
const char *name;
const enum adf_device_type type;
- uint32_t instances;
+ u32 instances;
} __packed;
struct adf_cfg_device_data;
@@ -154,15 +110,15 @@ struct adf_etr_ring_data;
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
- uint32_t (*get_accel_mask)(uint32_t fuse);
- uint32_t (*get_ae_mask)(uint32_t fuse);
- uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
- uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
- uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
- uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
- uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
- uint32_t (*get_pf2vf_offset)(uint32_t i);
- uint32_t (*get_vintmsk_offset)(uint32_t i);
+ u32 (*get_accel_mask)(u32 fuse);
+ u32 (*get_ae_mask)(u32 fuse);
+ u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
+ u32 (*get_num_aes)(struct adf_hw_device_data *self);
+ u32 (*get_num_accels)(struct adf_hw_device_data *self);
+ u32 (*get_pf2vf_offset)(u32 i);
+ u32 (*get_vintmsk_offset)(u32 i);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev);
@@ -173,25 +129,25 @@ struct adf_hw_device_data {
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
- const uint32_t **cfg);
+ const u32 **cfg);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
void (*reset_device)(struct adf_accel_dev *accel_dev);
const char *fw_name;
const char *fw_mmp_name;
- uint32_t fuses;
- uint32_t accel_capabilities_mask;
- uint32_t instance_id;
- uint16_t accel_mask;
- uint16_t ae_mask;
- uint16_t tx_rings_mask;
- uint8_t tx_rx_gap;
- uint8_t num_banks;
- uint8_t num_accel;
- uint8_t num_logical_accel;
- uint8_t num_engines;
- uint8_t min_iov_compat_ver;
+ u32 fuses;
+ u32 accel_capabilities_mask;
+ u32 instance_id;
+ u16 accel_mask;
+ u16 ae_mask;
+ u16 tx_rings_mask;
+ u8 tx_rx_gap;
+ u8 num_banks;
+ u8 num_accel;
+ u8 num_logical_accel;
+ u8 num_engines;
+ u8 min_iov_compat_ver;
} __packed;
/* CSR write macro */
@@ -248,8 +204,8 @@ struct adf_accel_dev {
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
struct completion iov_msg_completion;
- uint8_t compatible;
- uint8_t pf_version;
+ u8 compatible;
+ u8 pf_version;
} vf;
};
bool is_vf;
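The adf_accel_devices.h hunks above swap the C99 uintNN_t spellings for the kernel's native u8/u16/u32 aliases, the preferred fixed-width types for kernel-internal structures (the double-underscore __uNN forms remain reserved for user-visible uapi headers). Illustrative only:

	#include <linux/types.h>

	struct example_hw_ids {		/* hypothetical, not a QAT struct */
		u32 fuses;
		u16 accel_mask;
		u8  revid;
	};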
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index a42fc42704be..c8ad85b882be 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "adf_cfg.h"
@@ -118,7 +74,7 @@ int adf_ae_start(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
if (!hw_data->fw_name)
return 0;
@@ -139,7 +95,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+ u32 ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
if (!hw_data->fw_name)
return 0;
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index d28cba34773e..1c8ca151a963 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -1,53 +1,9 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
@@ -60,6 +16,9 @@
#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
#define ADF_ADMINMSG_LEN 32
+#define ADF_CONST_TABLE_SIZE 1024
+#define ADF_ADMIN_POLL_DELAY_US 20
+#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -154,11 +113,13 @@ struct adf_admin_comms {
static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
void *in, void *out)
{
+ int ret;
+ u32 status;
struct adf_admin_comms *admin = accel_dev->admin;
int offset = ae * ADF_ADMINMSG_LEN * 2;
void __iomem *mailbox = admin->mailbox_addr;
int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
- int times, received;
+ struct icp_qat_fw_init_admin_req *request = in;
mutex_lock(&admin->lock);
@@ -169,46 +130,71 @@ static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
ADF_CSR_WR(mailbox, mb_offset, 1);
- received = 0;
- for (times = 0; times < 50; times++) {
- msleep(20);
- if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
- received = 1;
- break;
- }
- }
- if (received)
+
+ ret = readl_poll_timeout(mailbox + mb_offset, status,
+ status == 0, ADF_ADMIN_POLL_DELAY_US,
+ ADF_ADMIN_POLL_TIMEOUT_US);
+ if (ret < 0) {
+ /* Response timeout */
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send admin msg %d to accelerator %d\n",
+ request->cmd_id, ae);
+ } else {
+ /* Response received from the admin message; make the
+ * response data available in the "out" parameter.
+ */
memcpy(out, admin->virt_addr + offset +
ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
- else
- dev_err(&GET_DEV(accel_dev),
- "Failed to send admin msg to accelerator\n");
+ }
mutex_unlock(&admin->lock);
- return received ? 0 : -EFAULT;
+ return ret;
+}
+
+static int adf_send_admin(struct adf_accel_dev *accel_dev,
+ struct icp_qat_fw_init_admin_req *req,
+ struct icp_qat_fw_init_admin_resp *resp,
+ const unsigned long ae_mask)
+{
+ u32 ae;
+
+ for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
+ if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
+ resp->status)
+ return -EFAULT;
+
+ return 0;
}
-static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+static int adf_init_me(struct adf_accel_dev *accel_dev)
{
+ struct icp_qat_fw_init_admin_req req;
+ struct icp_qat_fw_init_admin_resp resp;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 ae_mask = hw_device->ae_mask;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_INIT_ME;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
+{
struct icp_qat_fw_init_admin_req req;
struct icp_qat_fw_init_admin_resp resp;
- int i;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ u32 ae_mask = hw_device->ae_mask;
- memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
- req.init_admin_cmd_id = cmd;
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+ req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
- if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
- req.init_cfg_sz = 1024;
- req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
- }
- for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
- memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
- if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
- resp.init_resp_hdr.status)
- return -EFAULT;
- }
- return 0;
+ req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
+ req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
/**
@@ -221,11 +207,13 @@ static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
*/
int adf_send_admin_init(struct adf_accel_dev *accel_dev)
{
- int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+ int ret;
+ ret = adf_init_me(accel_dev);
if (ret)
return ret;
- return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+
+ return adf_set_fw_constants(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_send_admin_init);
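Two idioms carry the adf_admin.c rework above: readl_poll_timeout() from <linux/iopoll.h> replaces the hand-rolled msleep() loop, polling the mailbox CSR until it reads zero (returning 0) or the timeout expires (returning -ETIMEDOUT), and for_each_set_bit() visits only the accelerator engines actually present in ae_mask instead of iterating 0..num_aes. A sketch of the polling idiom in isolation (the delays mirror the defines above; the function and register name are hypothetical):

	#include <linux/iopoll.h>

	/* Sleep ~20us between reads and give up after five seconds;
	 * returns 0 once the register reads zero, else -ETIMEDOUT. */
	static int wait_for_mailbox_idle(void __iomem *mbox_csr)
	{
		u32 status;

		return readl_poll_timeout(mbox_csr, status, status == 0,
					  20, 5 * USEC_PER_SEC);
	}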
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index f5e960d23a7a..32102e27e559 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/aer.h>
@@ -86,7 +42,7 @@ void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
- uint16_t bridge_ctl = 0;
+ u16 bridge_ctl = 0;
if (!parent)
parent = pdev;
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index 5c7fdb0fc53d..ac462796cefc 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
index 6a9c6f6b5ec9..376cde61a60e 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_H_
#define ADF_CFG_H_
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 1211261de7c2..1ef46ccfba47 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_COMMON_H_
#define ADF_CFG_COMMON_H_
@@ -81,16 +37,16 @@ enum adf_device_type {
struct adf_dev_status_info {
enum adf_device_type type;
- u32 accel_id;
- u32 instance_id;
- uint8_t num_ae;
- uint8_t num_accel;
- uint8_t num_logical_accel;
- uint8_t banks_per_accel;
- uint8_t state;
- uint8_t bus;
- uint8_t dev;
- uint8_t fun;
+ __u32 accel_id;
+ __u32 instance_id;
+ __u8 num_ae;
+ __u8 num_accel;
+ __u8 num_logical_accel;
+ __u8 banks_per_accel;
+ __u8 state;
+ __u8 bus;
+ __u8 dev;
+ __u8 fun;
char name[MAX_DEVICE_NAME_SIZE];
};
@@ -101,6 +57,6 @@ struct adf_dev_status_info {
struct adf_user_cfg_ctl_data)
#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
struct adf_user_cfg_ctl_data)
-#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
-#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, __u32)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32)
#endif
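
adf_cfg_common.h crosses the kernel/user boundary, which is why its struct members and ioctl definitions move to the exported `__u8`/`__u32`/`__s32` forms instead of the kernel-only shorthands. The width matters doubly here: `_IOW()` encodes `sizeof(type)` into the request number itself, so kernel and userspace must agree on the payload layout for the ioctl to match at all. A minimal userspace sketch, assuming the magic value from the full header and an illustrative device node path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>

    #define ADF_CTL_IOC_MAGIC 'a'   /* assumed to match the full header */
    #define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, __s32)

    int main(void)
    {
            __s32 num = 0;          /* must be exactly the encoded width */
            int fd = open("/dev/qat_adf_ctl", O_RDONLY); /* path is illustrative */

            if (fd < 0 || ioctl(fd, IOCTL_GET_NUM_DEVICES, &num) < 0)
                    return 1;
            printf("%d QAT device(s)\n", num);
            return 0;
    }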
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 7632ed0f25c5..314790f5b0af 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_STRINGS_H_
#define ADF_CFG_STRINGS_H_
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
index b5484bfa6996..421f4fb8b4dd 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_CFG_USER_H_
#define ADF_CFG_USER_H_
@@ -55,7 +11,7 @@ struct adf_user_cfg_key_val {
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
union {
struct adf_user_cfg_key_val *next;
- uint64_t padding3;
+ __u64 padding3;
};
enum adf_cfg_val_type type;
} __packed;
@@ -64,19 +20,19 @@ struct adf_user_cfg_section {
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
union {
struct adf_user_cfg_key_val *params;
- uint64_t padding1;
+ __u64 padding1;
};
union {
struct adf_user_cfg_section *next;
- uint64_t padding3;
+ __u64 padding3;
};
} __packed;
struct adf_user_cfg_ctl_data {
union {
struct adf_user_cfg_section *config_section;
- uint64_t padding;
+ __u64 padding;
};
- uint8_t device_id;
+ __u8 device_id;
} __packed;
#endif
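
The unions that pair each pointer with a `__u64 padding` member serve the same ABI goal: a pointer is 4 bytes for 32-bit userspace and 8 bytes for 64-bit userspace, so overlaying it with a `__u64` pins the field at 8 bytes either way, and `__packed` strips trailing padding. The result is one struct layout for every ABI combination, sparing the driver a compat-ioctl translation layer. A self-contained sketch of the trick (using the open-coded attribute, since `__packed` is a kernel macro):

    #include <assert.h>
    #include <linux/types.h>

    struct demo_ctl_data {
            union {
                    void *config_section;   /* 4 or 8 bytes, ABI-dependent */
                    __u64 padding;          /* pins the member at 8 bytes  */
            };
            __u8 device_id;
    } __attribute__((packed));

    int main(void)
    {
            /* 8 + 1 on both 32-bit and 64-bit builds. */
            assert(sizeof(struct demo_ctl_data) == 9);
            return 0;
    }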
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index d78f8d5c89c3..ebfcb4ea618d 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_DRV_H
#define ADF_DRV_H
@@ -123,11 +79,11 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
-struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id);
struct adf_accel_dev *adf_devmgr_get_first(void);
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
-int adf_devmgr_verify_id(uint32_t id);
-void adf_devmgr_get_num_dev(uint32_t *num);
+int adf_devmgr_verify_id(u32 id);
+void adf_devmgr_get_num_dev(u32 *num);
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
int adf_dev_started(struct adf_accel_dev *accel_dev);
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
@@ -198,7 +154,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int ctx_mask, unsigned int upc);
void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
- unsigned int words_num, uint64_t *uword);
+ unsigned int words_num, u64 *uword);
void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
unsigned int uword_addr, unsigned int words_num,
unsigned int *data);
@@ -233,9 +189,9 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- uint32_t vf_mask);
+ u32 vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- uint32_t vf_mask);
+ u32 vf_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
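
The `vf_mask` arguments in the interrupt prototypes above follow the common SR-IOV convention of one bit per virtual function; the exact bit-to-VF mapping is hardware-specific, so the arithmetic below is only an assumed illustration of how such a mask is typically built and cleared:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t vf_mask = 0;

            vf_mask |= 1u << 0;     /* select VF 0 */
            vf_mask |= 1u << 3;     /* select VF 3 */
            assert(vf_mask == 0x9);

            vf_mask &= ~(1u << 0);  /* drop VF 0 again */
            assert(vf_mask == 0x8);
            return 0;
    }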
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index ef0e482ee04f..71d0c44aacca 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -270,7 +226,7 @@ static int adf_ctl_is_device_in_use(int id)
return 0;
}
-static void adf_ctl_stop_devices(uint32_t id)
+static void adf_ctl_stop_devices(u32 id)
{
struct adf_accel_dev *accel_dev;
@@ -374,7 +330,7 @@ out:
static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
unsigned long arg)
{
- uint32_t num_devices = 0;
+ u32 num_devices = 0;
adf_devmgr_get_num_dev(&num_devices);
if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 2d06409bd3c4..72753af056b3 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
@@ -52,7 +8,7 @@
static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
-static uint32_t num_devices;
+static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];
struct vf_id_map {
@@ -355,7 +311,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
-struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
struct list_head *itr;
int real_id;
@@ -380,7 +336,7 @@ unlock:
return NULL;
}
-int adf_devmgr_verify_id(uint32_t id)
+int adf_devmgr_verify_id(u32 id)
{
if (id == ADF_CFG_ALL_DEVICES)
return 0;
@@ -407,7 +363,7 @@ static int adf_get_num_dettached_vfs(void)
return vfs;
}
-void adf_devmgr_get_num_dev(uint32_t *num)
+void adf_devmgr_get_num_dev(u32 *num)
{
*num = num_devices - adf_get_num_dettached_vfs();
}
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index d7dd18d9bef8..d4162783f970 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport_internal.h"
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 26556c713049..42029153408e 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/bitops.h>
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index cd1cdf5305bc..36136f7db509 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index b3875fdf6cd7..519fd5acf713 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -1,50 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
index 5acd531a11ff..0690c031bfce 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#ifndef ADF_PF2VF_MSG_H
#define ADF_PF2VF_MSG_H
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index b36d8653b1ba..8827aa139f96 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 2136cbe4bf6c..2ad774017200 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
@@ -51,22 +7,22 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+static inline u32 adf_modulo(u32 data, u32 shift)
{
- uint32_t div = data >> shift;
- uint32_t mult = div << shift;
+ u32 div = data >> shift;
+ u32 mult = div << shift;
return data - mult;
}
-static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
+static inline int adf_check_ring_alignment(u64 addr, u64 size)
{
if (((size - 1) & addr) != 0)
return -EFAULT;
return 0;
}
-static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
+static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
int i = ADF_MIN_RING_SIZE;
@@ -77,7 +33,7 @@ static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
return ADF_DEFAULT_RING_SIZE;
}
-static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock(&bank->lock);
if (bank->ring_mask & (1 << ring)) {
@@ -89,14 +45,14 @@ static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
return 0;
}
-static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock(&bank->lock);
bank->ring_mask &= ~(1 << ring);
spin_unlock(&bank->lock);
}
-static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock_bh(&bank->lock);
bank->irq_mask |= (1 << ring);
@@ -106,7 +62,7 @@ static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
bank->irq_coalesc_timer);
}
-static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
spin_lock_bh(&bank->lock);
bank->irq_mask &= ~(1 << ring);
@@ -114,7 +70,7 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}
-int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
if (atomic_add_return(1, ring->inflights) >
ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
@@ -136,18 +92,18 @@ int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
- uint32_t msg_counter = 0;
- uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
+ u32 msg_counter = 0;
+ u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
while (*msg != ADF_RING_EMPTY_SIG) {
- ring->callback((uint32_t *)msg);
+ ring->callback((u32 *)msg);
atomic_dec(ring->inflights);
*msg = ADF_RING_EMPTY_SIG;
ring->head = adf_modulo(ring->head +
ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ADF_RING_SIZE_MODULO(ring->ring_size));
msg_counter++;
- msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
+ msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
}
if (msg_counter > 0)
WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
@@ -158,7 +114,7 @@ static int adf_handle_response(struct adf_etr_ring_data *ring)
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
- uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
+ u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
ring->ring_number, ring_config);
@@ -166,7 +122,7 @@ static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
- uint32_t ring_config =
+ u32 ring_config =
BUILD_RESP_RING_CONFIG(ring->ring_size,
ADF_RING_NEAR_WATERMARK_512,
ADF_RING_NEAR_WATERMARK_0);
@@ -180,8 +136,8 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
struct adf_etr_bank_data *bank = ring->bank;
struct adf_accel_dev *accel_dev = bank->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- uint64_t ring_base;
- uint32_t ring_size_bytes =
+ u64 ring_base;
+ u32 ring_size_bytes =
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
@@ -215,7 +171,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
- uint32_t ring_size_bytes =
+ u32 ring_size_bytes =
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
@@ -228,8 +184,8 @@ static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
}
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
- uint32_t bank_num, uint32_t num_msgs,
- uint32_t msg_size, const char *ring_name,
+ u32 bank_num, u32 num_msgs,
+ u32 msg_size, const char *ring_name,
adf_callback_fn callback, int poll_mode,
struct adf_etr_ring_data **ring_ptr)
{
@@ -237,7 +193,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
- uint32_t ring_num;
+ u32 ring_num;
int ret;
if (bank_num >= GET_MAX_BANKS(accel_dev)) {
@@ -330,7 +286,7 @@ void adf_remove_ring(struct adf_etr_ring_data *ring)
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
- uint32_t empty_rings, i;
+ u32 empty_rings, i;
empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
empty_rings = ~empty_rings & bank->irq_mask;
@@ -353,7 +309,7 @@ void adf_response_handler(uintptr_t bank_addr)
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
const char *section, const char *format,
- uint32_t key, uint32_t *value)
+ u32 key, u32 *value)
{
char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
@@ -370,7 +326,7 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
const char *section,
- uint32_t bank_num_in_accel)
+ u32 bank_num_in_accel)
{
if (adf_get_cfg_int(bank->accel_dev, section,
ADF_ETRMGR_COALESCE_TIMER_FORMAT,
@@ -384,12 +340,12 @@ static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
static int adf_init_bank(struct adf_accel_dev *accel_dev,
struct adf_etr_bank_data *bank,
- uint32_t bank_num, void __iomem *csr_addr)
+ u32 bank_num, void __iomem *csr_addr)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_etr_ring_data *ring;
struct adf_etr_ring_data *tx_ring;
- uint32_t i, coalesc_enabled = 0;
+ u32 i, coalesc_enabled = 0;
memset(bank, 0, sizeof(*bank));
bank->bank_number = bank_num;
@@ -461,8 +417,8 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
struct adf_etr_data *etr_data;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *csr_addr;
- uint32_t size;
- uint32_t num_banks = 0;
+ u32 size;
+ u32 num_banks = 0;
int i, ret;
etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
@@ -508,7 +464,7 @@ EXPORT_SYMBOL_GPL(adf_init_etr_data);
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
- uint32_t i;
+ u32 i;
for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
struct adf_accel_dev *accel_dev = bank->accel_dev;
@@ -528,7 +484,7 @@ static void cleanup_bank(struct adf_etr_bank_data *bank)
static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
struct adf_etr_data *etr_data = accel_dev->transport;
- uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+ u32 i, num_banks = GET_MAX_BANKS(accel_dev);
for (i = 0; i < num_banks; i++)
cleanup_bank(&etr_data->banks[i]);
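
The helpers at the top of this file lean on ring sizes being powers of two: adf_modulo(data, shift) is `data % (1 << shift)` computed without a divide, and adf_check_ring_alignment() tests that an address is a multiple of a power-of-two size by masking with `size - 1`. A userspace restatement with illustrative values:

    #include <assert.h>
    #include <stdint.h>

    /* data % (1u << shift), written as the driver writes it. */
    static uint32_t modulo_pow2(uint32_t data, uint32_t shift)
    {
            uint32_t div = data >> shift;
            uint32_t mult = div << shift;
            return data - mult;     /* equal to data & ((1u << shift) - 1) */
    }

    /* Nonzero when addr is not a multiple of size (size a power of two). */
    static int misaligned(uint64_t addr, uint64_t size)
    {
            return ((size - 1) & addr) != 0;
    }

    int main(void)
    {
            assert(modulo_pow2(1000, 8) == 1000 % 256);  /* both 232 */
            assert(!misaligned(0x10000, 0x4000));        /* 64 KiB base, 16 KiB ring */
            assert(misaligned(0x10100, 0x4000));         /* off by 0x100 */
            return 0;
    }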
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
index 386485bd9c95..2c95f1697c76 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.h
+++ b/drivers/crypto/qat/qat_common/adf_transport.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_TRANSPORT_H
#define ADF_TRANSPORT_H
@@ -54,10 +10,10 @@ struct adf_etr_ring_data;
typedef void (*adf_callback_fn)(void *resp_msg);
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
- uint32_t bank_num, uint32_t num_mgs, uint32_t msg_size,
+ u32 bank_num, u32 num_mgs, u32 msg_size,
const char *ring_name, adf_callback_fn callback,
int poll_mode, struct adf_etr_ring_data **ring_ptr);
-int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
void adf_remove_ring(struct adf_etr_ring_data *ring);
#endif
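
One quirk worth noting: the adf_create_ring() prototype here spells its second parameter `num_mgs`, while the definition in adf_transport.c uses `num_msgs`. This compiles cleanly because parameter names in a C prototype are documentation only; declaration and definition need agree only on types, as this sketch shows:

    /* Prototype and definition may use different parameter names;
     * only the types take part in the declared signature. */
    int add(int a, int b);

    int add(int x, int y)
    {
            return x + y;
    }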
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 80e02a2a0a09..950d1988556c 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
#define ADF_TRANSPORT_ACCESS_MACROS_H
@@ -132,9 +88,9 @@
ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
do { \
- uint32_t l_base = 0, u_base = 0; \
- l_base = (uint32_t)(value & 0xFFFFFFFF); \
- u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ u32 l_base = 0, u_base = 0; \
+ l_base = (u32)(value & 0xFFFFFFFF); \
+ u_base = (u32)((value & 0xFFFFFFFF00000000ULL) >> 32); \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
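
Aside: WRITE_CSR_RING_BASE splits the 64-bit ring base into two 32-bit CSR writes. The open-coded mask and shift are equivalent to the kernel's lower_32_bits()/upper_32_bits() helpers; a sketch for reference only (the macro keeps the open-coded form, and the register offsets here are illustrative):

    #include <linux/io.h>
    #include <linux/kernel.h>

    static void write_ring_base_example(void __iomem *csr, u64 value)
    {
            u32 l_base = lower_32_bits(value);      /* value & 0xFFFFFFFF */
            u32 u_base = upper_32_bits(value);      /* value >> 32 */

            writel(l_base, csr);            /* lower dword first */
            writel(u_base, csr + 4);        /* then upper dword */
    }
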
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e794e9d97b2c..2a2eccbf56ec 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
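
Note the two SPDX spellings used across this conversion, matching the kernel's license-rules documentation: C source files take the line-comment form on their first line, while headers keep the block-comment form.

    // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)

for .c files, as in adf_transport_debug.c above, and

    /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */

for .h files, as in adf_transport_access_macros.h.
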
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index bb883368ac01..c7faf4e2d302 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_TRANSPORT_INTRN_H
#define ADF_TRANSPORT_INTRN_H
@@ -59,32 +15,31 @@ struct adf_etr_ring_debug_entry {
struct adf_etr_ring_data {
void *base_addr;
atomic_t *inflights;
- spinlock_t lock; /* protects ring data struct */
adf_callback_fn callback;
struct adf_etr_bank_data *bank;
dma_addr_t dma_addr;
- uint16_t head;
- uint16_t tail;
- uint8_t ring_number;
- uint8_t ring_size;
- uint8_t msg_size;
- uint8_t reserved;
struct adf_etr_ring_debug_entry *ring_debug;
-} __packed;
+ spinlock_t lock; /* protects ring data struct */
+ u16 head;
+ u16 tail;
+ u8 ring_number;
+ u8 ring_size;
+ u8 msg_size;
+};
struct adf_etr_bank_data {
struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
struct tasklet_struct resp_handler;
void __iomem *csr_addr;
- struct adf_accel_dev *accel_dev;
- uint32_t irq_coalesc_timer;
- uint16_t ring_mask;
- uint16_t irq_mask;
+ u32 irq_coalesc_timer;
+ u32 bank_number;
+ u16 ring_mask;
+ u16 irq_mask;
spinlock_t lock; /* protects bank data struct */
+ struct adf_accel_dev *accel_dev;
struct dentry *bank_debug_dir;
struct dentry *bank_debug_cfg;
- uint32_t bank_number;
-} __packed;
+};
struct adf_etr_data {
struct adf_etr_bank_data *banks;
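
Dropping __packed from adf_etr_ring_data and adf_etr_bank_data is safe because both are host-side bookkeeping structures, not layouts shared with the device; with natural alignment back in force, the members are also regrouped so the larger fields come first and padding holes stay small. The principle, on an illustrative struct:

    #include <linux/types.h>

    /* Sketch: under natural alignment, ordering members by decreasing
     * size minimizes compiler-inserted padding (not driver code). */
    struct layout_example {
            void *ptr;              /* 8 bytes on 64-bit */
            u32 counter;            /* 4 bytes */
            u16 mask;               /* 2 bytes */
            u8 id;                  /* 1 byte, plus 1 byte tail padding */
    };
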
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index cd5f37dffe8a..2c98fb63f7b7 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above, with Copyright(c) 2015]
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 4a73fc70f7a9..c4a44dc6af3e 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
index 46747f01b1d1..6dc09d270082 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_H_
#define _ICP_QAT_FW_H_
#include <linux/types.h>
@@ -89,41 +45,41 @@ enum icp_qat_fw_comn_request_id {
struct icp_qat_fw_comn_req_hdr_cd_pars {
union {
struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
} s;
struct {
- uint32_t serv_specif_fields[4];
+ __u32 serv_specif_fields[4];
} s1;
} u;
};
struct icp_qat_fw_comn_req_mid {
- uint64_t opaque_data;
- uint64_t src_data_addr;
- uint64_t dest_data_addr;
- uint32_t src_length;
- uint32_t dst_length;
+ __u64 opaque_data;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
+ __u32 src_length;
+ __u32 dst_length;
};
struct icp_qat_fw_comn_req_cd_ctrl {
- uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+ __u32 content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
};
struct icp_qat_fw_comn_req_hdr {
- uint8_t resrvd1;
- uint8_t service_cmd_id;
- uint8_t service_type;
- uint8_t hdr_flags;
- uint16_t serv_specif_flags;
- uint16_t comn_req_flags;
+ __u8 resrvd1;
+ __u8 service_cmd_id;
+ __u8 service_type;
+ __u8 hdr_flags;
+ __u16 serv_specif_flags;
+ __u16 comn_req_flags;
};
struct icp_qat_fw_comn_req_rqpars {
- uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+ __u32 serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
};
struct icp_qat_fw_comn_req {
@@ -135,24 +91,24 @@ struct icp_qat_fw_comn_req {
};
struct icp_qat_fw_comn_error {
- uint8_t xlat_err_code;
- uint8_t cmp_err_code;
+ __u8 xlat_err_code;
+ __u8 cmp_err_code;
};
struct icp_qat_fw_comn_resp_hdr {
- uint8_t resrvd1;
- uint8_t service_id;
- uint8_t response_type;
- uint8_t hdr_flags;
+ __u8 resrvd1;
+ __u8 service_id;
+ __u8 response_type;
+ __u8 hdr_flags;
struct icp_qat_fw_comn_error comn_error;
- uint8_t comn_status;
- uint8_t cmd_id;
+ __u8 comn_status;
+ __u8 cmd_id;
};
struct icp_qat_fw_comn_resp {
struct icp_qat_fw_comn_resp_hdr comn_hdr;
- uint64_t opaque_data;
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u64 opaque_data;
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
};
#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
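
In icp_qat_fw.h the conversion targets the double-underscore __u8/__u16/__u32/__u64 spellings rather than u8..u64: these structures describe messages whose exact layout is fixed by the QAT firmware, and the __uNN types (also from <linux/types.h>) are the spellings conventionally used for such interface definitions. A hypothetical header in the same style:

    #include <linux/types.h>

    /* Illustrative firmware-visible message header: the field widths
     * are part of the wire format, so explicit __uNN types are used. */
    struct fw_hdr_example {
            __u8 cmd_id;
            __u8 flags;
            __u16 req_flags;
            __u32 cookie;
    };
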
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
index 72a59faa9005..d4d188cd7ed0 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
#define _ICP_QAT_FW_INIT_ADMIN_H_
@@ -67,50 +23,75 @@ enum icp_qat_fw_init_admin_resp_status {
};
struct icp_qat_fw_init_admin_req {
- uint16_t init_cfg_sz;
- uint8_t resrvd1;
- uint8_t init_admin_cmd_id;
- uint32_t resrvd2;
- uint64_t opaque_data;
- uint64_t init_cfg_ptr;
- uint64_t resrvd3;
-};
-
-struct icp_qat_fw_init_admin_resp_hdr {
- uint8_t flags;
- uint8_t resrvd1;
- uint8_t status;
- uint8_t init_admin_cmd_id;
-};
+ __u16 init_cfg_sz;
+ __u8 resrvd1;
+ __u8 cmd_id;
+ __u32 resrvd2;
+ __u64 opaque_data;
+ __u64 init_cfg_ptr;
-struct icp_qat_fw_init_admin_resp_pars {
union {
- uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
struct {
- uint32_t version_patch_num;
- uint8_t context_id;
- uint8_t ae_id;
- uint16_t resrvd1;
- uint64_t resrvd2;
- } s1;
- struct {
- uint64_t req_rec_count;
- uint64_t resp_sent_count;
- } s2;
- } u;
+ __u16 ibuf_size_in_kb;
+ __u16 resrvd3;
+ };
+ __u32 idle_filter;
+ };
+
+ __u32 resrvd4;
};
struct icp_qat_fw_init_admin_resp {
- struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
+ __u8 flags;
+ __u8 resrvd1;
+ __u8 status;
+ __u8 cmd_id;
union {
- uint32_t resrvd2;
+ __u32 resrvd2;
+ struct {
+ __u16 version_minor_num;
+ __u16 version_major_num;
+ };
+ };
+ __u64 opaque_data;
+ union {
+ __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ __u32 version_patch_num;
+ __u8 context_id;
+ __u8 ae_id;
+ __u16 resrvd4;
+ __u64 resrvd5;
+ };
+ struct {
+ __u64 req_rec_count;
+ __u64 resp_sent_count;
+ };
+ struct {
+ __u16 compression_algos;
+ __u16 checksum_algos;
+ __u32 deflate_capabilities;
+ __u32 resrvd6;
+ __u32 lzs_capabilities;
+ };
+ struct {
+ __u32 cipher_algos;
+ __u32 hash_algos;
+ __u16 keygen_algos;
+ __u16 other;
+ __u16 public_key_algos;
+ __u16 prime_algos;
+ };
+ struct {
+ __u64 timestamp;
+ __u64 resrvd7;
+ };
struct {
- uint16_t version_minor_num;
- uint16_t version_major_num;
- } s;
- } u;
- uint64_t opaque_data;
- struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
+ __u32 successful_count;
+ __u32 unsuccessful_count;
+ __u64 resrvd8;
+ };
+ };
};
#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
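
Beyond the type swap, icp_qat_fw_init_admin_resp loses its named sub-structures (init_resp_hdr, init_resp_pars, and the u/s1/s2 members) in favour of anonymous unions and structs, which flattens every accessor chain:

    struct icp_qat_fw_init_admin_resp resp;

    /* old layout: resp.init_resp_pars.u.s1.version_patch_num */
    /* new layout, via anonymous union/struct members:         */
    __u32 patch = resp.version_patch_num;   /* sketch of the access */
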
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
index c8d26697e8ea..6757ec09d81f 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_LA_H_
#define _ICP_QAT_FW_LA_H_
#include "icp_qat_fw.h"
@@ -226,14 +182,14 @@ struct icp_qat_fw_la_bulk_req {
struct icp_qat_fw_cipher_req_hdr_cd_pars {
union {
struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
} s;
struct {
- uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
} s1;
} u;
};
@@ -241,70 +197,70 @@ struct icp_qat_fw_cipher_req_hdr_cd_pars {
struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
union {
struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
+ __u64 content_desc_addr;
+ __u16 content_desc_resrvd1;
+ __u8 content_desc_params_sz;
+ __u8 content_desc_hdr_resrvd2;
+ __u32 content_desc_resrvd3;
} s;
struct {
- uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u32 cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
} sl;
} u;
};
struct icp_qat_fw_cipher_cd_ctrl_hdr {
- uint8_t cipher_state_sz;
- uint8_t cipher_key_sz;
- uint8_t cipher_cfg_offset;
- uint8_t next_curr_id;
- uint8_t cipher_padding_sz;
- uint8_t resrvd1;
- uint16_t resrvd2;
- uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+ __u8 cipher_state_sz;
+ __u8 cipher_key_sz;
+ __u8 cipher_cfg_offset;
+ __u8 next_curr_id;
+ __u8 cipher_padding_sz;
+ __u8 resrvd1;
+ __u16 resrvd2;
+ __u32 resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
};
struct icp_qat_fw_auth_cd_ctrl_hdr {
- uint32_t resrvd1;
- uint8_t resrvd2;
- uint8_t hash_flags;
- uint8_t hash_cfg_offset;
- uint8_t next_curr_id;
- uint8_t resrvd3;
- uint8_t outer_prefix_sz;
- uint8_t final_sz;
- uint8_t inner_res_sz;
- uint8_t resrvd4;
- uint8_t inner_state1_sz;
- uint8_t inner_state2_offset;
- uint8_t inner_state2_sz;
- uint8_t outer_config_offset;
- uint8_t outer_state1_sz;
- uint8_t outer_res_sz;
- uint8_t outer_prefix_offset;
+ __u32 resrvd1;
+ __u8 resrvd2;
+ __u8 hash_flags;
+ __u8 hash_cfg_offset;
+ __u8 next_curr_id;
+ __u8 resrvd3;
+ __u8 outer_prefix_sz;
+ __u8 final_sz;
+ __u8 inner_res_sz;
+ __u8 resrvd4;
+ __u8 inner_state1_sz;
+ __u8 inner_state2_offset;
+ __u8 inner_state2_sz;
+ __u8 outer_config_offset;
+ __u8 outer_state1_sz;
+ __u8 outer_res_sz;
+ __u8 outer_prefix_offset;
};
struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
- uint8_t cipher_state_sz;
- uint8_t cipher_key_sz;
- uint8_t cipher_cfg_offset;
- uint8_t next_curr_id_cipher;
- uint8_t cipher_padding_sz;
- uint8_t hash_flags;
- uint8_t hash_cfg_offset;
- uint8_t next_curr_id_auth;
- uint8_t resrvd1;
- uint8_t outer_prefix_sz;
- uint8_t final_sz;
- uint8_t inner_res_sz;
- uint8_t resrvd2;
- uint8_t inner_state1_sz;
- uint8_t inner_state2_offset;
- uint8_t inner_state2_sz;
- uint8_t outer_config_offset;
- uint8_t outer_state1_sz;
- uint8_t outer_res_sz;
- uint8_t outer_prefix_offset;
+ __u8 cipher_state_sz;
+ __u8 cipher_key_sz;
+ __u8 cipher_cfg_offset;
+ __u8 next_curr_id_cipher;
+ __u8 cipher_padding_sz;
+ __u8 hash_flags;
+ __u8 hash_cfg_offset;
+ __u8 next_curr_id_auth;
+ __u8 resrvd1;
+ __u8 outer_prefix_sz;
+ __u8 final_sz;
+ __u8 inner_res_sz;
+ __u8 resrvd2;
+ __u8 inner_state1_sz;
+ __u8 inner_state2_offset;
+ __u8 inner_state2_sz;
+ __u8 outer_config_offset;
+ __u8 outer_state1_sz;
+ __u8 outer_res_sz;
+ __u8 outer_prefix_offset;
};
#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
@@ -315,48 +271,48 @@ struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
struct icp_qat_fw_la_cipher_req_params {
- uint32_t cipher_offset;
- uint32_t cipher_length;
+ __u32 cipher_offset;
+ __u32 cipher_length;
union {
- uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u32 cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
struct {
- uint64_t cipher_IV_ptr;
- uint64_t resrvd1;
+ __u64 cipher_IV_ptr;
+ __u64 resrvd1;
} s;
} u;
};
struct icp_qat_fw_la_auth_req_params {
- uint32_t auth_off;
- uint32_t auth_len;
+ __u32 auth_off;
+ __u32 auth_len;
union {
- uint64_t auth_partial_st_prefix;
- uint64_t aad_adr;
+ __u64 auth_partial_st_prefix;
+ __u64 aad_adr;
} u1;
- uint64_t auth_res_addr;
+ __u64 auth_res_addr;
union {
- uint8_t inner_prefix_sz;
- uint8_t aad_sz;
+ __u8 inner_prefix_sz;
+ __u8 aad_sz;
} u2;
- uint8_t resrvd1;
- uint8_t hash_state_sz;
- uint8_t auth_res_sz;
+ __u8 resrvd1;
+ __u8 hash_state_sz;
+ __u8 auth_res_sz;
} __packed;
struct icp_qat_fw_la_auth_req_params_resrvd_flds {
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
union {
- uint8_t inner_prefix_sz;
- uint8_t aad_sz;
+ __u8 inner_prefix_sz;
+ __u8 aad_sz;
} u2;
- uint8_t resrvd1;
- uint16_t resrvd2;
+ __u8 resrvd1;
+ __u16 resrvd2;
};
struct icp_qat_fw_la_resp {
struct icp_qat_fw_comn_resp_hdr comn_resp;
- uint64_t opaque_data;
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+ __u64 opaque_data;
+ __u32 resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
};
#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
index 2ffef3e4fd68..3e8e291cd122 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
#define __ICP_QAT_FW_LOADER_HANDLE_H__
#include "icp_qat_uclo.h"
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
index 0d7a9b51ce9f..9dddae0009fc 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -1,100 +1,56 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_FW_PKE_
#define _ICP_QAT_FW_PKE_
#include "icp_qat_fw.h"
struct icp_qat_fw_req_hdr_pke_cd_pars {
- u64 content_desc_addr;
- u32 content_desc_resrvd;
- u32 func_id;
+ __u64 content_desc_addr;
+ __u32 content_desc_resrvd;
+ __u32 func_id;
};
struct icp_qat_fw_req_pke_mid {
- u64 opaque;
- u64 src_data_addr;
- u64 dest_data_addr;
+ __u64 opaque;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
};
struct icp_qat_fw_req_pke_hdr {
- u8 resrvd1;
- u8 resrvd2;
- u8 service_type;
- u8 hdr_flags;
- u16 comn_req_flags;
- u16 resrvd4;
+ __u8 resrvd1;
+ __u8 resrvd2;
+ __u8 service_type;
+ __u8 hdr_flags;
+ __u16 comn_req_flags;
+ __u16 resrvd4;
struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
};
struct icp_qat_fw_pke_request {
struct icp_qat_fw_req_pke_hdr pke_hdr;
struct icp_qat_fw_req_pke_mid pke_mid;
- u8 output_param_count;
- u8 input_param_count;
- u16 resrvd1;
- u32 resrvd2;
- u64 next_req_adr;
+ __u8 output_param_count;
+ __u8 input_param_count;
+ __u16 resrvd1;
+ __u32 resrvd2;
+ __u64 next_req_adr;
};
struct icp_qat_fw_resp_pke_hdr {
- u8 resrvd1;
- u8 resrvd2;
- u8 response_type;
- u8 hdr_flags;
- u16 comn_resp_flags;
- u16 resrvd4;
+ __u8 resrvd1;
+ __u8 resrvd2;
+ __u8 response_type;
+ __u8 hdr_flags;
+ __u16 comn_resp_flags;
+ __u16 resrvd4;
};
struct icp_qat_fw_pke_resp {
struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
- u64 opaque;
- u64 src_data_addr;
- u64 dest_data_addr;
+ __u64 opaque;
+ __u64 src_data_addr;
+ __u64 dest_data_addr;
};
#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS 7
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
index 7187917533d0..c0e9fc0c93dd 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hal.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hal.h
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef __ICP_QAT_HAL_H
#define __ICP_QAT_HAL_H
#include "icp_qat_fw_loader_handle.h"
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index 121d5e6e46ca..c4b6ef1506ab 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _ICP_QAT_HW_H_
#define _ICP_QAT_HW_H_
@@ -105,8 +61,8 @@ enum icp_qat_hw_auth_mode {
};
struct icp_qat_hw_auth_config {
- uint32_t config;
- uint32_t reserved;
+ __u32 config;
+ __u32 reserved;
};
#define QAT_AUTH_MODE_BITPOS 4
@@ -131,7 +87,7 @@ struct icp_qat_hw_auth_config {
struct icp_qat_hw_auth_counter {
__be32 counter;
- uint32_t reserved;
+ __u32 reserved;
};
#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
@@ -191,9 +147,9 @@ struct icp_qat_hw_auth_setup {
struct icp_qat_hw_auth_sha512 {
struct icp_qat_hw_auth_setup inner_setup;
- uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ __u8 state1[ICP_QAT_HW_SHA512_STATE1_SZ];
struct icp_qat_hw_auth_setup outer_setup;
- uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+ __u8 state2[ICP_QAT_HW_SHA512_STATE2_SZ];
};
struct icp_qat_hw_auth_algo_blk {
@@ -227,8 +183,8 @@ enum icp_qat_hw_cipher_mode {
};
struct icp_qat_hw_cipher_config {
- uint32_t val;
- uint32_t reserved;
+ __u32 val;
+ __u32 reserved;
};
enum icp_qat_hw_cipher_dir {
@@ -296,7 +252,7 @@ enum icp_qat_hw_cipher_convert {
struct icp_qat_hw_cipher_aes256_f8 {
struct icp_qat_hw_cipher_config cipher_config;
- uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+ __u8 key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
};
struct icp_qat_hw_cipher_algo_blk {
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index 5d1ee7e53492..8fe1ec344fa2 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef __ICP_QAT_UCLO_H__
#define __ICP_QAT_UCLO_H__
@@ -176,7 +132,7 @@ struct icp_qat_uof_encap_obj {
struct icp_qat_uclo_encap_uwblock {
unsigned int start_addr;
unsigned int words_num;
- uint64_t micro_words;
+ u64 micro_words;
};
struct icp_qat_uclo_encap_page {
@@ -215,7 +171,7 @@ struct icp_qat_uclo_objhdr {
struct icp_qat_uof_strtable {
unsigned int table_len;
unsigned int reserved;
- uint64_t strings;
+ u64 strings;
};
struct icp_qat_uclo_objhandle {
@@ -235,7 +191,7 @@ struct icp_qat_uclo_objhandle {
unsigned int ae_num;
unsigned int ustore_phy_size;
void *obj_buf;
- uint64_t *uword_buf;
+ u64 *uword_buf;
};
struct icp_qat_uof_uword_block {
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index e14d3dd291f0..72753b84dc95 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1,49 +1,5 @@
- [dual BSD/GPLv2 license text elided; identical to the block retained above]
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
@@ -55,6 +11,7 @@
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
+#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -78,15 +35,15 @@ static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
struct qat_alg_buf {
- uint32_t len;
- uint32_t resrvd;
- uint64_t addr;
+ u32 len;
+ u32 resrvd;
+ u64 addr;
} __packed;
struct qat_alg_buf_list {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
+ u64 resrvd;
+ u32 num_bufs;
+ u32 num_mapped_bufs;
struct qat_alg_buf bufers[];
} __packed __aligned(64);
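
bufers[] is a C99 flexible array member, so each buffer list is a single allocation sized for however many scatterlist entries the request carries, and __aligned(64) keeps the descriptor on the 64-byte boundary the device expects. A sketch of the usual allocation pattern, assuming some entry count n that is not taken from this diff:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* One allocation covering the header plus n trailing qat_alg_buf
     * entries; struct_size() guards the size arithmetic. */
    static struct qat_alg_buf_list *alloc_buf_list_example(int n)
    {
            struct qat_alg_buf_list *bl;

            bl = kzalloc(struct_size(bl, bufers, n), GFP_KERNEL);
            return bl;      /* NULL on allocation failure */
    }
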
@@ -131,7 +88,8 @@ struct qat_alg_skcipher_ctx {
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
- struct crypto_skcipher *tfm;
+ struct crypto_skcipher *ftfm;
+ bool fallback;
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
@@ -151,7 +109,7 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
- const uint8_t *auth_key,
+ const u8 *auth_key,
unsigned int auth_keylen)
{
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
@@ -467,7 +425,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
struct icp_qat_fw_la_bulk_req *req,
struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
@@ -487,7 +445,7 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
}
static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
- int alg, const uint8_t *key,
+ int alg, const u8 *key,
unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
@@ -500,7 +458,7 @@ static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
}
static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
- int alg, const uint8_t *key,
+ int alg, const u8 *key,
unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
@@ -578,7 +536,7 @@ error:
}
static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
- const uint8_t *key,
+ const u8 *key,
unsigned int keylen,
int mode)
{
@@ -592,7 +550,7 @@ static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
return 0;
}
-static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -606,7 +564,7 @@ static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
ICP_QAT_HW_CIPHER_CBC_MODE);
}
-static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -658,7 +616,7 @@ out_free_inst:
return ret;
}
-static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -820,7 +778,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
struct qat_crypto_instance *inst = ctx->inst;
struct aead_request *areq = qat_req->aead_req;
- uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
qat_alg_free_bufl(inst, qat_req);
@@ -835,7 +793,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst;
struct skcipher_request *sreq = qat_req->skcipher_req;
- uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ u8 stat_filed = qat_resp->comn_resp.comn_status;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
@@ -880,18 +838,18 @@ static int qat_alg_aead_dec(struct aead_request *areq)
qat_req->aead_ctx = ctx;
qat_req->aead_req = areq;
qat_req->cb = qat_aead_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
cipher_param->cipher_length = areq->cryptlen - digst_size;
cipher_param->cipher_offset = areq->assoclen;
memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -910,7 +868,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
struct icp_qat_fw_la_bulk_req *msg;
- uint8_t *iv = areq->iv;
+ u8 *iv = areq->iv;
int ret, ctr = 0;
ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
@@ -922,11 +880,11 @@ static int qat_alg_aead_enc(struct aead_request *areq)
qat_req->aead_ctx = ctx;
qat_req->aead_req = areq;
qat_req->cb = qat_aead_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
cipher_param->cipher_length = areq->cryptlen;
@@ -936,7 +894,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
auth_param->auth_len = areq->assoclen + areq->cryptlen;
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1038,6 +996,25 @@ static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ ret = xts_verify_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ if (keylen >> 1 == AES_KEYSIZE_192) {
+ ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
+ if (ret)
+ return ret;
+
+ ctx->fallback = true;
+
+ return 0;
+ }
+
+ ctx->fallback = false;
+
return qat_alg_skcipher_setkey(tfm, key, keylen,
ICP_QAT_HW_CIPHER_XTS_MODE);
}
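
The new setkey path first runs xts_verify_key(), which rejects odd-length keys and, in FIPS mode, keys whose two halves are identical, then routes 192-bit keys to the software fallback, presumably because the accelerator cannot process XTS-AES-192 natively. An XTS key is two equal-size AES keys concatenated, which is what the halving check catches; a sketch of that check, assuming AES_KEYSIZE_192 from <crypto/aes.h>:

    #include <crypto/aes.h>
    #include <linux/types.h>

    /* Halving the total XTS key length recovers the AES key size in
     * use, so 48 bytes means two AES-192 halves. */
    static bool xts_key_is_aes192(unsigned int keylen)
    {
            return keylen >> 1 == AES_KEYSIZE_192;
    }
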
@@ -1073,7 +1050,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_req->skcipher_ctx = ctx;
qat_req->skcipher_req = req;
qat_req->cb = qat_skcipher_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
@@ -1082,7 +1059,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1102,6 +1079,24 @@ static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
return qat_alg_skcipher_encrypt(req);
}
+static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_skcipher_encrypt(nreq);
+ }
+
+ return qat_alg_skcipher_encrypt(req);
+}
+
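Note: because the operation completes asynchronously, the fallback request cannot live
on the stack; it reuses tail space in the driver's own request context (sized in
qat_alg_skcipher_init_xts_tfm below). Copying the original request first preserves the
caller's completion callback and flags before the tfm is swapped; the decrypt path
further down mirrors this exactly. A hedged sketch of the general hand-off pattern
(fwd_to_fallback is a hypothetical helper):

	/* 'ftfm' is the pre-allocated fallback transform; 'nreq' lives in
	 * req's context, so no allocation happens on this path. */
	static int fwd_to_fallback(struct skcipher_request *req,
				   struct crypto_skcipher *ftfm, bool enc)
	{
		struct skcipher_request *nreq = skcipher_request_ctx(req);

		memcpy(nreq, req, sizeof(*req));   /* keep callback and flags */
		skcipher_request_set_tfm(nreq, ftfm);
		return enc ? crypto_skcipher_encrypt(nreq)
			   : crypto_skcipher_decrypt(nreq);
	}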
static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
@@ -1133,7 +1128,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
qat_req->skcipher_ctx = ctx;
qat_req->skcipher_req = req;
qat_req->cb = qat_skcipher_alg_callback;
- qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
@@ -1142,7 +1137,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
- ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
if (ret == -EAGAIN) {
@@ -1161,6 +1156,25 @@ static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
return qat_alg_skcipher_decrypt(req);
}
+
+static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
+ struct skcipher_request *nreq = skcipher_request_ctx(req);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (ctx->fallback) {
+ memcpy(nreq, req, sizeof(*req));
+ skcipher_request_set_tfm(nreq, ctx->ftfm);
+ return crypto_skcipher_decrypt(nreq);
+ }
+
+ return qat_alg_skcipher_decrypt(req);
+}
+
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
const char *hash_name)
@@ -1217,10 +1231,25 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
+ return 0;
+}
+
+static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
+{
struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int reqsize;
+
+ ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->ftfm))
+ return PTR_ERR(ctx->ftfm);
+
+ reqsize = max(sizeof(struct qat_crypto_request),
+ sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(ctx->ftfm));
+ crypto_skcipher_set_reqsize(tfm, reqsize);
- crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
- ctx->tfm = tfm;
return 0;
}
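
Note: the request context must be big enough for whichever path a request takes, which
is what the max() expresses. Passing CRYPTO_ALG_NEED_FALLBACK as the mask when
allocating "xts(aes)" excludes implementations that themselves need a fallback, so the
driver cannot end up selecting itself. The assumed layout, as a sketch:

	/*
	 * Request context layout after this init:
	 *
	 *   hardware path:  [ struct qat_crypto_request ............ ]
	 *   fallback path:  [ struct skcipher_request | fallback ctx ]
	 *
	 * reqsize = max() of the two, so both fit in the same allocation.
	 */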
@@ -1251,13 +1280,22 @@ static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
qat_crypto_put_instance(inst);
}
+static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
+{
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (ctx->ftfm)
+ crypto_free_skcipher(ctx->ftfm);
+
+ qat_alg_skcipher_exit_tfm(tfm);
+}
static struct aead_alg qat_aeads[] = { {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
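
Note: CRYPTO_ALG_ALLOCATES_MEMORY, added to every algorithm in this file, declares that
the driver may allocate memory while processing a request (here, in
qat_alg_sgl_to_bufl()). Callers that cannot tolerate per-request allocation can then
steer clear of such implementations by requiring the flag to be clear, e.g.:

	/* Sketch: request an xts(aes) implementation guaranteed not to
	 * allocate memory per request (the mask demands the flag be 0). */
	tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);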
@@ -1274,7 +1312,7 @@ static struct aead_alg qat_aeads[] = { {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha256",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
@@ -1291,7 +1329,7 @@ static struct aead_alg qat_aeads[] = { {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha512",
.cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_module = THIS_MODULE,
@@ -1309,7 +1347,7 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "qat_aes_cbc",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
@@ -1327,7 +1365,7 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "qat_aes_ctr",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
@@ -1345,17 +1383,18 @@ static struct skcipher_alg qat_skciphers[] = { {
.base.cra_name = "xts(aes)",
.base.cra_driver_name = "qat_aes_xts",
.base.cra_priority = 4001,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
.base.cra_alignmask = 0,
.base.cra_module = THIS_MODULE,
- .init = qat_alg_skcipher_init_tfm,
- .exit = qat_alg_skcipher_exit_tfm,
+ .init = qat_alg_skcipher_init_xts_tfm,
+ .exit = qat_alg_skcipher_exit_xts_tfm,
.setkey = qat_alg_skcipher_xts_setkey,
- .decrypt = qat_alg_skcipher_blk_decrypt,
- .encrypt = qat_alg_skcipher_blk_encrypt,
+ .decrypt = qat_alg_skcipher_xts_decrypt,
+ .encrypt = qat_alg_skcipher_xts_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 692a7aaee749..846569ec9066 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -1,50 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
@@ -384,12 +339,12 @@ static int qat_dh_compute_value(struct kpp_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
msg->input_param_count = n_input_params;
msg->output_param_count = 1;
do {
- ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
} while (ret == -EBUSY && ctr++ < 100);
if (!ret)
@@ -779,11 +734,11 @@ static int qat_rsa_enc(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
msg->input_param_count = 3;
msg->output_param_count = 1;
do {
- ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
} while (ret == -EBUSY && ctr++ < 100);
if (!ret)
@@ -927,7 +882,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->pke_mid.opaque = (u64)(__force long)qat_req;
if (ctx->crt_mode)
msg->input_param_count = 6;
else
@@ -935,7 +890,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->output_param_count = 1;
do {
- ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg);
} while (ret == -EBUSY && ctr++ < 100);
if (!ret)
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index fb504cee0305..ab621b7dbd20 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index 300bb919a33a..12682d1e9f5f 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef _QAT_CRYPTO_INSTANCE_H_
#define _QAT_CRYPTO_INSTANCE_H_
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index ff149e176f64..fa467e0f8285 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/delay.h>
@@ -78,13 +34,13 @@
#define AE(handle, ae) handle->hal_handle->aes[ae]
-static const uint64_t inst_4b[] = {
+static const u64 inst_4b[] = {
0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
0x0A021000000ull
};
-static const uint64_t inst[] = {
+static const u64 inst[] = {
0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
@@ -546,7 +502,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
}
-static uint64_t qat_hal_parity_64bit(uint64_t word)
+static u64 qat_hal_parity_64bit(u64 word)
{
word ^= word >> 1;
word ^= word >> 2;
@@ -557,9 +513,9 @@ static uint64_t qat_hal_parity_64bit(uint64_t word)
return word & 1;
}
-static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
+static u64 qat_hal_set_uword_ecc(u64 uword)
{
- uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+ u64 bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
bit6_mask = 0xdaf69a46910ULL;
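
Note: qat_hal_parity_64bit() is the classic XOR fold: each shift-and-xor halves the span
over which bit parities accumulate, so after folding by 1, 2, 4, 8, 16 and 32 bits,
bit 0 carries the parity of the whole word. A self-contained user-space re-creation:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t parity64(uint64_t w)
	{
		w ^= w >> 1;  w ^= w >> 2;  w ^= w >> 4;
		w ^= w >> 8;  w ^= w >> 16; w ^= w >> 32;
		return w & 1;	/* parity of all 64 bits */
	}

	int main(void)
	{
		/* 0x0F0400C0000 has seven set bits, so parity is 1 */
		printf("%llu\n", (unsigned long long)parity64(0x0F0400C0000ull));
		return 0;
	}

Each bitN_mask above then selects the microword bits covered by one ECC check bit,
Hamming style. A hedged sketch of how a check bit folds in (the positions 44..50 above
the 44-bit instruction word are an assumption for illustration, not taken from the
elided function body):

	u64 ecc_word = uword;
	ecc_word |= parity64(uword & bit0_mask) << 44;	/* assumed position */
	ecc_word |= parity64(uword & bit1_mask) << 45;
	/* ... through bit6_mask */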
@@ -578,7 +534,7 @@ static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
- unsigned int words_num, uint64_t *uword)
+ unsigned int words_num, u64 *uword)
{
unsigned int ustore_addr;
unsigned int i;
@@ -588,7 +544,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
unsigned int uwrd_lo, uwrd_hi;
- uint64_t tmp;
+ u64 tmp;
tmp = qat_hal_set_uword_ecc(uword[i]);
uwrd_lo = (unsigned int)(tmp & 0xffffffff);
@@ -644,7 +600,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
- (uint64_t *)inst);
+ (u64 *)inst);
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
@@ -821,7 +777,7 @@ void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned int uaddr,
- unsigned int words_num, uint64_t *uword)
+ unsigned int words_num, u64 *uword)
{
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
@@ -871,11 +827,11 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
#define MAX_EXEC_INST 100
static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- uint64_t *micro_inst, unsigned int inst_num,
+ u64 *micro_inst, unsigned int inst_num,
int code_off, unsigned int max_cycle,
unsigned int *endpc)
{
- uint64_t savuwords[MAX_EXEC_INST];
+ u64 savuwords[MAX_EXEC_INST];
unsigned int ind_lm_addr0, ind_lm_addr1;
unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
unsigned int ind_cnt_sig;
@@ -972,7 +928,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
unsigned short reg_addr;
int status = 0;
- uint64_t insts, savuword;
+ u64 insts, savuword;
reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
if (reg_addr == BAD_REGADDR) {
@@ -984,7 +940,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
insts = 0xA070000000ull | (reg_addr & 0x3ff);
break;
default:
- insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+ insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
@@ -1030,7 +986,7 @@ static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
unsigned short reg_num, unsigned int data)
{
unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
- uint64_t insts[] = {
+ u64 insts[] = {
0x0F440000000ull,
0x0F040000000ull,
0x0F0000C0300ull,
@@ -1076,13 +1032,13 @@ int qat_hal_get_ins_num(void)
return ARRAY_SIZE(inst_4b);
}
-static int qat_hal_concat_micro_code(uint64_t *micro_inst,
+static int qat_hal_concat_micro_code(u64 *micro_inst,
unsigned int inst_num, unsigned int size,
unsigned int addr, unsigned int *value)
{
int i;
unsigned int cur_value;
- const uint64_t *inst_arr;
+ const u64 *inst_arr;
int fixup_offset;
int usize = 0;
int orig_num;
@@ -1107,7 +1063,7 @@ static int qat_hal_concat_micro_code(uint64_t *micro_inst,
static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- int *pfirst_exec, uint64_t *micro_inst,
+ int *pfirst_exec, u64 *micro_inst,
unsigned int inst_num)
{
int stat = 0;
@@ -1140,7 +1096,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_batch_init *lm_init_header)
{
struct icp_qat_uof_batch_init *plm_init;
- uint64_t *micro_inst_arry;
+ u64 *micro_inst_arry;
int micro_inst_num;
int alloc_inst_size;
int first_exec = 1;
@@ -1150,7 +1106,7 @@ int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
alloc_inst_size = lm_init_header->size;
if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
alloc_inst_size = handle->hal_handle->max_ustore;
- micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+ micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
GFP_KERNEL);
if (!micro_inst_arry)
return -ENOMEM;
@@ -1229,7 +1185,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
data16low;
unsigned short reg_mask;
int status = 0;
- uint64_t micro_inst[] = {
+ u64 micro_inst[] = {
0x0F440000000ull,
0x0F040000000ull,
0x0A000000000ull,
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 6bd8f6a2a24f..bff759e2f811 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
@@ -332,13 +288,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
}
return 0;
out_err:
+ /* Do not free the list head unless we allocated it. */
+ tail_old = tail_old->next;
+ if (flag) {
+ kfree(*init_tab_base);
+ *init_tab_base = NULL;
+ }
+
while (tail_old) {
mem_init = tail_old->next;
kfree(tail_old);
tail_old = mem_init;
}
- if (flag)
- kfree(*init_tab_base);
return -ENOMEM;
}
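
Note: the reordering above fixes a double free. When flag is set, *init_tab_base is the
head node this call allocated, and the old code freed it once in the while loop and
again via kfree(*init_tab_base), leaving the caller with a dangling pointer either way.
Advancing tail_old past the preexisting node first, and clearing the caller's pointer,
removes both problems. A generic sketch of the corrected unwind (hypothetical node
type, user-space free()):

	#include <stdbool.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	/* Free every node appended after 'tail_old'; free the head too, but
	 * only if this call allocated it, clearing the caller's pointer so
	 * it cannot dangle. Mirrors the out_err path above. */
	static void unwind(struct node **headp, struct node *tail_old,
			   bool own_head)
	{
		struct node *n = tail_old->next;

		if (own_head) {
			free(*headp);
			*headp = NULL;
		}
		while (n) {
			struct node *next = n->next;
			free(n);
			n = next;
		}
	}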
@@ -411,16 +372,16 @@ static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
unsigned int ustore_size;
unsigned int patt_pos;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
- uint64_t *fill_data;
+ u64 *fill_data;
uof_image = image->img_ptr;
- fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
+ fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
GFP_KERNEL);
if (!fill_data)
return -ENOMEM;
for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
memcpy(&fill_data[i], &uof_image->fill_pattern,
- sizeof(uint64_t));
+ sizeof(u64));
page = image->page;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
@@ -981,7 +942,7 @@ static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
pr_err("QAT: UOF incompatible\n");
return -EINVAL;
}
- obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+ obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
GFP_KERNEL);
if (!obj_handle->uword_buf)
return -ENOMEM;
@@ -1185,7 +1146,7 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
return 0;
}
-#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low)
+#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)
#define BITS_IN_DWORD 32
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
@@ -1514,10 +1475,10 @@ void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
struct icp_qat_uclo_encap_page *encap_page,
- uint64_t *uword, unsigned int addr_p,
- unsigned int raddr, uint64_t fill)
+ u64 *uword, unsigned int addr_p,
+ unsigned int raddr, u64 fill)
{
- uint64_t uwrd = 0;
+ u64 uwrd = 0;
unsigned int i;
if (!encap_page) {
@@ -1547,12 +1508,12 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
{
unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
- uint64_t fill_pat;
+ u64 fill_pat;
/* load the page starting at appropriate ustore address */
/* get fill-pattern from an image -- they are all the same */
memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
- sizeof(uint64_t));
+ sizeof(u64));
uw_physical_addr = encap_page->beg_addr_p;
uw_relative_addr = 0;
words_num = encap_page->micro_words_num;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 1dfcab317bed..b975c263446d 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -1,62 +1,18 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include "adf_dh895xcc_hw_data.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
-static const uint32_t thrd_to_arb_map_sku4[] = {
+static const u32 thrd_to_arb_map_sku4[] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x00000000, 0x00000000, 0x00000000, 0x00000000
};
-static const uint32_t thrd_to_arb_map_sku6[] = {
+static const u32 thrd_to_arb_map_sku6[] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
@@ -68,20 +24,20 @@ static struct adf_hw_device_class dh895xcc_class = {
.instances = 0
};
-static uint32_t get_accel_mask(uint32_t fuse)
+static u32 get_accel_mask(u32 fuse)
{
return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
ADF_DH895XCC_ACCELERATORS_MASK;
}
-static uint32_t get_ae_mask(uint32_t fuse)
+static u32 get_ae_mask(u32 fuse)
{
return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
}
-static uint32_t get_num_accels(struct adf_hw_device_data *self)
+static u32 get_num_accels(struct adf_hw_device_data *self)
{
- uint32_t i, ctr = 0;
+ u32 i, ctr = 0;
if (!self || !self->accel_mask)
return 0;
@@ -93,9 +49,9 @@ static uint32_t get_num_accels(struct adf_hw_device_data *self)
return ctr;
}
-static uint32_t get_num_aes(struct adf_hw_device_data *self)
+static u32 get_num_aes(struct adf_hw_device_data *self)
{
- uint32_t i, ctr = 0;
+ u32 i, ctr = 0;
if (!self || !self->ae_mask)
return 0;
@@ -107,17 +63,17 @@ static uint32_t get_num_aes(struct adf_hw_device_data *self)
return ctr;
}
-static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_PMISC_BAR;
}
-static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_ETR_BAR;
}
-static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_SRAM_BAR;
}
@@ -161,12 +117,12 @@ static void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
}
}
-static uint32_t get_pf2vf_offset(uint32_t i)
+static u32 get_pf2vf_offset(u32 i)
{
return ADF_DH895XCC_PF2VF_OFFSET(i);
}
-static uint32_t get_vintmsk_offset(uint32_t i)
+static u32 get_vintmsk_offset(u32 i)
{
return ADF_DH895XCC_VINTMSK_OFFSET(i);
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 092f7353ed23..082a04466dca 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_DH895x_HW_DATA_H_
#define ADF_DH895x_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index b11bf8c0e683..4e877b75822b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index a3b4dd8099a7..5246f0524ca3 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
index 6ddc19bd4410..2bfcc67f8f39 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2015 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2015 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
+/* Copyright(c) 2015 - 2020 Intel Corporation */
#ifndef ADF_DH895XVF_HW_DATA_H_
#define ADF_DH895XVF_HW_DATA_H_
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 1b762eefc6c1..7d6e1db272c2 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -1,49 +1,5 @@
-/*
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
- Copyright(c) 2014 Intel Corporation.
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- Contact Information:
- qat-linux@intel.com
-
- BSD LICENSE
- Copyright(c) 2014 Intel Corporation.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 7770660bc853..cffa9fc628ff 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -14,7 +14,7 @@
struct qce_cipher_ctx {
u8 enc_key[QCE_MAX_KEY_SIZE];
unsigned int enc_keylen;
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
};
/**
@@ -43,6 +43,7 @@ struct qce_cipher_reqctx {
struct sg_table src_tbl;
struct scatterlist *src_sg;
unsigned int cryptlen;
+ struct skcipher_request fallback_req; // keep at the end
};
static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
index 9f989cba0f1b..85ba16418a04 100644
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
@@ -87,6 +87,8 @@ struct qce_alg_template {
struct ahash_alg ahash;
} alg;
struct qce_device *qce;
+ const u8 *hash_zero;
+ const u32 digest_size;
};
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 1ab62e7d5f3c..c230843e2ffb 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -203,10 +203,18 @@ static int qce_import_common(struct ahash_request *req, u64 in_count,
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
- struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
- unsigned long flags = rctx->flags;
- bool hmac = IS_SHA_HMAC(flags);
- int ret = -EINVAL;
+ struct qce_sha_reqctx *rctx;
+ unsigned long flags;
+ bool hmac;
+ int ret;
+
+ ret = qce_ahash_init(req);
+ if (ret)
+ return ret;
+
+ rctx = ahash_request_ctx(req);
+ flags = rctx->flags;
+ hmac = IS_SHA_HMAC(flags);
if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
const struct sha1_state *state = in;
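
Note: calling qce_ahash_init() first means an import no longer depends on the
destination request having been set up beforehand: rctx->flags and the base state are
initialized fresh before the serialized counters and buffers are copied in. That makes
the standard export/import round trip safe even into a brand-new request, sketched here
with the generic ahash API (error handling elided):

	char state[HASH_MAX_STATESIZE];

	crypto_ahash_export(req, state);   /* snapshot the partial digest */
	/* ... later, possibly on a different, freshly allocated request: */
	crypto_ahash_import(req2, state);  /* re-inits, then restores state */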
@@ -284,8 +292,6 @@ static int qce_ahash_update(struct ahash_request *req)
if (!sg_last)
return -EINVAL;
- sg_mark_end(sg_last);
-
if (rctx->buflen) {
sg_init_table(rctx->sg, 2);
sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
@@ -305,8 +311,12 @@ static int qce_ahash_final(struct ahash_request *req)
struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
struct qce_device *qce = tmpl->qce;
- if (!rctx->buflen)
+ if (!rctx->buflen) {
+ if (tmpl->hash_zero)
+ memcpy(req->result, tmpl->hash_zero,
+ tmpl->alg.ahash.halg.digestsize);
return 0;
+ }
rctx->last_blk = true;
@@ -338,6 +348,13 @@ static int qce_ahash_digest(struct ahash_request *req)
rctx->first_blk = true;
rctx->last_blk = true;
+ if (!rctx->nbytes_orig) {
+ if (tmpl->hash_zero)
+ memcpy(req->result, tmpl->hash_zero,
+ tmpl->alg.ahash.halg.digestsize);
+ return 0;
+ }
+
return qce->async_req_enqueue(tmpl->qce, &req->base);
}
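
Note: for zero-length input the hardware is now bypassed entirely and the precomputed
digest of the empty message is returned. The constants are exported by the kernel's
sha1/sha256 code; for reference, the SHA-256 one is the familiar empty-input digest:

	/* sha256_zero_message_hash == SHA-256("") */
	static const u8 sha256_empty[SHA256_DIGEST_SIZE] = {
		0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
		0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
		0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
		0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
	};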
@@ -490,6 +507,11 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
alg->halg.digestsize = def->digestsize;
alg->halg.statesize = def->statesize;
+ if (IS_SHA1(def->flags))
+ tmpl->hash_zero = sha1_zero_message_hash;
+ else if (IS_SHA256(def->flags))
+ tmpl->hash_zero = sha256_zero_message_hash;
+
base = &alg->halg.base;
base->cra_blocksize = def->blocksize;
base->cra_priority = 300;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 9412433f3b21..5630c5addd28 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -178,7 +178,7 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
break;
}
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
return ret;
@@ -235,16 +235,15 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
req->cryptlen <= aes_sw_max_len) ||
(IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
req->cryptlen % QCE_SECTOR_SIZE))) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- ret = encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+ crypto_skcipher_decrypt(&rctx->fallback_req);
return ret;
}
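
Note: moving from SYNC_SKCIPHER_REQUEST_ON_STACK to a fallback request embedded in the
request context lets the fallback run asynchronously. The key line is the callback
forwarding: whichever implementation finishes invokes the caller's completion directly,
instead of the old pattern of blocking on a synchronous stack request:

	/* Forward the caller's completion so the whole operation stays
	 * asynchronous (as in the hunk above): */
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);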
@@ -263,10 +262,9 @@ static int qce_skcipher_decrypt(struct skcipher_request *req)
static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
- struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- memset(ctx, 0, sizeof(*ctx));
- crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ /* take the size without the fallback skcipher_request at the end */
+ crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
+ fallback_req));
return 0;
}
@@ -274,17 +272,21 @@ static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- qce_skcipher_init(tfm);
- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
- 0, CRYPTO_ALG_NEED_FALLBACK);
- return PTR_ERR_OR_ZERO(ctx->fallback);
+ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback))
+ return PTR_ERR(ctx->fallback);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback));
+ return 0;
}
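
Note: the two reqsize computations encode one layout invariant: fallback_req must remain
the last member of struct qce_cipher_reqctx (hence the "keep at the end" comment in
cipher.h), because the fallback's own context is carved out of the bytes after it:

	/*
	 * Per-request context for fallback-capable algorithms:
	 *
	 *   [ qce_cipher_reqctx ... | fallback_req | fallback's reqsize ]
	 *                             ^ must stay the last member
	 *
	 * Algorithms without a fallback stop at offsetof(..., fallback_req).
	 */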
static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
}
struct qce_skcipher_def {
@@ -404,6 +406,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg->base.cra_priority = 300;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
alg->base.cra_alignmask = 0;
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
new file mode 100644
index 000000000000..5bc099052bd2
--- /dev/null
+++ b/drivers/crypto/sa2ul.c
@@ -0,0 +1,2420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * K3 SA2UL crypto accelerator driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Keerthy
+ * Vitaly Andrianov
+ * Tero Kristo
+ */
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "sa2ul.h"
+
+/* Byte offset for key in encryption security context */
+#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
+/* Byte offset for Aux-1 in encryption security context */
+#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
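+
+/*
+ * The operands spell out the security-context layout these offsets assume
+ * (an inference from the arithmetic, not from TI documentation):
+ *
+ *   byte  0       control/flags byte
+ *   bytes 1..27   mode-control instructions (MODE_CONTROL_BYTES)
+ *   bytes 28..31  pad/alignment
+ *   bytes 32..63  cipher key    (SC_ENC_KEY_OFFSET  = 1 + 27 + 4      = 32)
+ *   bytes 64..    Aux-1 area    (SC_ENC_AUX1_OFFSET = 1 + 27 + 4 + 32 = 64)
+ */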
+
+#define SA_CMDL_UPD_ENC 0x0001
+#define SA_CMDL_UPD_AUTH 0x0002
+#define SA_CMDL_UPD_ENC_IV 0x0004
+#define SA_CMDL_UPD_AUTH_IV 0x0008
+#define SA_CMDL_UPD_AUX_KEY 0x0010
+
+#define SA_AUTH_SUBKEY_LEN 16
+#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
+#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
+
+#define MODE_CONTROL_BYTES 27
+#define SA_HASH_PROCESSING 0
+#define SA_CRYPTO_PROCESSING 0
+#define SA_UPLOAD_HASH_TO_TLR BIT(6)
+
+#define SA_SW0_FLAGS_MASK 0xF0000
+#define SA_SW0_CMDL_INFO_MASK 0x1F00000
+#define SA_SW0_CMDL_PRESENT BIT(4)
+#define SA_SW0_ENG_ID_MASK 0x3E000000
+#define SA_SW0_DEST_INFO_PRESENT BIT(30)
+#define SA_SW2_EGRESS_LENGTH 0xFF000000
+#define SA_BASIC_HASH 0x10
+
+#define SHA256_DIGEST_WORDS 8
+/* Make 32-bit word from 4 bytes */
+#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
+ ((b2) << 8) | (b3))
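+/* SA_MK_U32 packs four bytes most-significant first, e.g.
+ * SA_MK_U32(0x01, 0x23, 0x45, 0x67) == 0x01234567 */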
+
+/* size of SCCTL structure in bytes */
+#define SA_SCCTL_SZ 16
+
+/* Max Authentication tag size */
+#define SA_MAX_AUTH_TAG_SZ 64
+
+#define PRIV_ID 0x1
+#define PRIV 0x1
+
+static struct device *sa_k3_dev;
+
+/**
+ * struct sa_cmdl_cfg - Command label configuration descriptor
+ * @aalg: authentication algorithm ID
+ * @enc_eng_id: Encryption Engine ID supported by the SA hardware
+ * @auth_eng_id: Authentication Engine ID
+ * @iv_size: Initialization Vector size
+ * @akey: Authentication key
+ * @akey_len: Authentication key length
+ * @enc: True if this is an encryption request
+ */
+struct sa_cmdl_cfg {
+ int aalg;
+ u8 enc_eng_id;
+ u8 auth_eng_id;
+ u8 iv_size;
+ const u8 *akey;
+ u16 akey_len;
+ bool enc;
+};
+
+/**
+ * struct algo_data - Crypto algorithm specific data
+ * @enc_eng: Encryption engine info structure
+ * @auth_eng: Authentication engine info structure
+ * @auth_ctrl: Authentication control word
+ * @hash_size: Size of digest
+ * @iv_idx: iv index in psdata
+ * @iv_out_size: iv out size
+ * @ealg_id: Encryption Algorithm ID
+ * @aalg_id: Authentication algorithm ID
+ * @mci_enc: Mode Control Instruction for Encryption algorithm
+ * @mci_dec: Mode Control Instruction for Decryption
+ * @inv_key: Whether the encryption algorithm demands key inversion
+ * @ctx: Pointer to the algorithm context
+ * @keyed_mac: Whether the authentication algorithm has key
+ * @prep_iopad: Function pointer to generate intermediate ipad/opad
+ */
+struct algo_data {
+ struct sa_eng_info enc_eng;
+ struct sa_eng_info auth_eng;
+ u8 auth_ctrl;
+ u8 hash_size;
+ u8 iv_idx;
+ u8 iv_out_size;
+ u8 ealg_id;
+ u8 aalg_id;
+ u8 *mci_enc;
+ u8 *mci_dec;
+ bool inv_key;
+ struct sa_tfm_ctx *ctx;
+ bool keyed_mac;
+ void (*prep_iopad)(struct algo_data *algo, const u8 *key,
+ u16 key_sz, __be32 *ipad, __be32 *opad);
+};
+
+/**
+ * struct sa_alg_tmpl - A generic template encompassing crypto/aead algorithms
+ * @type: Type of the crypto algorithm.
+ * @alg: Union of crypto algorithm definitions.
+ * @registered: Flag indicating if the crypto algorithm is already registered
+ */
+struct sa_alg_tmpl {
+ u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
+ union {
+ struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
+ struct aead_alg aead;
+ } alg;
+ bool registered;
+};
+
+/**
+ * struct sa_rx_data - RX packet miscellaneous data placeholder
+ * @req: crypto request data pointer
+ * @ddev: pointer to the DMA device
+ * @tx_in: dma_async_tx_descriptor pointer for rx channel
+ * @split_src_sg: Set if the src sg is split and needs to be freed up
+ * @split_dst_sg: Set if the dst sg is split and needs to be freed up
+ * @enc: Flag indicating either encryption or decryption
+ * @enc_iv_size: Initialisation vector size
+ * @iv_idx: Initialisation vector index
+ * @rx_sg: Static scatterlist entry for overriding RX data
+ * @tx_sg: Static scatterlist entry for overriding TX data
+ * @src: Source data pointer
+ * @dst: Destination data pointer
+ */
+struct sa_rx_data {
+ void *req;
+ struct device *ddev;
+ struct dma_async_tx_descriptor *tx_in;
+ struct scatterlist *split_src_sg;
+ struct scatterlist *split_dst_sg;
+ u8 enc;
+ u8 enc_iv_size;
+ u8 iv_idx;
+ struct scatterlist rx_sg;
+ struct scatterlist tx_sg;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+};
+
+/**
+ * struct sa_req - SA request definition
+ * @dev: device for the request
+ * @size: total data to be transmitted via DMA
+ * @enc_offset: offset of cipher data
+ * @enc_size: data to be passed to cipher engine
+ * @enc_iv: cipher IV
+ * @auth_offset: offset of the authentication data
+ * @auth_size: size of the authentication data
+ * @auth_iv: authentication IV
+ * @type: algorithm type for the request
+ * @cmdl: command label pointer
+ * @base: pointer to the base request
+ * @ctx: pointer to the algorithm context data
+ * @enc: true if this is an encryption request
+ * @src: source data
+ * @dst: destination data
+ * @callback: DMA callback for the request
+ * @mdata_size: metadata size passed to DMA
+ */
+struct sa_req {
+ struct device *dev;
+ u16 size;
+ u8 enc_offset;
+ u16 enc_size;
+ u8 *enc_iv;
+ u8 auth_offset;
+ u16 auth_size;
+ u8 *auth_iv;
+ u32 type;
+ u32 *cmdl;
+ struct crypto_async_request *base;
+ struct sa_tfm_ctx *ctx;
+ bool enc;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ dma_async_tx_callback callback;
+ u16 mdata_size;
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For CBC (Cipher Block Chaining) mode for encryption
+ */
+static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For CBC (Cipher Block Chaining) mode for decryption
+ */
+static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
+ { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For CBC (Cipher Block Chaining) mode for encryption
+ */
+static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For CBC (Cipher Block Chaining) mode for decryption
+ */
+static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
+ { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For ECB (Electronic Code Book) mode for encryption
+ */
+static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
+ { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for various Key lengths 128, 192, 256
+ * For ECB (Electronic Code Book) mode for decryption
+ */
+static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
+ { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+};
+
+/*
+ * Mode Control Instructions for the 3DES algorithm,
+ * for CBC (Cipher Block Chaining) and ECB modes,
+ * encryption and decryption respectively
+ */
+static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
+ 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
+ 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
+ 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
+ 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+};
+
+/*
+ * Perform 16-byte (128-bit) swizzling.
+ * The SA2UL expects the security context in little-endian order on its
+ * 128-bit (16-byte) wide bus, so swap 16 bytes at a time from higher
+ * to lower address.
+ */
+static void sa_swiz_128(u8 *in, u16 len)
+{
+ u8 data[16];
+ int i, j;
+
+ for (i = 0; i < len; i += 16) {
+ memcpy(data, &in[i], 16);
+ for (j = 0; j < 16; j++)
+ in[i + j] = data[15 - j];
+ }
+}
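+
+/*
+ * Illustration (informational only): sa_swiz_128(in, 16) rewrites the
+ * block 00 01 02 ... 0e 0f in place as 0f 0e 0d ... 01 00, i.e. each
+ * 16-byte (128-bit) word is byte-reversed.
+ */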
+
+/* Prepare the ipad and opad from the key as per HMAC algorithm step 1 */
+static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
+{
+ int i;
+
+ for (i = 0; i < key_sz; i++) {
+ k_ipad[i] = key[i] ^ 0x36;
+ k_opad[i] = key[i] ^ 0x5c;
+ }
+
+	/*
+	 * Remaining block bytes come from the zero-padded key, so write
+	 * the pad constants directly instead of XORing with 0.
+	 */
+ for (; i < SHA1_BLOCK_SIZE; i++) {
+ k_ipad[i] = 0x36;
+ k_opad[i] = 0x5c;
+ }
+}
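+
+/*
+ * This is the standard HMAC key preparation from RFC 2104: the key is
+ * zero-padded to the block size and XORed with the 0x36 (ipad) and 0x5c
+ * (opad) constants. E.g. a key byte of 0x0b gives an ipad byte of
+ * 0x0b ^ 0x36 = 0x3d and an opad byte of 0x0b ^ 0x5c = 0x57.
+ */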
+
+static void sa_export_shash(struct shash_desc *hash, int block_size,
+ int digest_size, __be32 *out)
+{
+ union {
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ } sha;
+ void *state;
+ u32 *result;
+ int i;
+
+ switch (digest_size) {
+ case SHA1_DIGEST_SIZE:
+ state = &sha.sha1;
+ result = sha.sha1.state;
+ break;
+ case SHA256_DIGEST_SIZE:
+ state = &sha.sha256;
+ result = sha.sha256.state;
+ break;
+ default:
+ dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
+ digest_size);
+ return;
+ }
+
+ crypto_shash_export(hash, state);
+
+ for (i = 0; i < digest_size >> 2; i++)
+ out[i] = cpu_to_be32(result[i]);
+}
+
+static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
+ u16 key_sz, __be32 *ipad, __be32 *opad)
+{
+ SHASH_DESC_ON_STACK(shash, data->ctx->shash);
+ int block_size = crypto_shash_blocksize(data->ctx->shash);
+ int digest_size = crypto_shash_digestsize(data->ctx->shash);
+ u8 k_ipad[SHA1_BLOCK_SIZE];
+ u8 k_opad[SHA1_BLOCK_SIZE];
+
+ shash->tfm = data->ctx->shash;
+
+ prepare_kiopad(k_ipad, k_opad, key, key_sz);
+
+ memzero_explicit(ipad, block_size);
+ memzero_explicit(opad, block_size);
+
+ crypto_shash_init(shash);
+ crypto_shash_update(shash, k_ipad, block_size);
+ sa_export_shash(shash, block_size, digest_size, ipad);
+
+ crypto_shash_init(shash);
+ crypto_shash_update(shash, k_opad, block_size);
+
+ sa_export_shash(shash, block_size, digest_size, opad);
+}
+
+/* Derive the inverse key used in AES-CBC decryption operation */
+static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
+{
+ struct crypto_aes_ctx ctx;
+ int key_pos;
+
+ if (aes_expandkey(&ctx, key, key_sz)) {
+ dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
+ return -EINVAL;
+ }
+
+	/* Workaround to get the right inverse for AES_KEYSIZE_192 keys */
+ if (key_sz == AES_KEYSIZE_192) {
+ ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
+ ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
+ }
+
+	/* Based on the crypto_aes_expand_key logic */
+ switch (key_sz) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ key_pos = key_sz + 24;
+ break;
+
+ case AES_KEYSIZE_256:
+ key_pos = key_sz + 24 - 4;
+ break;
+
+ default:
+ dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
+ return -EINVAL;
+ }
+
+ memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
+ return 0;
+}
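+
+/*
+ * Background (informational): the hardware walks the AES key schedule
+ * backwards for decryption, so it wants the final round keys rather
+ * than the user key. The offsets above select the last words of
+ * crypto_aes_ctx::key_enc; e.g. for a 128-bit key, words 40..43 of the
+ * expanded schedule form the round-10 key that is copied out.
+ */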
+
+/* Set Security context for the encryption engine */
+static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
+ u8 enc, u8 *sc_buf)
+{
+ const u8 *mci = NULL;
+
+ /* Set Encryption mode selector to crypto processing */
+ sc_buf[0] = SA_CRYPTO_PROCESSING;
+
+ if (enc)
+ mci = ad->mci_enc;
+ else
+ mci = ad->mci_dec;
+ /* Set the mode control instructions in security context */
+ if (mci)
+ memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
+
+ /* For AES-CBC decryption get the inverse key */
+ if (ad->inv_key && !enc) {
+ if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
+ return -EINVAL;
+ /* For all other cases: key is used */
+ } else {
+ memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
+ }
+
+ return 0;
+}
+
+/* Set Security context for the authentication engine */
+static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
+ u8 *sc_buf)
+{
+ __be32 ipad[64], opad[64];
+
+ /* Set Authentication mode selector to hash processing */
+ sc_buf[0] = SA_HASH_PROCESSING;
+ /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
+ sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
+ sc_buf[1] |= ad->auth_ctrl;
+
+ /* Copy the keys or ipad/opad */
+ if (ad->keyed_mac) {
+ ad->prep_iopad(ad, key, key_sz, ipad, opad);
+
+ /* Copy ipad to AuthKey */
+ memcpy(&sc_buf[32], ipad, ad->hash_size);
+ /* Copy opad to Aux-1 */
+ memcpy(&sc_buf[64], opad, ad->hash_size);
+ } else {
+ /* basic hash */
+ sc_buf[1] |= SA_BASIC_HASH;
+ }
+}
+
+static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
+{
+ int j;
+
+ for (j = 0; j < ((size16) ? 4 : 2); j++) {
+ *out = cpu_to_be32(*((u32 *)iv));
+ iv += 4;
+ out++;
+ }
+}
+
+/* Format general command label */
+static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
+ struct sa_cmdl_upd_info *upd_info)
+{
+ u8 enc_offset = 0, auth_offset = 0, total = 0;
+ u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
+ u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
+ u32 *word_ptr = (u32 *)cmdl;
+ int i;
+
+ /* Clear the command label */
+ memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
+
+	/* Initialize the command update structure */
+ memzero_explicit(upd_info, sizeof(*upd_info));
+
+ if (cfg->enc_eng_id && cfg->auth_eng_id) {
+ if (cfg->enc) {
+ auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
+ enc_next_eng = cfg->auth_eng_id;
+
+ if (cfg->iv_size)
+ auth_offset += cfg->iv_size;
+ } else {
+ enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
+ auth_next_eng = cfg->enc_eng_id;
+ }
+ }
+
+ if (cfg->enc_eng_id) {
+ upd_info->flags |= SA_CMDL_UPD_ENC;
+ upd_info->enc_size.index = enc_offset >> 2;
+ upd_info->enc_offset.index = upd_info->enc_size.index + 1;
+ /* Encryption command label */
+ cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
+
+ /* Encryption modes requiring IV */
+ if (cfg->iv_size) {
+ upd_info->flags |= SA_CMDL_UPD_ENC_IV;
+ upd_info->enc_iv.index =
+ (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
+ upd_info->enc_iv.size = cfg->iv_size;
+
+ cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
+
+ cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
+ (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
+ total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
+ } else {
+ cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES;
+ total += SA_CMDL_HEADER_SIZE_BYTES;
+ }
+ }
+
+ if (cfg->auth_eng_id) {
+ upd_info->flags |= SA_CMDL_UPD_AUTH;
+ upd_info->auth_size.index = auth_offset >> 2;
+ upd_info->auth_offset.index = upd_info->auth_size.index + 1;
+ cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
+ cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
+ SA_CMDL_HEADER_SIZE_BYTES;
+ total += SA_CMDL_HEADER_SIZE_BYTES;
+ }
+
+ total = roundup(total, 8);
+
+ for (i = 0; i < total / 4; i++)
+ word_ptr[i] = swab32(word_ptr[i]);
+
+ return total;
+}
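+
+/*
+ * Resulting command label layout (sketch): each engine contributes an
+ * 8-byte header (next-engine select, label length, option control),
+ * the encryption header is optionally followed by the IV, and the
+ * whole label is rounded up to 8 bytes and byte-swapped into the
+ * 32-bit word order expected by the hardware.
+ */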
+
+/* Update Command label */
+static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
+ struct sa_cmdl_upd_info *upd_info)
+{
+ int i = 0, j;
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
+ cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
+ cmdl[upd_info->enc_size.index] |= req->enc_size;
+ cmdl[upd_info->enc_offset.index] &=
+ ~SA_CMDL_SOP_BYPASS_LEN_MASK;
+ cmdl[upd_info->enc_offset.index] |=
+ ((u32)req->enc_offset <<
+ __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
+ __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
+ u32 *enc_iv = (u32 *)req->enc_iv;
+
+ for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
+ data[j] = cpu_to_be32(*enc_iv);
+ enc_iv++;
+ }
+ }
+ }
+
+ if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
+ cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
+ cmdl[upd_info->auth_size.index] |= req->auth_size;
+ cmdl[upd_info->auth_offset.index] &=
+ ~SA_CMDL_SOP_BYPASS_LEN_MASK;
+ cmdl[upd_info->auth_offset.index] |=
+ ((u32)req->auth_offset <<
+ __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
+ if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
+ sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
+ req->auth_iv,
+ (upd_info->auth_iv.size > 8));
+ }
+ if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
+ int offset = (req->auth_size & 0xF) ? 4 : 0;
+
+ memcpy(&cmdl[upd_info->aux_key_info.index],
+ &upd_info->aux_key[offset], 16);
+ }
+ }
+}
+
+/* Format SWINFO words to be sent to SA */
+static
+void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
+ u8 cmdl_present, u8 cmdl_offset, u8 flags,
+ u8 hash_size, u32 *swinfo)
+{
+ swinfo[0] = sc_id;
+ swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
+ if (likely(cmdl_present))
+ swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
+ __ffs(SA_SW0_CMDL_INFO_MASK));
+ swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
+
+ swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
+ swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
+ swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
+ swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
+}
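+
+/*
+ * SWINFO packing (informational): word 0 carries the security context
+ * ID plus flags, command-label info and engine ID; words 1 and 2 hold
+ * the low and high halves of the context DMA address, with the hash
+ * size folded into the top byte of word 2 (SA_SW2_EGRESS_LENGTH).
+ */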
+
+/* Dump the security context */
+static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
+{
+#ifdef DEBUG
+ dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, SA_CTX_MAX_SZ, false);
+#endif
+}
+
+static
+int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
+ u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
+ struct algo_data *ad, u8 enc, u32 *swinfo)
+{
+ int enc_sc_offset = 0;
+ int auth_sc_offset = 0;
+ u8 *sc_buf = ctx->sc;
+ u16 sc_id = ctx->sc_id;
+ u8 first_engine = 0;
+
+ memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
+
+ if (ad->auth_eng.eng_id) {
+ if (enc)
+ first_engine = ad->enc_eng.eng_id;
+ else
+ first_engine = ad->auth_eng.eng_id;
+
+ enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+ auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
+ sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
+ if (!ad->hash_size)
+ return -EINVAL;
+ ad->hash_size = roundup(ad->hash_size, 8);
+
+ } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
+ enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
+ first_engine = ad->enc_eng.eng_id;
+ sc_buf[1] = SA_SCCTL_FE_ENC;
+ ad->hash_size = ad->iv_out_size;
+ }
+
+ /* SCCTL Owner info: 0=host, 1=CP_ACE */
+ sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
+ memcpy(&sc_buf[2], &sc_id, 2);
+ sc_buf[4] = 0x0;
+ sc_buf[5] = PRIV_ID;
+ sc_buf[6] = PRIV;
+ sc_buf[7] = 0x0;
+
+ /* Prepare context for encryption engine */
+ if (ad->enc_eng.sc_size) {
+ if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
+ &sc_buf[enc_sc_offset]))
+ return -EINVAL;
+ }
+
+ /* Prepare context for authentication engine */
+ if (ad->auth_eng.sc_size)
+ sa_set_sc_auth(ad, auth_key, auth_key_sz,
+ &sc_buf[auth_sc_offset]);
+
+ /* Set the ownership of context to CP_ACE */
+ sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
+
+ /* swizzle the security context */
+ sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
+
+ sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
+ SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
+
+ sa_dump_sc(sc_buf, ctx->sc_phys);
+
+ return 0;
+}
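+
+/*
+ * Security context layout produced above (sketch):
+ *
+ *	[ SCCTL (16 bytes) | PHP/PE context | encryption engine context |
+ *	  authentication engine context ]
+ *
+ * Ownership is handed to CP_ACE by setting bit 7 of the SCCTL owner
+ * byte, and the buffer is then swizzled into 128-bit bus order.
+ */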
+
+/* Free the per direction context memory */
+static void sa_free_ctx_info(struct sa_ctx_info *ctx,
+ struct sa_crypto_data *data)
+{
+ unsigned long bn;
+
+ bn = ctx->sc_id - data->sc_id_start;
+ spin_lock(&data->scid_lock);
+ __clear_bit(bn, data->ctx_bm);
+ data->sc_id--;
+ spin_unlock(&data->scid_lock);
+
+ if (ctx->sc) {
+ dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
+ ctx->sc = NULL;
+ }
+}
+
+static int sa_init_ctx_info(struct sa_ctx_info *ctx,
+ struct sa_crypto_data *data)
+{
+ unsigned long bn;
+ int err;
+
+ spin_lock(&data->scid_lock);
+ bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
+ __set_bit(bn, data->ctx_bm);
+ data->sc_id++;
+ spin_unlock(&data->scid_lock);
+
+ ctx->sc_id = (u16)(data->sc_id_start + bn);
+
+ ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
+ if (!ctx->sc) {
+ dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
+ err = -ENOMEM;
+ goto scid_rollback;
+ }
+
+ return 0;
+
+scid_rollback:
+ spin_lock(&data->scid_lock);
+ __clear_bit(bn, data->ctx_bm);
+ data->sc_id--;
+ spin_unlock(&data->scid_lock);
+
+ return err;
+}
+
+static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ sa_free_ctx_info(&ctx->enc, data);
+ sa_free_ctx_info(&ctx->dec, data);
+
+ crypto_free_sync_skcipher(ctx->fallback.skcipher);
+}
+
+static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ int ret;
+
+ memzero_explicit(ctx, sizeof(*ctx));
+ ctx->dev_data = data;
+
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+ ret = sa_init_ctx_info(&ctx->dec, data);
+ if (ret) {
+ sa_free_ctx_info(&ctx->enc, data);
+ return ret;
+ }
+
+ ctx->fallback.skcipher =
+ crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(ctx->fallback.skcipher)) {
+		dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
+		sa_free_ctx_info(&ctx->dec, data);
+		sa_free_ctx_info(&ctx->enc, data);
+		return PTR_ERR(ctx->fallback.skcipher);
+	}
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+ return 0;
+}
+
+static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen, struct algo_data *ad)
+{
+ struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+ int ret;
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ad->enc_eng.eng_id = SA_ENG_ID_EM1;
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+
+ memzero_explicit(&cfg, sizeof(cfg));
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.iv_size = crypto_skcipher_ivsize(tfm);
+
+ crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback.skcipher,
+ tfm->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen);
+ if (ret)
+ return ret;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
+ &ctx->enc.epib[1]))
+ goto badkey;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ /* Setup Decryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
+ &ctx->dec.epib[1]))
+ goto badkey;
+
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
+ &ctx->dec.cmdl_upd_info);
+
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->dec.cmdl_size = cmdl_len;
+ ctx->iv_idx = ad->iv_idx;
+
+ return 0;
+
+badkey:
+ dev_err(sa_k3_dev, "%s: badkey\n", __func__);
+ return -EINVAL;
+}
+
+static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ int key_idx = (keylen >> 3) - 2;
+
+	if (key_idx < 0 || key_idx >= 3)
+ return -EINVAL;
+
+ ad.mci_enc = mci_cbc_enc_array[key_idx];
+ ad.mci_dec = mci_cbc_dec_array[key_idx];
+ ad.inv_key = true;
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.iv_idx = 4;
+ ad.iv_out_size = 16;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ int key_idx = (keylen >> 3) - 2;
+
+	if (key_idx < 0 || key_idx >= 3)
+ return -EINVAL;
+
+ ad.mci_enc = mci_ecb_enc_array[key_idx];
+ ad.mci_dec = mci_ecb_dec_array[key_idx];
+ ad.inv_key = true;
+ ad.ealg_id = SA_EALG_ID_AES_ECB;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.mci_enc = mci_cbc_3des_enc_array;
+ ad.mci_dec = mci_cbc_3des_dec_array;
+ ad.ealg_id = SA_EALG_ID_3DES_CBC;
+ ad.iv_idx = 6;
+ ad.iv_out_size = 8;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.mci_enc = mci_ecb_3des_enc_array;
+ ad.mci_dec = mci_ecb_3des_dec_array;
+
+ return sa_cipher_setkey(tfm, key, keylen, &ad);
+}
+
+static void sa_aes_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct skcipher_request *req;
+ int sglen;
+ u32 *result;
+ __be32 *mdptr;
+ size_t ml, pl;
+ int i;
+ enum dma_data_direction dir_src;
+ bool diff_dst;
+
+ req = container_of(rxd->req, struct skcipher_request, base);
+ sglen = sg_nents_for_len(req->src, req->cryptlen);
+
+	diff_dst = req->src != req->dst;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ if (req->iv) {
+ mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
+ &ml);
+ result = (u32 *)req->iv;
+
+ for (i = 0; i < (rxd->enc_iv_size / 4); i++)
+ result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
+ }
+
+ dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
+ kfree(rxd->split_src_sg);
+
+ if (diff_dst) {
+ sglen = sg_nents_for_len(req->dst, req->cryptlen);
+
+ dma_unmap_sg(rxd->ddev, req->dst, sglen,
+ DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ kfree(rxd);
+
+ skcipher_request_complete(req, 0);
+}
+
+static void
+sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
+{
+ u32 *out, *in;
+ int i;
+
+ for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
+ *out++ = *in++;
+
+ mdptr[4] = (0xFFFF << 16);
+ for (out = &mdptr[5], in = psdata, i = 0;
+ i < pslen / sizeof(u32); i++)
+ *out++ = *in++;
+}
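+
+/*
+ * TX metadata layout written above (sketch): words 0-3 carry the EPIB
+ * words, word 4 holds a 0xFFFF marker in its upper halfword, and the
+ * command label (psdata) occupies word 5 onwards.
+ */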
+
+static int sa_run(struct sa_req *req)
+{
+ struct sa_rx_data *rxd;
+ gfp_t gfp_flags;
+ u32 cmdl[SA_MAX_CMDL_WORDS];
+ struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
+ struct device *ddev;
+ struct dma_chan *dma_rx;
+ int sg_nents, src_nents, dst_nents;
+ int mapped_src_nents, mapped_dst_nents;
+ struct scatterlist *src, *dst;
+ size_t pl, ml, split_size;
+ struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
+ int ret;
+ struct dma_async_tx_descriptor *tx_out;
+ u32 *mdptr;
+ bool diff_dst;
+ enum dma_data_direction dir_src;
+
+ gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ rxd = kzalloc(sizeof(*rxd), gfp_flags);
+ if (!rxd)
+ return -ENOMEM;
+
+ if (req->src != req->dst) {
+ diff_dst = true;
+ dir_src = DMA_TO_DEVICE;
+ } else {
+ diff_dst = false;
+ dir_src = DMA_BIDIRECTIONAL;
+ }
+
+	/*
+	 * SA2UL has an interesting feature where the receive DMA channel
+	 * is selected based on the amount of data passed to the engine.
+	 * Within the transition range there is also a window where it is
+	 * impossible to predict which channel the data will end up on,
+	 * and this must be avoided. That case is handled by the SW
+	 * fallback mechanism in the individual algorithm implementations.
+	 */
+ if (req->size >= 256)
+ dma_rx = pdata->dma_rx2;
+ else
+ dma_rx = pdata->dma_rx1;
+
+ ddev = dma_rx->device->dev;
+
+ memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
+
+ sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
+
+ if (req->type != CRYPTO_ALG_TYPE_AHASH) {
+ if (req->enc)
+ req->type |=
+ (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
+ else
+ req->type |=
+ (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
+ }
+
+ cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
+
+ /*
+ * Map the packets, first we check if the data fits into a single
+ * sg entry and use that if possible. If it does not fit, we check
+ * if we need to do sg_split to align the scatterlist data on the
+ * actual data size being processed by the crypto engine.
+ */
+ src = req->src;
+ sg_nents = sg_nents_for_len(src, req->size);
+
+ split_size = req->size;
+
+ if (sg_nents == 1 && split_size <= req->src->length) {
+ src = &rxd->rx_sg;
+ sg_init_table(src, 1);
+ sg_set_page(src, sg_page(req->src), split_size,
+ req->src->offset);
+ src_nents = 1;
+ dma_map_sg(ddev, src, sg_nents, dir_src);
+ } else {
+ mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
+ dir_src);
+ ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
+ &src, &src_nents, gfp_flags);
+ if (ret) {
+ src_nents = sg_nents;
+ src = req->src;
+ } else {
+ rxd->split_src_sg = src;
+ }
+ }
+
+ if (!diff_dst) {
+ dst_nents = src_nents;
+ dst = src;
+ } else {
+ dst_nents = sg_nents_for_len(req->dst, req->size);
+
+ if (dst_nents == 1 && split_size <= req->dst->length) {
+ dst = &rxd->tx_sg;
+ sg_init_table(dst, 1);
+ sg_set_page(dst, sg_page(req->dst), split_size,
+ req->dst->offset);
+ dst_nents = 1;
+ dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
+ &split_size, &dst, &dst_nents,
+ gfp_flags);
+ if (ret) {
+ dst = req->dst;
+ } else {
+ rxd->split_dst_sg = dst;
+ }
+ }
+ }
+
+ if (unlikely(src_nents != sg_nents)) {
+ dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
+ ret = -EIO;
+ goto err_cleanup;
+ }
+
+ rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxd->tx_in) {
+ dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto err_cleanup;
+ }
+
+ rxd->req = (void *)req->base;
+ rxd->enc = req->enc;
+ rxd->ddev = ddev;
+ rxd->src = src;
+ rxd->dst = dst;
+ rxd->iv_idx = req->ctx->iv_idx;
+ rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
+ rxd->tx_in->callback = req->callback;
+ rxd->tx_in->callback_param = rxd;
+
+ tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
+ src_nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!tx_out) {
+ dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto err_cleanup;
+ }
+
+ /*
+ * Prepare metadata for DMA engine. This essentially describes the
+ * crypto algorithm to be used, data sizes, different keys etc.
+ */
+ mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
+
+ sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
+ sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
+ sa_ctx->epib);
+
+ dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
+
+ dmaengine_submit(tx_out);
+ dmaengine_submit(rxd->tx_in);
+
+ dma_async_issue_pending(dma_rx);
+ dma_async_issue_pending(pdata->dma_tx);
+
+ return -EINPROGRESS;
+
+err_cleanup:
+	dma_unmap_sg(ddev, req->src, sg_nents, dir_src);
+ kfree(rxd->split_src_sg);
+
+ if (req->src != req->dst) {
+ dst_nents = sg_nents_for_len(req->dst, req->size);
+ dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ kfree(rxd);
+
+ return ret;
+}
+
+static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
+{
+ struct sa_tfm_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct crypto_alg *alg = req->base.tfm->__crt_alg;
+ struct sa_req sa_req = { 0 };
+ int ret;
+
+ if (!req->cryptlen)
+ return 0;
+
+ if (req->cryptlen % alg->cra_blocksize)
+ return -EINVAL;
+
+ /* Use SW fallback if the data size is not supported */
+ if (req->cryptlen > SA_MAX_DATA_SZ ||
+ (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
+ req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher);
+
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ if (enc)
+ ret = crypto_skcipher_encrypt(subreq);
+ else
+ ret = crypto_skcipher_decrypt(subreq);
+
+ skcipher_request_zero(subreq);
+ return ret;
+ }
+
+ sa_req.size = req->cryptlen;
+ sa_req.enc_size = req->cryptlen;
+ sa_req.src = req->src;
+ sa_req.dst = req->dst;
+ sa_req.enc_iv = iv;
+ sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
+ sa_req.enc = enc;
+ sa_req.callback = sa_aes_dma_in_callback;
+ sa_req.mdata_size = 44;
+ sa_req.base = &req->base;
+ sa_req.ctx = ctx;
+
+ return sa_run(&sa_req);
+}
+
+static int sa_encrypt(struct skcipher_request *req)
+{
+ return sa_cipher_run(req, req->iv, 1);
+}
+
+static int sa_decrypt(struct skcipher_request *req)
+{
+ return sa_cipher_run(req, req->iv, 0);
+}
+
+static void sa_sha_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct ahash_request *req;
+ struct crypto_ahash *tfm;
+ unsigned int authsize;
+ int i, sg_nents;
+ size_t ml, pl;
+ u32 *result;
+ __be32 *mdptr;
+
+ req = container_of(rxd->req, struct ahash_request, base);
+ tfm = crypto_ahash_reqtfm(req);
+ authsize = crypto_ahash_digestsize(tfm);
+
+ mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
+ result = (u32 *)req->result;
+
+ for (i = 0; i < (authsize / 4); i++)
+ result[i] = be32_to_cpu(mdptr[i + 4]);
+
+ sg_nents = sg_nents_for_len(req->src, req->nbytes);
+ dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
+
+ kfree(rxd->split_src_sg);
+
+ kfree(rxd);
+
+ ahash_request_complete(req, 0);
+}
+
+static int zero_message_process(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int sa_digest_size = crypto_ahash_digestsize(tfm);
+
+ switch (sa_digest_size) {
+ case SHA1_DIGEST_SIZE:
+ memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
+ break;
+ case SHA256_DIGEST_SIZE:
+ memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
+ break;
+ case SHA512_DIGEST_SIZE:
+ memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
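+
+/*
+ * Informational: the sha*_zero_message_hash arrays are the well-known
+ * digests of the empty message (SHA-1("") starts da39a3ee...), so a
+ * zero-length request is answered without touching the hardware.
+ */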
+
+static int sa_sha_run(struct ahash_request *req)
+{
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_req sa_req = { 0 };
+ size_t auth_len;
+
+ auth_len = req->nbytes;
+
+ if (!auth_len)
+ return zero_message_process(req);
+
+ if (auth_len > SA_MAX_DATA_SZ ||
+ (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
+ auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
+ struct ahash_request *subreq = &rctx->fallback_req;
+ int ret = 0;
+
+ ahash_request_set_tfm(subreq, ctx->fallback.ahash);
+ subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ crypto_ahash_init(subreq);
+
+ subreq->nbytes = auth_len;
+ subreq->src = req->src;
+ subreq->result = req->result;
+
+ ret |= crypto_ahash_update(subreq);
+
+ subreq->nbytes = 0;
+
+ ret |= crypto_ahash_final(subreq);
+
+ return ret;
+ }
+
+ sa_req.size = auth_len;
+ sa_req.auth_size = auth_len;
+ sa_req.src = req->src;
+ sa_req.dst = req->src;
+ sa_req.enc = true;
+ sa_req.type = CRYPTO_ALG_TYPE_AHASH;
+ sa_req.callback = sa_sha_dma_in_callback;
+ sa_req.mdata_size = 28;
+ sa_req.ctx = ctx;
+ sa_req.base = &req->base;
+
+ return sa_run(&sa_req);
+}
+
+static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
+{
+ int bs = crypto_shash_blocksize(ctx->shash);
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+ ad->auth_eng.eng_id = SA_ENG_ID_AM1;
+ ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
+
+ memset(ctx->authkey, 0, bs);
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.aalg = ad->aalg_id;
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.auth_eng_id = ad->auth_eng.eng_id;
+ cfg.iv_size = 0;
+ cfg.akey = NULL;
+ cfg.akey_len = 0;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
+ &ctx->enc.epib[1]))
+ goto badkey;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ goto badkey;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ return 0;
+
+badkey:
+ dev_err(sa_k3_dev, "%s: badkey\n", __func__);
+ return -EINVAL;
+}
+
+static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->dev_data = data;
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+
+ if (alg_base) {
+ ctx->shash = crypto_alloc_shash(alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(ctx->shash)) {
+			dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
+				alg_base);
+			sa_free_ctx_info(&ctx->enc, data);
+			return PTR_ERR(ctx->shash);
+		}
+		/* for fallback */
+		ctx->fallback.ahash =
+			crypto_alloc_ahash(alg_base, 0,
+					   CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(ctx->fallback.ahash)) {
+			dev_err(ctx->dev_data->dev,
+				"Could not load fallback driver\n");
+			crypto_free_shash(ctx->shash);
+			sa_free_ctx_info(&ctx->enc, data);
+			return PTR_ERR(ctx->fallback.ahash);
+		}
+ }
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct sa_sha_req_ctx) +
+ crypto_ahash_reqsize(ctx->fallback.ahash));
+
+ return 0;
+}
+
+static int sa_sha_digest(struct ahash_request *req)
+{
+ return sa_sha_run(req);
+}
+
+static int sa_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n",
+ crypto_ahash_digestsize(tfm), (u64)rctx);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int sa_sha_update(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int sa_sha_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int sa_sha_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags =
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int sa_sha_import(struct ahash_request *req, const void *in)
+{
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int sa_sha_export(struct ahash_request *req, void *out)
+{
+ struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ahash_request *subreq = &rctx->fallback_req;
+
+ ahash_request_set_tfm(subreq, ctx->fallback.ahash);
+ subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(subreq, out);
+}
+
+static int sa_sha1_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha1");
+
+ ad.aalg_id = SA_AALG_ID_SHA1;
+ ad.hash_size = SHA1_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static int sa_sha256_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha256");
+
+ ad.aalg_id = SA_AALG_ID_SHA2_256;
+ ad.hash_size = SHA256_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static int sa_sha512_cra_init(struct crypto_tfm *tfm)
+{
+ struct algo_data ad = { 0 };
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ sa_sha_cra_init_alg(tfm, "sha512");
+
+ ad.aalg_id = SA_AALG_ID_SHA2_512;
+ ad.hash_size = SHA512_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
+
+ sa_sha_setup(ctx, &ad);
+
+ return 0;
+}
+
+static void sa_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
+ sa_free_ctx_info(&ctx->enc, data);
+
+ crypto_free_shash(ctx->shash);
+ crypto_free_ahash(ctx->fallback.ahash);
+}
+
+static void sa_aead_dma_in_callback(void *data)
+{
+ struct sa_rx_data *rxd = (struct sa_rx_data *)data;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+ unsigned int start;
+ unsigned int authsize;
+ u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
+ size_t pl, ml;
+ int i, sglen;
+ int err = 0;
+ u16 auth_len;
+ u32 *mdptr;
+ bool diff_dst;
+ enum dma_data_direction dir_src;
+
+ req = container_of(rxd->req, struct aead_request, base);
+ tfm = crypto_aead_reqtfm(req);
+ start = req->assoclen + req->cryptlen;
+ authsize = crypto_aead_authsize(tfm);
+
+	diff_dst = req->src != req->dst;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
+ mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
+ for (i = 0; i < (authsize / 4); i++)
+ mdptr[i + 4] = swab32(mdptr[i + 4]);
+
+ auth_len = req->assoclen + req->cryptlen;
+ if (!rxd->enc)
+ auth_len -= authsize;
+
+ sglen = sg_nents_for_len(rxd->src, auth_len);
+ dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
+ kfree(rxd->split_src_sg);
+
+ if (diff_dst) {
+ sglen = sg_nents_for_len(rxd->dst, auth_len);
+ dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
+ kfree(rxd->split_dst_sg);
+ }
+
+ if (rxd->enc) {
+ scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
+ 1);
+ } else {
+ start -= authsize;
+ scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
+ 0);
+
+ err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
+ }
+
+ kfree(rxd);
+
+ aead_request_complete(req, err);
+}
+
+static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
+ const char *fallback)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+ int ret;
+
+ memzero_explicit(ctx, sizeof(*ctx));
+
+ ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->shash)) {
+ dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
+ return PTR_ERR(ctx->shash);
+ }
+
+ ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+	if (IS_ERR(ctx->fallback.aead)) {
+		dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
+			fallback);
+		crypto_free_shash(ctx->shash);
+		return PTR_ERR(ctx->fallback.aead);
+	}
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->fallback.aead));
+
+ ret = sa_init_ctx_info(&ctx->enc, data);
+ if (ret)
+ return ret;
+
+ ret = sa_init_ctx_info(&ctx->dec, data);
+ if (ret) {
+ sa_free_ctx_info(&ctx->enc, data);
+ return ret;
+ }
+
+ dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
+ __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
+ ctx->dec.sc_id, &ctx->dec.sc_phys);
+
+ return ret;
+}
+
+static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
+{
+ return sa_cra_init_aead(tfm, "sha1",
+ "authenc(hmac(sha1-ce),cbc(aes-ce))");
+}
+
+static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
+{
+ return sa_cra_init_aead(tfm, "sha256",
+ "authenc(hmac(sha256-ce),cbc(aes-ce))");
+}
+
+static void sa_exit_tfm_aead(struct crypto_aead *tfm)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
+
+ crypto_free_shash(ctx->shash);
+ crypto_free_aead(ctx->fallback.aead);
+
+ sa_free_ctx_info(&ctx->enc, data);
+ sa_free_ctx_info(&ctx->dec, data);
+}
+
+/* AEAD algorithm configuration interface function */
+static int sa_aead_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen,
+ struct algo_data *ad)
+{
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
+ struct crypto_authenc_keys keys;
+ int cmdl_len;
+ struct sa_cmdl_cfg cfg;
+ int key_idx;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ return -EINVAL;
+
+ /* Convert the key size (16/24/32) to the key size index (0/1/2) */
+ key_idx = (keys.enckeylen >> 3) - 2;
+	if (key_idx < 0 || key_idx >= 3)
+ return -EINVAL;
+
+ ad->ctx = ctx;
+ ad->enc_eng.eng_id = SA_ENG_ID_EM1;
+ ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
+ ad->auth_eng.eng_id = SA_ENG_ID_AM1;
+ ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
+ ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
+ ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
+ ad->inv_key = true;
+ ad->keyed_mac = true;
+ ad->ealg_id = SA_EALG_ID_AES_CBC;
+ ad->prep_iopad = sa_prepare_iopads;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.enc = true;
+ cfg.aalg = ad->aalg_id;
+ cfg.enc_eng_id = ad->enc_eng.eng_id;
+ cfg.auth_eng_id = ad->auth_eng.eng_id;
+ cfg.iv_size = crypto_aead_ivsize(authenc);
+ cfg.akey = keys.authkey;
+ cfg.akey_len = keys.authkeylen;
+
+ /* Setup Encryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
+ keys.authkey, keys.authkeylen,
+ ad, 1, &ctx->enc.epib[1]))
+ return -EINVAL;
+
+ cmdl_len = sa_format_cmdl_gen(&cfg,
+ (u8 *)ctx->enc.cmdl,
+ &ctx->enc.cmdl_upd_info);
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ return -EINVAL;
+
+ ctx->enc.cmdl_size = cmdl_len;
+
+ /* Setup Decryption Security Context & Command label template */
+ if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
+ keys.authkey, keys.authkeylen,
+ ad, 0, &ctx->dec.epib[1]))
+ return -EINVAL;
+
+ cfg.enc = false;
+ cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
+ &ctx->dec.cmdl_upd_info);
+
+ if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
+ return -EINVAL;
+
+ ctx->dec.cmdl_size = cmdl_len;
+
+ crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->fallback.aead,
+ crypto_aead_get_flags(authenc) &
+ CRYPTO_TFM_REQ_MASK);
+	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
+}
+
+static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
+
+ return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
+}
+
+static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
+ ad.hash_size = SHA1_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
+
+ return sa_aead_setkey(authenc, key, keylen, &ad);
+}
+
+static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct algo_data ad = { 0 };
+
+ ad.ealg_id = SA_EALG_ID_AES_CBC;
+ ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
+ ad.hash_size = SHA256_DIGEST_SIZE;
+ ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
+
+ return sa_aead_setkey(authenc, key, keylen, &ad);
+}
+
+static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct sa_req sa_req = { 0 };
+ size_t auth_size, enc_size;
+
+ enc_size = req->cryptlen;
+ auth_size = req->assoclen + req->cryptlen;
+
+ if (!enc) {
+ enc_size -= crypto_aead_authsize(tfm);
+ auth_size -= crypto_aead_authsize(tfm);
+ }
+
+ if (auth_size > SA_MAX_DATA_SZ ||
+ (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
+ auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
+ struct aead_request *subreq = aead_request_ctx(req);
+ int ret;
+
+ aead_request_set_tfm(subreq, ctx->fallback.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ ret = enc ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+ return ret;
+ }
+
+ sa_req.enc_offset = req->assoclen;
+ sa_req.enc_size = enc_size;
+ sa_req.auth_size = auth_size;
+ sa_req.size = auth_size;
+ sa_req.enc_iv = iv;
+ sa_req.type = CRYPTO_ALG_TYPE_AEAD;
+ sa_req.enc = enc;
+ sa_req.callback = sa_aead_dma_in_callback;
+ sa_req.mdata_size = 52;
+ sa_req.base = &req->base;
+ sa_req.ctx = ctx;
+ sa_req.src = req->src;
+ sa_req.dst = req->dst;
+
+ return sa_run(&sa_req);
+}
+
+/* AEAD algorithm encrypt interface function */
+static int sa_aead_encrypt(struct aead_request *req)
+{
+ return sa_aead_run(req, req->iv, 1);
+}
+
+/* AEAD algorithm decrypt interface function */
+static int sa_aead_decrypt(struct aead_request *req)
+{
+ return sa_aead_run(req, req->iv, 0);
+}
+
+static struct sa_alg_tmpl sa_algs[] = {
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sa_aes_cbc_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sa_aes_ecb_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = sa_3des_cbc_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .alg.skcipher = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-sa2ul",
+ .base.cra_priority = 30000,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = sa_cipher_cra_init,
+ .exit = sa_cipher_cra_exit,
+ .min_keysize = 3 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = sa_3des_ecb_setkey,
+ .encrypt = sa_encrypt,
+ .decrypt = sa_decrypt,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha1_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha1_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha256_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha256_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.ahash = {
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-sa2ul",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = sa_sha512_cra_init,
+ .cra_exit = sa_sha_cra_exit,
+ },
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sa_sha_req_ctx) +
+ sizeof(struct sha512_state),
+ .init = sa_sha_init,
+ .update = sa_sha_update,
+ .final = sa_sha_final,
+ .finup = sa_sha_finup,
+ .digest = sa_sha_digest,
+ .export = sa_sha_export,
+ .import = sa_sha_import,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha1),cbc(aes))-sa2ul",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_priority = 3000,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+
+ .init = sa_cra_init_aead_sha1,
+ .exit = sa_exit_tfm_aead,
+ .setkey = sa_aead_cbc_sha1_setkey,
+ .setauthsize = sa_aead_setauthsize,
+ .encrypt = sa_aead_encrypt,
+ .decrypt = sa_aead_decrypt,
+ },
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc(hmac(sha256),cbc(aes))-sa2ul",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_ctxsize = sizeof(struct sa_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 0,
+ .cra_priority = 3000,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+
+ .init = sa_cra_init_aead_sha256,
+ .exit = sa_exit_tfm_aead,
+ .setkey = sa_aead_cbc_sha256_setkey,
+ .setauthsize = sa_aead_setauthsize,
+ .encrypt = sa_aead_encrypt,
+ .decrypt = sa_aead_decrypt,
+ },
+ },
+};
+
+/* Register the algorithms with the crypto framework */
+static void sa_register_algos(const struct device *dev)
+{
+ char *alg_name;
+ u32 type;
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ type = sa_algs[i].type;
+ if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
+ alg_name = sa_algs[i].alg.skcipher.base.cra_name;
+ err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
+ } else if (type == CRYPTO_ALG_TYPE_AHASH) {
+ alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
+ err = crypto_register_ahash(&sa_algs[i].alg.ahash);
+ } else if (type == CRYPTO_ALG_TYPE_AEAD) {
+ alg_name = sa_algs[i].alg.aead.base.cra_name;
+ err = crypto_register_aead(&sa_algs[i].alg.aead);
+ } else {
+ dev_err(dev,
+ "un-supported crypto algorithm (%d)",
+ sa_algs[i].type);
+ continue;
+ }
+
+ if (err)
+ dev_err(dev, "Failed to register '%s'\n", alg_name);
+ else
+ sa_algs[i].registered = true;
+ }
+}
+
+/* Unregister the algorithms from the crypto framework */
+static void sa_unregister_algos(const struct device *dev)
+{
+ u32 type;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
+ type = sa_algs[i].type;
+ if (!sa_algs[i].registered)
+ continue;
+ if (type == CRYPTO_ALG_TYPE_SKCIPHER)
+ crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
+ else if (type == CRYPTO_ALG_TYPE_AHASH)
+ crypto_unregister_ahash(&sa_algs[i].alg.ahash);
+ else if (type == CRYPTO_ALG_TYPE_AEAD)
+ crypto_unregister_aead(&sa_algs[i].alg.aead);
+
+ sa_algs[i].registered = false;
+ }
+}
+
+static int sa_init_mem(struct sa_crypto_data *dev_data)
+{
+ struct device *dev = &dev_data->pdev->dev;
+ /* Setup dma pool for security context buffers */
+ dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
+ SA_CTX_MAX_SZ, 64, 0);
+ if (!dev_data->sc_pool) {
+ dev_err(dev, "Failed to create dma pool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int sa_dma_init(struct sa_crypto_data *dd)
+{
+ int ret;
+ struct dma_slave_config cfg;
+
+ dd->dma_rx1 = NULL;
+ dd->dma_tx = NULL;
+ dd->dma_rx2 = NULL;
+
+ ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
+ if (ret)
+ return ret;
+
+ dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
+ if (IS_ERR(dd->dma_rx1)) {
+ if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
+ return PTR_ERR(dd->dma_rx1);
+ }
+
+ dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
+ if (IS_ERR(dd->dma_rx2)) {
+ dma_release_channel(dd->dma_rx1);
+ if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
+ return PTR_ERR(dd->dma_rx2);
+ }
+
+ dd->dma_tx = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_tx)) {
+ if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER)
+ dev_err(dd->dev, "Unable to request tx DMA channel\n");
+ ret = PTR_ERR(dd->dma_tx);
+ goto err_dma_tx;
+ }
+
+ memzero_explicit(&cfg, sizeof(cfg));
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 4;
+ cfg.dst_maxburst = 4;
+
+ ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ ret);
+ goto err_dma_config;
+ }
+
+ ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
+ ret);
+ goto err_dma_config;
+ }
+
+ ret = dmaengine_slave_config(dd->dma_tx, &cfg);
+ if (ret) {
+ dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
+ ret);
+ goto err_dma_config;
+ }
+
+ return 0;
+
+err_dma_config:
+ dma_release_channel(dd->dma_tx);
+err_dma_tx:
+ dma_release_channel(dd->dma_rx1);
+ dma_release_channel(dd->dma_rx2);
+
+ return ret;
+}
+
+static int sa_link_child(struct device *dev, void *data)
+{
+ struct device *parent = data;
+
+ device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
+
+ return 0;
+}
+
+static int sa_ul_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ void __iomem *saul_base;
+ struct sa_crypto_data *dev_data;
+ u32 val;
+ int ret;
+
+ dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ sa_k3_dev = dev;
+ dev_data->dev = dev;
+ dev_data->pdev = pdev;
+ platform_set_drvdata(pdev, dev_data);
+ dev_set_drvdata(sa_k3_dev, dev_data);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
+ ret);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ ret = sa_init_mem(dev_data);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = sa_dma_init(dev_data);
+ if (ret)
+ goto destroy_pool;
+
+ spin_lock_init(&dev_data->scid_lock);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ saul_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(saul_base)) {
+ ret = PTR_ERR(saul_base);
+ goto release_dma;
+ }
+
+ dev_data->base = saul_base;
+ val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+ SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+ SA_EEC_TRNG_EN;
+
+ writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+
+ sa_register_algos(dev);
+
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ goto release_dma;
+
+ device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
+
+ return 0;
+
+release_dma:
+ sa_unregister_algos(&pdev->dev);
+
+ dma_release_channel(dev_data->dma_rx2);
+ dma_release_channel(dev_data->dma_rx1);
+ dma_release_channel(dev_data->dma_tx);
+
+destroy_pool:
+ dma_pool_destroy(dev_data->sc_pool);
+
+disable_pm_runtime:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int sa_ul_remove(struct platform_device *pdev)
+{
+ struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
+
+ sa_unregister_algos(&pdev->dev);
+
+ dma_release_channel(dev_data->dma_rx2);
+ dma_release_channel(dev_data->dma_rx1);
+ dma_release_channel(dev_data->dma_tx);
+
+ dma_pool_destroy(dev_data->sc_pool);
+
+ platform_set_drvdata(pdev, NULL);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ {.compatible = "ti,j721e-sa2ul",},
+ {.compatible = "ti,am654-sa2ul",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver sa_ul_driver = {
+ .probe = sa_ul_probe,
+ .remove = sa_ul_remove,
+ .driver = {
+ .name = "saul-crypto",
+ .of_match_table = of_match,
+ },
+};
+module_platform_driver(sa_ul_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
new file mode 100644
index 000000000000..7f7e3fe60d11
--- /dev/null
+++ b/drivers/crypto/sa2ul.h
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * K3 SA2UL crypto accelerator driver
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Keerthy
+ * Vitaly Andrianov
+ * Tero Kristo
+ */
+
+#ifndef _K3_SA2UL_
+#define _K3_SA2UL_
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/hw_random.h>
+#include <crypto/aes.h>
+
+#define SA_ENGINE_ENABLE_CONTROL 0x1000
+
+struct sa_tfm_ctx;
+/*
+ * SA_ENGINE_ENABLE_CONTROL register bits
+ */
+#define SA_EEC_ENCSS_EN 0x00000001
+#define SA_EEC_AUTHSS_EN 0x00000002
+#define SA_EEC_TRNG_EN 0x00000008
+#define SA_EEC_PKA_EN 0x00000010
+#define SA_EEC_CTXCACH_EN 0x00000080
+#define SA_EEC_CPPI_PORT_IN_EN 0x00000200
+#define SA_EEC_CPPI_PORT_OUT_EN 0x00000800
+
+/*
+ * Encoding used to identify the type of crypto operation
+ * performed on the packet when the packet is returned
+ * by SA
+ */
+#define SA_REQ_SUBTYPE_ENC 0x0001
+#define SA_REQ_SUBTYPE_DEC 0x0002
+#define SA_REQ_SUBTYPE_SHIFT 16
+#define SA_REQ_SUBTYPE_MASK 0xffff
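+/*
+ * For example (a sketch based on the shift/mask above, not taken from
+ * the patch): the request type is packed into the upper half of a
+ * 32-bit word as (subtype << SA_REQ_SUBTYPE_SHIFT) and recovered with
+ * ((word >> SA_REQ_SUBTYPE_SHIFT) & SA_REQ_SUBTYPE_MASK).
+ */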
+
+/* Number of 32-bit words in EPIB */
+#define SA_DMA_NUM_EPIB_WORDS 4
+
+/* Number of 32-bit words in PS data */
+#define SA_DMA_NUM_PS_WORDS 16
+#define NKEY_SZ 3
+#define MCI_SZ 27
+
+/*
+ * Maximum number of simultaneous security contexts
+ * supported by the driver
+ */
+#define SA_MAX_NUM_CTX 512
+
+/*
+ * Assumption: CTX size is a multiple of 32
+ */
+#define SA_CTX_SIZE_TO_DMA_SIZE(ctx_sz) \
+ ((ctx_sz) ? ((ctx_sz) / 32 - 1) : 0)
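+/*
+ * For example, with the encoding above a 64-byte context maps to DMA
+ * size code 1, a 96-byte context to 2, a 128-byte context to 3, and a
+ * zero size to 0.
+ */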
+
+#define SA_CTX_ENC_KEY_OFFSET 32
+#define SA_CTX_ENC_AUX1_OFFSET 64
+#define SA_CTX_ENC_AUX2_OFFSET 96
+#define SA_CTX_ENC_AUX3_OFFSET 112
+#define SA_CTX_ENC_AUX4_OFFSET 128
+
+/* Next Engine Select code in CP_ACE */
+#define SA_ENG_ID_EM1 2 /* Enc/Dec engine with AES/DES core */
+#define SA_ENG_ID_EM2 3 /* Encryption/Decryption engine for pass 2 */
+#define SA_ENG_ID_AM1 4 /* Auth. engine with SHA1/MD5/SHA2 core */
+#define SA_ENG_ID_AM2 5 /* Authentication engine for pass 2 */
+#define SA_ENG_ID_OUTPORT2 20 /* Egress module 2 */
+
+/*
+ * Command Label Definitions
+ */
+#define SA_CMDL_OFFSET_NESC 0 /* Next Engine Select Code */
+#define SA_CMDL_OFFSET_LABEL_LEN 1 /* Engine Command Label Length */
+/* 16-bit Length of Data to be processed */
+#define SA_CMDL_OFFSET_DATA_LEN 2
+#define SA_CMDL_OFFSET_DATA_OFFSET 4 /* Start Data Offset */
+#define SA_CMDL_OFFSET_OPTION_CTRL1 5 /* Option Control Byte 1 */
+#define SA_CMDL_OFFSET_OPTION_CTRL2 6 /* Option Control Byte 2 */
+#define SA_CMDL_OFFSET_OPTION_CTRL3 7 /* Option Control Byte 3 */
+#define SA_CMDL_OFFSET_OPTION_BYTE 8
+
+#define SA_CMDL_HEADER_SIZE_BYTES 8
+
+#define SA_CMDL_OPTION_BYTES_MAX_SIZE 72
+#define SA_CMDL_MAX_SIZE_BYTES (SA_CMDL_HEADER_SIZE_BYTES + \
+ SA_CMDL_OPTION_BYTES_MAX_SIZE)
+
+/* SWINFO word-0 flags */
+#define SA_SW_INFO_FLAG_EVICT 0x0001
+#define SA_SW_INFO_FLAG_TEAR 0x0002
+#define SA_SW_INFO_FLAG_NOPD 0x0004
+
+/*
+ * This type represents the various packet types to be processed
+ * by the PHP engine in SA.
+ * It is used to identify the corresponding PHP processing function.
+ */
+#define SA_CTX_PE_PKT_TYPE_3GPP_AIR 0 /* 3GPP Air Cipher */
+#define SA_CTX_PE_PKT_TYPE_SRTP 1 /* SRTP */
+#define SA_CTX_PE_PKT_TYPE_IPSEC_AH 2 /* IPSec Authentication Header */
+/* IPSec Encapsulating Security Payload */
+#define SA_CTX_PE_PKT_TYPE_IPSEC_ESP 3
+/* Indicates data mode; it may not be used by PHP */
+#define SA_CTX_PE_PKT_TYPE_NONE 4
+#define SA_CTX_ENC_TYPE1_SZ 64 /* Encryption SC with Key only */
+#define SA_CTX_ENC_TYPE2_SZ 96 /* Encryption SC with Key and Aux1 */
+
+#define SA_CTX_AUTH_TYPE1_SZ 64 /* Auth SC with Key only */
+#define SA_CTX_AUTH_TYPE2_SZ 96 /* Auth SC with Key and Aux1 */
+/* Size of security context for PHP engine */
+#define SA_CTX_PHP_PE_CTX_SZ 64
+
+#define SA_CTX_MAX_SZ (64 + SA_CTX_ENC_TYPE2_SZ + SA_CTX_AUTH_TYPE2_SZ)
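+/* With the sizes above, this evaluates to 64 + 96 + 96 = 256 bytes. */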
+
+/*
+ * Encoding of F/E control in SCCTL
+ * Bit 0-1: Fetch PHP Bytes
+ * Bit 2-3: Fetch Encryption/Air Ciphering Bytes
+ * Bit 4-5: Fetch Authentication Bytes or Encr pass 2
+ * Bit 6-7: Evict PHP Bytes
+ *
+ * where 00 = 0 bytes
+ * 01 = 64 bytes
+ * 10 = 96 bytes
+ * 11 = 128 bytes
+ */
+#define SA_CTX_DMA_SIZE_0 0
+#define SA_CTX_DMA_SIZE_64 1
+#define SA_CTX_DMA_SIZE_96 2
+#define SA_CTX_DMA_SIZE_128 3
+
+/*
+ * Byte offset of the owner word in SCCTL
+ * in the security context
+ */
+#define SA_CTX_SCCTL_OWNER_OFFSET 0
+
+#define SA_SCCTL_FE_AUTH_ENC 0x65
+#define SA_SCCTL_FE_ENC 0x8D
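+/*
+ * Decoded against the F/E bit fields above (a worked example, not taken
+ * from the patch): 0x65 = 01100101b fetches 64 PHP, 64 encryption and
+ * 96 authentication bytes and evicts 64 PHP bytes; 0x8D = 10001101b
+ * fetches 64 PHP and 128 encryption bytes and evicts 96 PHP bytes.
+ */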
+
+#define SA_ALIGN_MASK (sizeof(u32) - 1)
+#define SA_ALIGNED __aligned(32)
+
+#define SA_AUTH_SW_CTRL_MD5 1
+#define SA_AUTH_SW_CTRL_SHA1 2
+#define SA_AUTH_SW_CTRL_SHA224 3
+#define SA_AUTH_SW_CTRL_SHA256 4
+#define SA_AUTH_SW_CTRL_SHA384 5
+#define SA_AUTH_SW_CTRL_SHA512 6
+
+/* SA2UL can only handle a maximum data size of 64 KB */
+#define SA_MAX_DATA_SZ U16_MAX
+
+/*
+ * SA2UL can produce unpredictable results for packet sizes that fall in
+ * the following range, so avoid using it for them.
+ */
+#define SA_UNSAFE_DATA_SZ_MIN 240
+#define SA_UNSAFE_DATA_SZ_MAX 256
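+/*
+ * Requests whose size falls inside this window are expected to be
+ * handed to the software fallback; this is an assumption based on the
+ * CRYPTO_ALG_NEED_FALLBACK flag set on the registered sa2ul algorithms.
+ */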
+
+/**
+ * struct sa_crypto_data - Crypto driver instance data
+ * @base: Base address of the register space
+ * @pdev: Platform device pointer
+ * @sc_pool: security context pool
+ * @dev: Device pointer
+ * @scid_lock: secure context ID lock
+ * @sc_id_start: starting index for SC ID
+ * @sc_id_end: Ending index for SC ID
+ * @sc_id: Security Context ID
+ * @ctx_bm: Bitmap to keep track of Security context IDs
+ * @ctx: SA tfm context pointer
+ * @dma_rx1: Pointer to DMA rx channel for sizes < 256 Bytes
+ * @dma_rx2: Pointer to DMA rx channel for sizes > 256 Bytes
+ * @dma_tx: Pointer to DMA TX channel
+ */
+struct sa_crypto_data {
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct dma_pool *sc_pool;
+ struct device *dev;
+ spinlock_t scid_lock; /* lock for SC-ID allocation */
+ /* Security context data */
+ u16 sc_id_start;
+ u16 sc_id_end;
+ u16 sc_id;
+ unsigned long ctx_bm[DIV_ROUND_UP(SA_MAX_NUM_CTX,
+ BITS_PER_LONG)];
+ struct sa_tfm_ctx *ctx;
+ struct dma_chan *dma_rx1;
+ struct dma_chan *dma_rx2;
+ struct dma_chan *dma_tx;
+};
+
+/**
+ * struct sa_cmdl_param_info: Command label parameters info
+ * @index: Index of the parameter in the command label format
+ * @offset: the offset of the parameter
+ * @size: Size of the parameter
+ */
+struct sa_cmdl_param_info {
+ u16 index;
+ u16 offset;
+ u16 size;
+};
+
+/* Maximum length of Auxiliary data in 32-bit words */
+#define SA_MAX_AUX_DATA_WORDS 8
+
+/**
+ * struct sa_cmdl_upd_info: Command label update info
+ * @flags: flags in command label
+ * @submode: Encryption submodes
+ * @enc_size: First pass encryption size
+ * @enc_size2: Second pass encryption size
+ * @enc_offset: Encryption payload offset in the packet
+ * @enc_iv: Encryption initialization vector for pass 1
+ * @enc_iv2: Encryption initialization vector for pass 2
+ * @aad: Associated data
+ * @payload: Payload info
+ * @auth_size: Authentication size for pass 1
+ * @auth_size2: Authentication size for pass 2
+ * @auth_offset: Authentication payload offset
+ * @auth_iv: Authentication initialization vector
+ * @aux_key_info: Authentication aux key information
+ * @aux_key: Aux key for authentication
+ */
+struct sa_cmdl_upd_info {
+ u16 flags;
+ u16 submode;
+ struct sa_cmdl_param_info enc_size;
+ struct sa_cmdl_param_info enc_size2;
+ struct sa_cmdl_param_info enc_offset;
+ struct sa_cmdl_param_info enc_iv;
+ struct sa_cmdl_param_info enc_iv2;
+ struct sa_cmdl_param_info aad;
+ struct sa_cmdl_param_info payload;
+ struct sa_cmdl_param_info auth_size;
+ struct sa_cmdl_param_info auth_size2;
+ struct sa_cmdl_param_info auth_offset;
+ struct sa_cmdl_param_info auth_iv;
+ struct sa_cmdl_param_info aux_key_info;
+ u32 aux_key[SA_MAX_AUX_DATA_WORDS];
+};
+
+/*
+ * Number of 32-bit words appended after the command label
+ * in PSDATA to identify the crypto request context.
+ * word-0: Request type
+ * word-1: pointer to request
+ */
+#define SA_PSDATA_CTX_WORDS 4
+
+/* Maximum size of command label in 32-bit words */
+#define SA_MAX_CMDL_WORDS (SA_DMA_NUM_PS_WORDS - SA_PSDATA_CTX_WORDS)
+
+/**
+ * struct sa_ctx_info: SA context information
+ * @sc: Pointer to security context
+ * @sc_phys: Security context physical address that is passed on to SA2UL
+ * @sc_id: Security context ID
+ * @cmdl_size: Command label size
+ * @cmdl: Command label for a particular iteration
+ * @cmdl_upd_info: structure holding command label update info
+ * @epib: Extended protocol information block words
+ */
+struct sa_ctx_info {
+ u8 *sc;
+ dma_addr_t sc_phys;
+ u16 sc_id;
+ u16 cmdl_size;
+ u32 cmdl[SA_MAX_CMDL_WORDS];
+ struct sa_cmdl_upd_info cmdl_upd_info;
+ /* Store Auxiliary data such as K2/K3 subkeys in AES-XCBC */
+ u32 epib[SA_DMA_NUM_EPIB_WORDS];
+};
+
+/**
+ * struct sa_tfm_ctx: TFM context structure
+ * @dev_data: struct sa_crypto_data pointer
+ * @enc: struct sa_ctx_info for encryption
+ * @dec: struct sa_ctx_info for decryption
+ * @auth: struct sa_ctx_info for authentication
+ * @keylen: encryption/decryption key length
+ * @iv_idx: Initialization vector index
+ * @key: encryption key
+ * @authkey: authentication key
+ * @shash: synchronous hash transform used for HMAC key preparation
+ * @fallback: SW fallback algorithm
+ */
+struct sa_tfm_ctx {
+ struct sa_crypto_data *dev_data;
+ struct sa_ctx_info enc;
+ struct sa_ctx_info dec;
+ struct sa_ctx_info auth;
+ int keylen;
+ int iv_idx;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ u8 authkey[SHA512_BLOCK_SIZE];
+ struct crypto_shash *shash;
+ /* for fallback */
+ union {
+ struct crypto_sync_skcipher *skcipher;
+ struct crypto_ahash *ahash;
+ struct crypto_aead *aead;
+ } fallback;
+};
+
+/**
+ * struct sa_sha_req_ctx: Structure used for sha request
+ * @dev_data: struct sa_crypto_data pointer
+ * @cmdl: Complete command label with psdata and epib included
+ * @fallback_req: SW fallback request container
+ */
+struct sa_sha_req_ctx {
+ struct sa_crypto_data *dev_data;
+ u32 cmdl[SA_MAX_CMDL_WORDS + SA_PSDATA_CTX_WORDS];
+ struct ahash_request fallback_req;
+};
+
+enum sa_submode {
+ SA_MODE_GEN = 0,
+ SA_MODE_CCM,
+ SA_MODE_GCM,
+ SA_MODE_GMAC
+};
+
+/* Encryption algorithms */
+enum sa_ealg_id {
+ SA_EALG_ID_NONE = 0, /* No encryption */
+ SA_EALG_ID_NULL, /* NULL encryption */
+ SA_EALG_ID_AES_CTR, /* AES Counter mode */
+ SA_EALG_ID_AES_F8, /* AES F8 mode */
+ SA_EALG_ID_AES_CBC, /* AES CBC mode */
+ SA_EALG_ID_DES_CBC, /* DES CBC mode */
+ SA_EALG_ID_3DES_CBC, /* 3DES CBC mode */
+ SA_EALG_ID_CCM, /* Counter with CBC-MAC mode */
+ SA_EALG_ID_GCM, /* Galois Counter mode */
+ SA_EALG_ID_AES_ECB,
+ SA_EALG_ID_LAST
+};
+
+/* Authentication algorithms */
+enum sa_aalg_id {
+ SA_AALG_ID_NONE = 0, /* No Authentication */
+ SA_AALG_ID_NULL = SA_EALG_ID_LAST, /* NULL Authentication */
+ SA_AALG_ID_MD5, /* MD5 mode */
+ SA_AALG_ID_SHA1, /* SHA1 mode */
+ SA_AALG_ID_SHA2_224, /* 224-bit SHA2 mode */
+ SA_AALG_ID_SHA2_256, /* 256-bit SHA2 mode */
+ SA_AALG_ID_SHA2_512, /* 512-bit SHA2 mode */
+ SA_AALG_ID_HMAC_MD5, /* HMAC with MD5 mode */
+ SA_AALG_ID_HMAC_SHA1, /* HMAC with SHA1 mode */
+ SA_AALG_ID_HMAC_SHA2_224, /* HMAC with 224-bit SHA2 mode */
+ SA_AALG_ID_HMAC_SHA2_256, /* HMAC with 256-bit SHA2 mode */
+ SA_AALG_ID_GMAC, /* Galois Message Auth. Code mode */
+ SA_AALG_ID_CMAC, /* Cipher-based Message Auth. Code mode */
+ SA_AALG_ID_CBC_MAC, /* Cipher Block Chaining MAC */
+ SA_AALG_ID_AES_XCBC /* AES Extended Cipher Block Chaining */
+};
+
+/*
+ * Mode control engine algorithms used to index the
+ * mode control instruction tables
+ */
+enum sa_eng_algo_id {
+ SA_ENG_ALGO_ECB = 0,
+ SA_ENG_ALGO_CBC,
+ SA_ENG_ALGO_CFB,
+ SA_ENG_ALGO_OFB,
+ SA_ENG_ALGO_CTR,
+ SA_ENG_ALGO_F8,
+ SA_ENG_ALGO_F8F9,
+ SA_ENG_ALGO_GCM,
+ SA_ENG_ALGO_GMAC,
+ SA_ENG_ALGO_CCM,
+ SA_ENG_ALGO_CMAC,
+ SA_ENG_ALGO_CBCMAC,
+ SA_NUM_ENG_ALGOS
+};
+
+/**
+ * struct sa_eng_info: Security accelerator engine info
+ * @eng_id: Engine ID
+ * @sc_size: security context size
+ */
+struct sa_eng_info {
+ u8 eng_id;
+ u16 sc_size;
+};
+
+#endif /* _K3_SA2UL_ */
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 466e30bd529c..0c8cb23ae708 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -146,11 +146,12 @@ struct sahara_ctx {
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
};
struct sahara_aes_reqctx {
unsigned long mode;
+ struct skcipher_request fallback_req; /* keep at the end, the fallback's request ctx is appended here */
};
/*
@@ -617,10 +618,10 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback.
*/
- crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- return crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
@@ -651,21 +652,19 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_encrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_ENCRYPT);
@@ -673,21 +672,19 @@ static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_decrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, 0);
@@ -695,21 +692,19 @@ static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_encrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
@@ -717,21 +712,19 @@ static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
+ struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return err;
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+ skcipher_request_set_callback(&rctx->fallback_req,
+ req->base.flags,
+ req->base.complete,
+ req->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+ req->dst, req->cryptlen, req->iv);
+ return crypto_skcipher_decrypt(&rctx->fallback_req);
}
return sahara_aes_crypt(req, FLAGS_CBC);
@@ -742,14 +735,15 @@ static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
const char *name = crypto_tfm_alg_name(&tfm->base);
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
+ ctx->fallback = crypto_alloc_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
return PTR_ERR(ctx->fallback);
}
- crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx));
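+ /*
+ * Reserve room behind our request context for the fallback's own
+ * request context; this is why fallback_req must be the last member
+ * of struct sahara_aes_reqctx.
+ */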
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback));
return 0;
}
@@ -758,7 +752,7 @@ static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9c6db7f698c4..7c547352a862 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2264,7 +2264,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2285,7 +2286,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-"
"cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2306,7 +2308,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2330,7 +2333,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha1-"
"cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -2352,7 +2356,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2373,7 +2378,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-"
"cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2394,7 +2400,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2418,7 +2425,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha224-"
"cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -2440,7 +2448,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2461,7 +2470,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-"
"cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2482,7 +2492,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2506,7 +2517,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha256-"
"cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -2528,7 +2540,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha384-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
@@ -2549,7 +2562,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha384-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
@@ -2571,7 +2585,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha512-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -2592,7 +2607,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-sha512-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -2614,7 +2630,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-"
"cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -2635,7 +2652,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-"
"cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -2655,7 +2673,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-"
"cbc-3des-talitos",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -2678,7 +2697,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_driver_name = "authenc-hmac-md5-"
"cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -2699,7 +2719,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-talitos",
.base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = skcipher_aes_setkey,
@@ -2712,7 +2733,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-talitos",
.base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -2727,7 +2749,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "ctr-aes-talitos",
.base.cra_blocksize = 1,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -2742,7 +2765,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-talitos",
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = skcipher_des_setkey,
@@ -2755,7 +2779,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-talitos",
.base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
@@ -2770,7 +2795,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-3des-talitos",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = skcipher_des3_setkey,
@@ -2784,7 +2810,8 @@ static struct talitos_alg_template driver_algs[] = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-3des-talitos",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -2804,7 +2831,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2819,7 +2847,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2834,7 +2863,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha224",
.cra_driver_name = "sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2849,7 +2879,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2864,7 +2895,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2879,7 +2911,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2894,7 +2927,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(md5)",
.cra_driver_name = "hmac-md5-talitos",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2909,7 +2943,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-talitos",
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2924,7 +2959,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "hmac-sha224-talitos",
.cra_blocksize = SHA224_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2939,7 +2975,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "hmac-sha256-talitos",
.cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2954,7 +2991,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "hmac-sha384-talitos",
.cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2969,7 +3007,8 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "hmac-sha512-talitos",
.cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index c24f2db8d5e8..a5ee8c2fb4e0 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -545,7 +545,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
*
* Initialize structures.
*/
-static int hash_init(struct ahash_request *req)
+static int ux500_hash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -1359,7 +1359,7 @@ static int ahash_sha1_init(struct ahash_request *req)
ctx->config.oper_mode = HASH_OPER_MODE_HASH;
ctx->digestsize = SHA1_DIGEST_SIZE;
- return hash_init(req);
+ return ux500_hash_init(req);
}
static int ahash_sha256_init(struct ahash_request *req)
@@ -1372,7 +1372,7 @@ static int ahash_sha256_init(struct ahash_request *req)
ctx->config.oper_mode = HASH_OPER_MODE_HASH;
ctx->digestsize = SHA256_DIGEST_SIZE;
- return hash_init(req);
+ return ux500_hash_init(req);
}
static int ahash_sha1_digest(struct ahash_request *req)
@@ -1425,7 +1425,7 @@ static int hmac_sha1_init(struct ahash_request *req)
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
ctx->digestsize = SHA1_DIGEST_SIZE;
- return hash_init(req);
+ return ux500_hash_init(req);
}
static int hmac_sha256_init(struct ahash_request *req)
@@ -1438,7 +1438,7 @@ static int hmac_sha256_init(struct ahash_request *req)
ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
ctx->digestsize = SHA256_DIGEST_SIZE;
- return hash_init(req);
+ return ux500_hash_init(req);
}
static int hmac_sha1_digest(struct ahash_request *req)
@@ -1515,7 +1515,7 @@ static struct hash_algo_template hash_algs[] = {
.conf.algorithm = HASH_ALGO_SHA1,
.conf.oper_mode = HASH_OPER_MODE_HASH,
.hash = {
- .init = hash_init,
+ .init = ux500_hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha1_digest,
@@ -1538,7 +1538,7 @@ static struct hash_algo_template hash_algs[] = {
.conf.algorithm = HASH_ALGO_SHA256,
.conf.oper_mode = HASH_OPER_MODE_HASH,
.hash = {
- .init = hash_init,
+ .init = ux500_hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = ahash_sha256_digest,
@@ -1561,7 +1561,7 @@ static struct hash_algo_template hash_algs[] = {
.conf.algorithm = HASH_ALGO_SHA1,
.conf.oper_mode = HASH_OPER_MODE_HMAC,
.hash = {
- .init = hash_init,
+ .init = ux500_hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = hmac_sha1_digest,
@@ -1585,7 +1585,7 @@ static struct hash_algo_template hash_algs[] = {
.conf.algorithm = HASH_ALGO_SHA256,
.conf.oper_mode = HASH_OPER_MODE_HMAC,
.hash = {
- .init = hash_init,
+ .init = ux500_hash_init,
.update = ahash_update,
.final = ahash_final,
.digest = hmac_sha256_digest,
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index cb8a6ea2a4bc..b2601958282e 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -597,7 +597,8 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "virtio_crypto_aes_cbc",
.base.cra_priority = 150,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx),
.base.cra_module = THIS_MODULE,
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index c8a962c62663..77e744eaedd0 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -498,11 +498,11 @@ free_vqs:
}
#endif
-static unsigned int features[] = {
+static const unsigned int features[] = {
/* none */
};
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index cd11558893cd..27079354dbe9 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -364,6 +364,7 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_AEAD |
CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = ZYNQMP_AES_BLK_SIZE,
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
index a1b199de9006..e97a9c9d010c 100644
--- a/drivers/firmware/efi/embedded-firmware.c
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -37,9 +37,8 @@ static const struct dmi_system_id * const embedded_fw_table[] = {
static int __init efi_check_md_for_embedded_firmware(
efi_memory_desc_t *md, const struct efi_embedded_fw_desc *desc)
{
- struct sha256_state sctx;
struct efi_embedded_fw *fw;
- u8 sha256[32];
+ u8 hash[32];
u64 i, size;
u8 *map;
@@ -54,10 +53,8 @@ static int __init efi_check_md_for_embedded_firmware(
if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN))
continue;
- sha256_init(&sctx);
- sha256_update(&sctx, map + i, desc->length);
- sha256_final(&sctx, sha256);
- if (memcmp(sha256, desc->sha256, 32) == 0)
+ sha256(map + i, desc->length, hash);
+ if (memcmp(hash, desc->sha256, 32) == 0)
break;
}
if ((i + desc->length) > size) {
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 2b4d2b06ccbd..fcde59c65a81 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -106,6 +106,24 @@ struct acomp_alg {
*/
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
u32 mask);
+/**
+ * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * compression algorithm e.g. "deflate"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ * @node: specifies the NUMA node the ZIP hardware belongs to
+ *
+ * Allocate a handle for a compression algorithm. Drivers should try to use
+ * (de)compressors on the specified NUMA node.
+ * The returned struct crypto_acomp is the handle that is required for any
+ * subsequent API invocation for the compression operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
+ u32 mask, int node);
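+
+/*
+ * Example (a sketch, not part of this patch; "deflate" and dev_to_node()
+ * are illustrative):
+ *
+ * acomp = crypto_alloc_acomp_node("deflate", 0, 0, dev_to_node(dev));
+ */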
static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 00a9cf98debe..143d884d65c7 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -116,7 +116,7 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
-int crypto_check_attr_type(struct rtattr **tb, u32 type);
+int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
@@ -235,18 +235,29 @@ static inline struct crypto_async_request *crypto_get_backlog(
container_of(queue->backlog, struct crypto_async_request, list);
}
-static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
+static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
- return (type ^ off) & mask & off;
+ return (algt->type ^ off) & algt->mask & off;
}
/*
- * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
- * Otherwise returns zero.
+ * When an algorithm uses another algorithm (e.g., if it's an instance of a
+ * template), these are the flags that should always be set on the "outer"
+ * algorithm if any "inner" algorithm has them set.
*/
-static inline int crypto_requires_sync(u32 type, u32 mask)
+#define CRYPTO_ALG_INHERITED_FLAGS \
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \
+ CRYPTO_ALG_ALLOCATES_MEMORY)
+
+/*
+ * Given the type and mask that specify the flags restrictions on a template
+ * instance being created, return the mask that should be passed to
+ * crypto_grab_*() (along with type=0) to honor any request the user made to
+ * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
+ */
+static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
- return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
+ return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
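+
+/*
+ * A typical template ->create() is then expected to start roughly like
+ * this (a sketch, assuming an skcipher template; not taken from this
+ * patch):
+ *
+ * err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+ * if (err)
+ * return err;
+ * ...
+ * err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
+ * crypto_attr_alg_name(tb[1]), 0, mask);
+ */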
noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 2676f4fbd4c1..3a1c72fdb7cf 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -25,11 +25,7 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
-#ifdef CONFIG_X86_64
-#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
-#else
#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
-#endif
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h
index 234ee28078ef..d2ac3ff7dc1e 100644
--- a/include/crypto/chacha20poly1305.h
+++ b/include/crypto/chacha20poly1305.h
@@ -45,4 +45,6 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+bool chacha20poly1305_selftest(void);
+
#endif /* __CHACHA20POLY1305_H */
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 4829d2367eda..19ce91f2359f 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -687,7 +687,7 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
* The message digest API is able to maintain state information for the
* caller.
*
- * The synchronous message digest API can store user-related context in in its
+ * The synchronous message digest API can store user-related context in its
* shash_desc request data structure.
*/
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 088c1ded2714..ee6412314f8f 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -135,6 +135,7 @@ struct af_alg_async_req {
* SG?
* @enc: Cryptographic operation to be performed when
* recvmsg is invoked.
+ * @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
*/
struct af_alg_ctx {
@@ -151,6 +152,7 @@ struct af_alg_ctx {
bool more;
bool merge;
bool enc;
+ bool init;
unsigned int len;
};
@@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset);
void af_alg_wmem_wakeup(struct sock *sk);
-int af_alg_wait_for_data(struct sock *sk, unsigned flags);
+int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 229d37681a9d..7fd7126f593a 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@ struct aead_geniv_ctx {
};
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type, u32 mask);
+ struct rtattr **tb);
int aead_init_geniv(struct crypto_aead *tfm);
void aead_exit_geniv(struct crypto_aead *tfm);
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 10753ff71d46..4ff3da816630 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -147,6 +147,7 @@ static inline void sha256_init(struct sha256_state *sctx)
}
void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
void sha256_final(struct sha256_state *sctx, u8 *out);
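+/*
+ * One-shot helper, equivalent to sha256_init() + sha256_update() +
+ * sha256_final() on a local state (see the efi embedded-firmware
+ * conversion in this patch for a user).
+ */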
+void sha256(const u8 *data, unsigned int len, u8 *out);
static inline void sha224_init(struct sha256_state *sctx)
{
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 141e7690f9c3..5663f71198b3 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -18,7 +18,7 @@
* @iv: Initialisation Vector
* @src: Source SG list
* @dst: Destination SG list
- * @base: Underlying async request request
+ * @base: Underlying async request
* @__ctx: Start of private context data
*/
struct skcipher_request {
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 763863dbc079..ef90e07c9635 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -16,9 +16,8 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
#include <linux/completion.h>
/*
@@ -61,8 +60,8 @@
#define CRYPTO_ALG_ASYNC 0x00000080
/*
- * Set this bit if and only if the algorithm requires another algorithm of
- * the same type to handle corner cases.
+ * Set if the algorithm (or an algorithm which it uses) requires another
+ * algorithm of the same type to handle corner cases.
*/
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
@@ -102,6 +101,38 @@
#define CRYPTO_NOLOAD 0x00008000
/*
+ * The algorithm may allocate memory during request processing, i.e. during
+ * encryption, decryption, or hashing. Users can request an algorithm with this
+ * flag unset if they can't handle memory allocation failures.
+ *
+ * This flag is currently only implemented for algorithms of type "skcipher",
+ * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
+ * have this flag set even if they allocate memory.
+ *
+ * In some edge cases, algorithms can allocate memory regardless of this flag.
+ * To avoid these cases, users must obey the following usage constraints:
+ * skcipher:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - If the data were to be divided into chunks of size
+ * crypto_skcipher_walksize() (with any remainder going at the end), no
+ * chunk can cross a page boundary or a scatterlist element boundary.
+ * aead:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - The first scatterlist element must contain all the associated data,
+ * and its pages must be !PageHighMem.
+ * - If the plaintext/ciphertext were to be divided into chunks of size
+ * crypto_aead_walksize() (with the remainder going at the end), no chunk
+ * can cross a page boundary or a scatterlist element boundary.
+ * ahash:
+ * - The result buffer must be aligned to the algorithm's alignmask.
+ * - crypto_ahash_finup() must not be used unless the algorithm implements
+ * ->finup() natively.
+ */
+#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
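+
+/*
+ * For example (a sketch, not part of this patch): a user that cannot
+ * handle allocation failures can ask for an implementation with this
+ * flag clear by passing it in the lookup mask while leaving the
+ * corresponding type bit clear:
+ *
+ * tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
+ */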
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
@@ -595,6 +626,8 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
struct crypto_tfm {
u32 crt_flags;
+
+ int node;
void (*exit)(struct crypto_tfm *tfm);
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 7bd6d8af0004..5d906dfbf3ed 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -63,6 +63,9 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
+/*-- mpi-sub-ui.c --*/
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
+
/*-- mpi-bit.c --*/
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 7302efff5e65..a433f13fc4bf 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -67,17 +67,6 @@ struct padata_serial_queue {
};
/**
- * struct padata_parallel_queue - The percpu padata parallel queue
- *
- * @reorder: List to wait for reordering after parallel processing.
- * @num_obj: Number of objects that are processed by this cpu.
- */
-struct padata_parallel_queue {
- struct padata_list reorder;
- atomic_t num_obj;
-};
-
-/**
* struct padata_cpumask - The cpumasks for the parallel/serial workers
*
* @pcpu: cpumask for the parallel workers.
@@ -93,7 +82,7 @@ struct padata_cpumask {
* that depends on the cpumask in use.
*
* @ps: padata_shell object.
- * @pqueue: percpu padata queues used for parallelization.
+ * @reorder_list: percpu reorder lists.
 * @squeue: percpu padata queues used for serialization.
* @refcnt: Number of objects holding a reference on this parallel_data.
* @seq_nr: Sequence number of the parallelized data object.
@@ -105,7 +94,7 @@ struct padata_cpumask {
*/
struct parallel_data {
struct padata_shell *ps;
- struct padata_parallel_queue __percpu *pqueue;
+ struct padata_list __percpu *reorder_list;
struct padata_serial_queue __percpu *squeue;
atomic_t refcnt;
unsigned int seq_nr;
@@ -167,7 +156,6 @@ struct padata_mt_job {
* @serial_wq: The workqueue used for serial work.
* @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User-supplied cpumasks for parallel and serial workers.
- * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
@@ -179,7 +167,6 @@ struct padata_instance {
struct workqueue_struct *serial_wq;
struct list_head pslist;
struct padata_cpumask cpumask;
- struct padata_cpumask rcpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
@@ -194,7 +181,7 @@ extern void __init padata_init(void);
static inline void __init padata_init(void) {}
#endif
-extern struct padata_instance *padata_alloc_possible(const char *name);
+extern struct padata_instance *padata_alloc(const char *name);
extern void padata_free(struct padata_instance *pinst);
extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
extern void padata_free_shell(struct padata_shell *ps);
@@ -204,6 +191,4 @@ extern void padata_do_serial(struct padata_priv *padata);
extern void __init padata_do_multithreaded(struct padata_mt_job *job);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
-extern int padata_start(struct padata_instance *pinst);
-extern void padata_stop(struct padata_instance *pinst);
#endif
diff --git a/kernel/padata.c b/kernel/padata.c
index 4373f7adaa40..16cb894dc272 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -250,13 +250,11 @@ EXPORT_SYMBOL(padata_do_parallel);
static struct padata_priv *padata_find_next(struct parallel_data *pd,
bool remove_object)
{
- struct padata_parallel_queue *next_queue;
struct padata_priv *padata;
struct padata_list *reorder;
int cpu = pd->cpu;
- next_queue = per_cpu_ptr(pd->pqueue, cpu);
- reorder = &next_queue->reorder;
+ reorder = per_cpu_ptr(pd->reorder_list, cpu);
spin_lock(&reorder->lock);
if (list_empty(&reorder->list)) {
@@ -291,7 +289,7 @@ static void padata_reorder(struct parallel_data *pd)
int cb_cpu;
struct padata_priv *padata;
struct padata_serial_queue *squeue;
- struct padata_parallel_queue *next_queue;
+ struct padata_list *reorder;
/*
* We need to ensure that only one cpu can work on dequeueing of
@@ -339,9 +337,8 @@ static void padata_reorder(struct parallel_data *pd)
*/
smp_mb();
- next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
- if (!list_empty(&next_queue->reorder.list) &&
- padata_find_next(pd, false))
+ reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
+ if (!list_empty(&reorder->list) && padata_find_next(pd, false))
queue_work(pinst->serial_wq, &pd->reorder_work);
}
@@ -401,17 +398,16 @@ void padata_do_serial(struct padata_priv *padata)
{
struct parallel_data *pd = padata->pd;
int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
- struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
- hashed_cpu);
+ struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
struct padata_priv *cur;
- spin_lock(&pqueue->reorder.lock);
+ spin_lock(&reorder->lock);
/* Sort in ascending order of sequence number. */
- list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
+ list_for_each_entry_reverse(cur, &reorder->list, list)
if (cur->seq_nr < padata->seq_nr)
break;
list_add(&padata->list, &cur->list);
- spin_unlock(&pqueue->reorder.lock);
+ spin_unlock(&reorder->lock);
/*
* Ensure the addition to the reorder list is ordered correctly
@@ -441,28 +437,6 @@ static int padata_setup_cpumasks(struct padata_instance *pinst)
return err;
}
-static int pd_setup_cpumasks(struct parallel_data *pd,
- const struct cpumask *pcpumask,
- const struct cpumask *cbcpumask)
-{
- int err = -ENOMEM;
-
- if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
- goto out;
- if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
- goto free_pcpu_mask;
-
- cpumask_copy(pd->cpumask.pcpu, pcpumask);
- cpumask_copy(pd->cpumask.cbcpu, cbcpumask);
-
- return 0;
-
-free_pcpu_mask:
- free_cpumask_var(pd->cpumask.pcpu);
-out:
- return err;
-}
-
static void __init padata_mt_helper(struct work_struct *w)
{
struct padata_work *pw = container_of(w, struct padata_work, pw_work);
@@ -575,17 +549,15 @@ static void padata_init_squeues(struct parallel_data *pd)
}
}
-/* Initialize all percpu queues used by parallel workers */
-static void padata_init_pqueues(struct parallel_data *pd)
+/* Initialize per-CPU reorder lists */
+static void padata_init_reorder_list(struct parallel_data *pd)
{
int cpu;
- struct padata_parallel_queue *pqueue;
+ struct padata_list *list;
for_each_cpu(cpu, pd->cpumask.pcpu) {
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
-
- __padata_list_init(&pqueue->reorder);
- atomic_set(&pqueue->num_obj, 0);
+ list = per_cpu_ptr(pd->reorder_list, cpu);
+ __padata_list_init(list);
}
}
@@ -593,30 +565,31 @@ static void padata_init_pqueues(struct parallel_data *pd)
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
struct padata_instance *pinst = ps->pinst;
- const struct cpumask *cbcpumask;
- const struct cpumask *pcpumask;
struct parallel_data *pd;
- cbcpumask = pinst->rcpumask.cbcpu;
- pcpumask = pinst->rcpumask.pcpu;
-
pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
if (!pd)
goto err;
- pd->pqueue = alloc_percpu(struct padata_parallel_queue);
- if (!pd->pqueue)
+ pd->reorder_list = alloc_percpu(struct padata_list);
+ if (!pd->reorder_list)
goto err_free_pd;
pd->squeue = alloc_percpu(struct padata_serial_queue);
if (!pd->squeue)
- goto err_free_pqueue;
+ goto err_free_reorder_list;
pd->ps = ps;
- if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
+
+ if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
goto err_free_squeue;
+ if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
+ goto err_free_pcpu;
+
+ cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
+ cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);
- padata_init_pqueues(pd);
+ padata_init_reorder_list(pd);
padata_init_squeues(pd);
pd->seq_nr = -1;
atomic_set(&pd->refcnt, 1);
@@ -626,10 +599,12 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
return pd;
+err_free_pcpu:
+ free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
free_percpu(pd->squeue);
-err_free_pqueue:
- free_percpu(pd->pqueue);
+err_free_reorder_list:
+ free_percpu(pd->reorder_list);
err_free_pd:
kfree(pd);
err:
@@ -640,7 +615,7 @@ static void padata_free_pd(struct parallel_data *pd)
{
free_cpumask_var(pd->cpumask.pcpu);
free_cpumask_var(pd->cpumask.cbcpu);
- free_percpu(pd->pqueue);
+ free_percpu(pd->reorder_list);
free_percpu(pd->squeue);
kfree(pd);
}
@@ -682,12 +657,6 @@ static int padata_replace(struct padata_instance *pinst)
pinst->flags |= PADATA_RESET;
- cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
- cpu_online_mask);
-
- cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
- cpu_online_mask);
-
list_for_each_entry(ps, &pinst->pslist, list) {
err = padata_replace_one(ps);
if (err)
@@ -789,43 +758,6 @@ out:
}
EXPORT_SYMBOL(padata_set_cpumask);
-/**
- * padata_start - start the parallel processing
- *
- * @pinst: padata instance to start
- *
- * Return: 0 on success or negative error code
- */
-int padata_start(struct padata_instance *pinst)
-{
- int err = 0;
-
- mutex_lock(&pinst->lock);
-
- if (pinst->flags & PADATA_INVALID)
- err = -EINVAL;
-
- __padata_start(pinst);
-
- mutex_unlock(&pinst->lock);
-
- return err;
-}
-EXPORT_SYMBOL(padata_start);
-
-/**
- * padata_stop - stop the parallel processing
- *
- * @pinst: padata instance to stop
- */
-void padata_stop(struct padata_instance *pinst)
-{
- mutex_lock(&pinst->lock);
- __padata_stop(pinst);
- mutex_unlock(&pinst->lock);
-}
-EXPORT_SYMBOL(padata_stop);
-
#ifdef CONFIG_HOTPLUG_CPU
static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
@@ -907,9 +839,6 @@ static void __padata_free(struct padata_instance *pinst)
WARN_ON(!list_empty(&pinst->pslist));
- padata_stop(pinst);
- free_cpumask_var(pinst->rcpumask.cbcpu);
- free_cpumask_var(pinst->rcpumask.pcpu);
free_cpumask_var(pinst->cpumask.pcpu);
free_cpumask_var(pinst->cpumask.cbcpu);
destroy_workqueue(pinst->serial_wq);
@@ -1044,18 +973,12 @@ static struct kobj_type padata_attr_type = {
};
/**
- * padata_alloc - allocate and initialize a padata instance and specify
- * cpumasks for serial and parallel workers.
- *
+ * padata_alloc - allocate and initialize a padata instance
* @name: used to identify the instance
- * @pcpumask: cpumask that will be used for padata parallelization
- * @cbcpumask: cpumask that will be used for padata serialization
*
* Return: new instance on success, NULL on error
*/
-static struct padata_instance *padata_alloc(const char *name,
- const struct cpumask *pcpumask,
- const struct cpumask *cbcpumask)
+struct padata_instance *padata_alloc(const char *name)
{
struct padata_instance *pinst;
@@ -1081,26 +1004,16 @@ static struct padata_instance *padata_alloc(const char *name,
free_cpumask_var(pinst->cpumask.pcpu);
goto err_free_serial_wq;
}
- if (!padata_validate_cpumask(pinst, pcpumask) ||
- !padata_validate_cpumask(pinst, cbcpumask))
- goto err_free_masks;
-
- if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
- goto err_free_masks;
- if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
- goto err_free_rcpumask_pcpu;
INIT_LIST_HEAD(&pinst->pslist);
- cpumask_copy(pinst->cpumask.pcpu, pcpumask);
- cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
- cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
- cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);
+ cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
+ cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);
if (padata_setup_cpumasks(pinst))
- goto err_free_rcpumask_cbcpu;
+ goto err_free_masks;
- pinst->flags = 0;
+ __padata_start(pinst);
kobject_init(&pinst->kobj, &padata_attr_type);
mutex_init(&pinst->lock);
@@ -1116,10 +1029,6 @@ static struct padata_instance *padata_alloc(const char *name,
return pinst;
-err_free_rcpumask_cbcpu:
- free_cpumask_var(pinst->rcpumask.cbcpu);
-err_free_rcpumask_pcpu:
- free_cpumask_var(pinst->rcpumask.pcpu);
err_free_masks:
free_cpumask_var(pinst->cpumask.pcpu);
free_cpumask_var(pinst->cpumask.cbcpu);
@@ -1133,21 +1042,7 @@ err_free_inst:
err:
return NULL;
}
-
-/**
- * padata_alloc_possible - Allocate and initialize padata instance.
- * Use the cpu_possible_mask for serial and
- * parallel workers.
- *
- * @name: used to identify the instance
- *
- * Return: new instance on success, NULL on error
- */
-struct padata_instance *padata_alloc_possible(const char *name)
-{
- return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
-}
-EXPORT_SYMBOL(padata_alloc_possible);
+EXPORT_SYMBOL(padata_alloc);
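From a caller's perspective the simplified lifecycle looks roughly like the
sketch below (instance name illustrative): instances are now created over
cpu_possible_mask and started implicitly, so the removed
padata_alloc_possible(), padata_start() and padata_stop() calls have no
replacements to add.

	struct padata_instance *pinst;
	struct padata_shell *ps;

	pinst = padata_alloc("my_inst");	/* was padata_alloc_possible() */
	if (!pinst)
		return -ENOMEM;

	ps = padata_alloc_shell(pinst);
	if (!ps) {
		padata_free(pinst);
		return -ENOMEM;
	}

	/* ... padata_do_parallel() / padata_do_serial() as before ... */

	padata_free_shell(ps);
	padata_free(pinst);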
/**
* padata_free - free a padata instance
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 8cc01a603416..1ed2ed487097 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -17,64 +17,69 @@
#include <linux/notifier.h>
static struct crypto_shash __rcu *crct10dif_tfm;
-static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
static DEFINE_MUTEX(crc_t10dif_mutex);
+static struct work_struct crct10dif_rehash_work;
-static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
{
struct crypto_alg *alg = data;
- struct crypto_shash *new, *old;
if (val != CRYPTO_MSG_ALG_LOADED ||
- static_key_false(&crct10dif_fallback) ||
- strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
- return 0;
+ strcmp(alg->cra_name, CRC_T10DIF_STRING))
+ return NOTIFY_DONE;
+
+ schedule_work(&crct10dif_rehash_work);
+ return NOTIFY_OK;
+}
+
+static void crc_t10dif_rehash(struct work_struct *work)
+{
+ struct crypto_shash *new, *old;
mutex_lock(&crc_t10dif_mutex);
old = rcu_dereference_protected(crct10dif_tfm,
lockdep_is_held(&crc_t10dif_mutex));
- if (!old) {
- mutex_unlock(&crc_t10dif_mutex);
- return 0;
- }
- new = crypto_alloc_shash("crct10dif", 0, 0);
+ new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0);
if (IS_ERR(new)) {
mutex_unlock(&crc_t10dif_mutex);
- return 0;
+ return;
}
rcu_assign_pointer(crct10dif_tfm, new);
mutex_unlock(&crc_t10dif_mutex);
- synchronize_rcu();
- crypto_free_shash(old);
- return 0;
+ if (old) {
+ synchronize_rcu();
+ crypto_free_shash(old);
+ } else {
+ static_branch_disable(&crct10dif_fallback);
+ }
}
static struct notifier_block crc_t10dif_nb = {
- .notifier_call = crc_t10dif_rehash,
+ .notifier_call = crc_t10dif_notify,
};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
{
struct {
struct shash_desc shash;
- char ctx[2];
+ __u16 crc;
} desc;
int err;
- if (static_key_false(&crct10dif_fallback))
+ if (static_branch_unlikely(&crct10dif_fallback))
return crc_t10dif_generic(crc, buffer, len);
rcu_read_lock();
desc.shash.tfm = rcu_dereference(crct10dif_tfm);
- *(__u16 *)desc.ctx = crc;
-
+ desc.crc = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
rcu_read_unlock();
BUG_ON(err);
- return *(__u16 *)desc.ctx;
+ return desc.crc;
}
EXPORT_SYMBOL(crc_t10dif_update);
@@ -86,19 +91,17 @@ EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
+ INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
crypto_register_notifier(&crc_t10dif_nb);
- crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
- if (IS_ERR(crct10dif_tfm)) {
- static_key_slow_inc(&crct10dif_fallback);
- crct10dif_tfm = NULL;
- }
+ crc_t10dif_rehash(&crct10dif_rehash_work);
return 0;
}
static void __exit crc_t10dif_mod_fini(void)
{
crypto_unregister_notifier(&crc_t10dif_nb);
- crypto_free_shash(crct10dif_tfm);
+ cancel_work_sync(&crct10dif_rehash_work);
+ crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
}
module_init(crc_t10dif_mod_init);
@@ -106,15 +109,23 @@ module_exit(crc_t10dif_mod_fini);
static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
{
- if (static_key_false(&crct10dif_fallback))
+ struct crypto_shash *tfm;
+ int len;
+
+ if (static_branch_unlikely(&crct10dif_fallback))
return sprintf(buffer, "fallback\n");
- return sprintf(buffer, "%s\n",
- crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+ rcu_read_lock();
+ tfm = rcu_dereference(crct10dif_tfm);
+ len = snprintf(buffer, PAGE_SIZE, "%s\n",
+ crypto_shash_driver_name(tfm));
+ rcu_read_unlock();
+
+ return len;
}
-module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444);
-MODULE_DESCRIPTION("T10 DIF CRC calculation");
+MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");
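The crc-t10dif rework above is an instance of the common static-branch
fallback pattern; a condensed, self-contained sketch (function names are
hypothetical, not the ones in this file):

	/* Key is born true: take the generic path until a better
	 * backend shows up, then flip the branch once.
	 */
	static DEFINE_STATIC_KEY_TRUE(use_fallback);

	u16 checksum(u16 crc, const u8 *buf, size_t len)
	{
		if (static_branch_unlikely(&use_fallback))
			return checksum_generic(crc, buf, len);	/* hypothetical */
		return checksum_fast(crc, buf, len);		/* hypothetical */
	}

	/* Later, e.g. from a workqueue, once the fast backend exists: */
	static_branch_disable(&use_fallback);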
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index ad0699ce702f..431e04280332 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -21,8 +21,6 @@
#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
-bool __init chacha20poly1305_selftest(void);
-
static void chacha_load_key(u32 *k, const u8 *in)
{
k[0] = get_unaligned_le32(in);
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 2e621697c5c3..2321f6cb322f 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -280,4 +280,14 @@ void sha224_final(struct sha256_state *sctx, u8 *out)
}
EXPORT_SYMBOL(sha224_final);
+void sha256(const u8 *data, unsigned int len, u8 *out)
+{
+ struct sha256_state sctx;
+
+ sha256_init(&sctx);
+ sha256_update(&sctx, data, len);
+ sha256_final(&sctx, out);
+}
+EXPORT_SYMBOL(sha256);
+
MODULE_LICENSE("GPL");
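The new one-shot helper collapses the usual init/update/final sequence into a
single call; a minimal caller sketch (buffer names illustrative):

	#include <crypto/sha.h>

	u8 digest[SHA256_DIGEST_SIZE];

	/* Equivalent to sha256_init() + sha256_update() + sha256_final() */
	sha256(data, len, digest);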
diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile
index d5874a7f5ff9..43b8fce14079 100644
--- a/lib/mpi/Makefile
+++ b/lib/mpi/Makefile
@@ -16,6 +16,7 @@ mpi-y = \
mpicoder.o \
mpi-bit.o \
mpi-cmp.o \
+ mpi-sub-ui.o \
mpih-cmp.o \
mpih-div.o \
mpih-mul.o \
diff --git a/lib/mpi/mpi-sub-ui.c b/lib/mpi/mpi-sub-ui.c
new file mode 100644
index 000000000000..b41b082b5f3e
--- /dev/null
+++ b/lib/mpi/mpi-sub-ui.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI.
+ *
+ * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015
+ * Free Software Foundation, Inc.
+ *
+ * This file was based on the GNU MP Library source file:
+ * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h
+ *
+ * The GNU MP Library is free software; you can redistribute it and/or modify
+ * it under the terms of either:
+ *
+ * * the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your
+ * option) any later version.
+ *
+ * or
+ *
+ * * the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * or both in parallel, as here.
+ *
+ * The GNU MP Library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received copies of the GNU General Public License and the
+ * GNU Lesser General Public License along with the GNU MP Library. If not,
+ * see https://www.gnu.org/licenses/.
+ */
+
+#include "mpi-internal.h"
+
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval)
+{
+ if (u->nlimbs == 0) {
+ if (mpi_resize(w, 1) < 0)
+ return -ENOMEM;
+ w->d[0] = vval;
+ w->nlimbs = (vval != 0);
+ w->sign = (vval != 0);
+ return 0;
+ }
+
+ /* If there is not enough space for W (and a possible carry), increase it. */
+ if (mpi_resize(w, u->nlimbs + 1))
+ return -ENOMEM;
+
+ if (u->sign) {
+ mpi_limb_t cy;
+
+ cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+ w->d[u->nlimbs] = cy;
+ w->nlimbs = u->nlimbs + cy;
+ w->sign = 1;
+ } else {
+ /* The signs are different. Need exact comparison to determine
+ * which operand to subtract from which.
+ */
+ if (u->nlimbs == 1 && u->d[0] < vval) {
+ w->d[0] = vval - u->d[0];
+ w->nlimbs = 1;
+ w->sign = 1;
+ } else {
+ mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+ /* Size can decrease by at most one limb. */
+ w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0));
+ w->sign = 0;
+ }
+ }
+
+ mpi_normalize(w);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_sub_ui);
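A brief usage sketch for the new helper (error handling abbreviated; the
computation shown is illustrative):

	MPI w = mpi_alloc(0);
	int ret;

	if (!w)
		return -ENOMEM;

	ret = mpi_sub_ui(w, u, 1);	/* w = u - 1 */
	if (ret)
		mpi_free(w);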
diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c
index 3d980713a9e2..82bd2b54d741 100644
--- a/net/mptcp/crypto.c
+++ b/net/mptcp/crypto.c
@@ -32,11 +32,8 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn)
{
__be32 mptcp_hashed_key[SHA256_DIGEST_WORDS];
__be64 input = cpu_to_be64(key);
- struct sha256_state state;
- sha256_init(&state);
- sha256_update(&state, (__force u8 *)&input, sizeof(input));
- sha256_final(&state, (u8 *)mptcp_hashed_key);
+ sha256((__force u8 *)&input, sizeof(input), (u8 *)mptcp_hashed_key);
if (token)
*token = be32_to_cpu(mptcp_hashed_key[0]);
@@ -47,7 +44,6 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn)
void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
{
u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE];
- struct sha256_state state;
u8 key1be[8];
u8 key2be[8];
int i;
@@ -67,13 +63,10 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
memcpy(&input[SHA256_BLOCK_SIZE], msg, len);
- sha256_init(&state);
- sha256_update(&state, input, SHA256_BLOCK_SIZE + len);
-
/* emit sha256(K1 || msg) on the second input block, so we can
* reuse 'input' for the last hashing
*/
- sha256_final(&state, &input[SHA256_BLOCK_SIZE]);
+ sha256(input, SHA256_BLOCK_SIZE + len, &input[SHA256_BLOCK_SIZE]);
/* Prepare second part of hmac */
memset(input, 0x5C, SHA256_BLOCK_SIZE);
@@ -82,9 +75,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
for (i = 0; i < 8; i++)
input[i + 8] ^= key2be[i];
- sha256_init(&state);
- sha256_update(&state, input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE);
- sha256_final(&state, (u8 *)hmac);
+ sha256(input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE, hmac);
}
#ifdef CONFIG_MPTCP_HMAC_TEST
diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c
index 8d45c628e988..ab009c7dfdf4 100644
--- a/sound/soc/codecs/cros_ec_codec.c
+++ b/sound/soc/codecs/cros_ec_codec.c
@@ -103,28 +103,6 @@ error:
return ret;
}
-static int calculate_sha256(struct cros_ec_codec_priv *priv,
- uint8_t *buf, uint32_t size, uint8_t *digest)
-{
- struct sha256_state sctx;
-
- sha256_init(&sctx);
- sha256_update(&sctx, buf, size);
- sha256_final(&sctx, digest);
-
-#ifdef DEBUG
- {
- char digest_str[65];
-
- bin2hex(digest_str, digest, 32);
- digest_str[64] = 0;
- dev_dbg(priv->dev, "hash=%s\n", digest_str);
- }
-#endif
-
- return 0;
-}
-
static int dmic_get_gain(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -782,9 +760,8 @@ static int wov_hotword_model_put(struct snd_kcontrol *kcontrol,
if (IS_ERR(buf))
return PTR_ERR(buf);
- ret = calculate_sha256(priv, buf, size, digest);
- if (ret)
- goto leave;
+ sha256(buf, size, digest);
+ dev_dbg(priv->dev, "hash=%*phN\n", SHA256_DIGEST_SIZE, digest);
p.cmd = EC_CODEC_WOV_GET_LANG;
ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,