author     Linus Torvalds <torvalds@linux-foundation.org>    2012-07-26 13:00:59 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-07-26 13:00:59 -0700
commit     44a6b8442190cf213081060b610dae2e822f802b (patch)
tree       2280bfe385bef8b6416a6493ea8988a975008165 /drivers/crypto
parent     945c40c6b007eb4b07374a38ea37b2a34da306b1 (diff)
parent     a43478863b16cb0986fd2ec9d1f1b9ebaaec5922 (diff)
download   linux-44a6b8442190cf213081060b610dae2e822f802b.tar.gz
           linux-44a6b8442190cf213081060b610dae2e822f802b.tar.bz2
           linux-44a6b8442190cf213081060b610dae2e822f802b.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 - Fixed algorithm construction hang when self-test fails.
 - Added SHA variants to talitos AEAD list.
 - New driver for Exynos random number generator.
 - Performance enhancements for arc4.
 - Added hwrng support to caam.
 - Added ahash support to caam.
 - Fixed bad kfree in aesni-intel.
 - Allow aesni-intel in FIPS mode.
 - Added atmel driver with support for AES/3DES/SHA.
 - Bug fixes for mv_cesa.
 - CRC hardware driver for BF60x family processors.

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (66 commits)
  crypto: twofish-avx - remove useless instruction
  crypto: testmgr - add aead cbc aes hmac sha1,256,512 test vectors
  crypto: talitos - add sha224, sha384 and sha512 to existing AEAD algorithms
  crypto: talitos - export the talitos_submit function
  crypto: talitos - move talitos structures to header file
  crypto: atmel - add new tests to tcrypt
  crypto: atmel - add Atmel SHA1/SHA256 driver
  crypto: atmel - add Atmel DES/TDES driver
  crypto: atmel - add Atmel AES driver
  ARM: AT91SAM9G45: add crypto peripherals
  crypto: testmgr - allow aesni-intel and ghash_clmulni-intel in fips mode
  hwrng: exynos - Add support for Exynos random number generator
  crypto: aesni-intel - fix wrong kfree pointer
  crypto: caam - ERA retrieval and printing for SEC device
  crypto: caam - Using alloc_coherent for caam job rings
  crypto: algapi - Fix hang on crypto allocation
  crypto: arc4 - now arc needs blockcipher support
  crypto: caam - one tasklet per job ring
  crypto: caam - consolidate memory barriers from job ring en/dequeue
  crypto: caam - only query h/w in job ring dequeue path
  ...
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig                 56
-rw-r--r--  drivers/crypto/Makefile                 7
-rw-r--r--  drivers/crypto/atmel-aes-regs.h        62
-rw-r--r--  drivers/crypto/atmel-aes.c           1206
-rw-r--r--  drivers/crypto/atmel-sha-regs.h        46
-rw-r--r--  drivers/crypto/atmel-sha.c           1112
-rw-r--r--  drivers/crypto/atmel-tdes-regs.h       89
-rw-r--r--  drivers/crypto/atmel-tdes.c          1215
-rw-r--r--  drivers/crypto/bfin_crc.c             780
-rw-r--r--  drivers/crypto/caam/Kconfig            30
-rw-r--r--  drivers/crypto/caam/Makefile            4
-rw-r--r--  drivers/crypto/caam/caamalg.c         572
-rw-r--r--  drivers/crypto/caam/caamhash.c       1878
-rw-r--r--  drivers/crypto/caam/caamrng.c         309
-rw-r--r--  drivers/crypto/caam/compat.h            2
-rw-r--r--  drivers/crypto/caam/ctrl.c            179
-rw-r--r--  drivers/crypto/caam/ctrl.h             13
-rw-r--r--  drivers/crypto/caam/desc.h             31
-rw-r--r--  drivers/crypto/caam/desc_constr.h      57
-rw-r--r--  drivers/crypto/caam/error.c            44
-rw-r--r--  drivers/crypto/caam/intern.h            6
-rw-r--r--  drivers/crypto/caam/jr.c              115
-rw-r--r--  drivers/crypto/caam/key_gen.c         122
-rw-r--r--  drivers/crypto/caam/key_gen.h          17
-rw-r--r--  drivers/crypto/caam/pdb.h             401
-rw-r--r--  drivers/crypto/caam/regs.h             38
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h      156
-rw-r--r--  drivers/crypto/mv_cesa.c               61
-rw-r--r--  drivers/crypto/talitos.c              283
-rw-r--r--  drivers/crypto/talitos.h              123
30 files changed, 8382 insertions, 632 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1092a770482e..7d74d092aa8f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -298,7 +298,7 @@ config CRYPTO_DEV_TEGRA_AES
will be called tegra-aes.
config CRYPTO_DEV_NX
- tristate "Support for Power7+ in-Nest cryptographic accleration"
+ tristate "Support for Power7+ in-Nest cryptographic acceleration"
depends on PPC64 && IBMVIO
select CRYPTO_AES
select CRYPTO_CBC
@@ -325,4 +325,58 @@ if CRYPTO_DEV_UX500
source "drivers/crypto/ux500/Kconfig"
endif # if CRYPTO_DEV_UX500
+config CRYPTO_DEV_BFIN_CRC
+ tristate "Support for Blackfin CRC hardware"
+ depends on BF60x
+ help
+ Newer Blackfin processors have CRC hardware. Select this if you
+ want to use the Blackfin CRC module.
+
+config CRYPTO_DEV_ATMEL_AES
+ tristate "Support for Atmel AES hw accelerator"
+ depends on ARCH_AT91
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_AES
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ select CONFIG_AT_HDMAC
+ help
+ Some Atmel processors have AES hw accelerator.
+ Select this if you want to use the Atmel module for
+ AES algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atmel-aes.
+
+config CRYPTO_DEV_ATMEL_TDES
+ tristate "Support for Atmel DES/TDES hw accelerator"
+ depends on ARCH_AT91
+ select CRYPTO_DES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ help
+ Some Atmel processors have DES/TDES hw accelerator.
+ Select this if you want to use the Atmel module for
+ DES/TDES algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atmel-tdes.
+
+config CRYPTO_DEV_ATMEL_SHA
+ tristate "Support for Atmel SHA1/SHA256 hw accelerator"
+ depends on ARCH_AT91
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_ALGAPI
+ help
+ Some Atmel processors have SHA1/SHA256 hw accelerator.
+ Select this if you want to use the Atmel module for
+ SHA1/SHA256 algorithms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atmel-sha.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 01390325d72d..880a47b0b023 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -14,4 +14,9 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
-obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
\ No newline at end of file
+obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
+obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
new file mode 100644
index 000000000000..2786bb1a5aa0
--- /dev/null
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -0,0 +1,62 @@
+#ifndef __ATMEL_AES_REGS_H__
+#define __ATMEL_AES_REGS_H__
+
+#define AES_CR 0x00
+#define AES_CR_START (1 << 0)
+#define AES_CR_SWRST (1 << 8)
+#define AES_CR_LOADSEED (1 << 16)
+
+#define AES_MR 0x04
+#define AES_MR_CYPHER_DEC (0 << 0)
+#define AES_MR_CYPHER_ENC (1 << 0)
+#define AES_MR_DUALBUFF (1 << 3)
+#define AES_MR_PROCDLY_MASK (0xF << 4)
+#define AES_MR_PROCDLY_OFFSET 4
+#define AES_MR_SMOD_MASK (0x3 << 8)
+#define AES_MR_SMOD_MANUAL (0x0 << 8)
+#define AES_MR_SMOD_AUTO (0x1 << 8)
+#define AES_MR_SMOD_IDATAR0 (0x2 << 8)
+#define AES_MR_KEYSIZE_MASK (0x3 << 10)
+#define AES_MR_KEYSIZE_128 (0x0 << 10)
+#define AES_MR_KEYSIZE_192 (0x1 << 10)
+#define AES_MR_KEYSIZE_256 (0x2 << 10)
+#define AES_MR_OPMOD_MASK (0x7 << 12)
+#define AES_MR_OPMOD_ECB (0x0 << 12)
+#define AES_MR_OPMOD_CBC (0x1 << 12)
+#define AES_MR_OPMOD_OFB (0x2 << 12)
+#define AES_MR_OPMOD_CFB (0x3 << 12)
+#define AES_MR_OPMOD_CTR (0x4 << 12)
+#define AES_MR_LOD (0x1 << 15)
+#define AES_MR_CFBS_MASK (0x7 << 16)
+#define AES_MR_CFBS_128b (0x0 << 16)
+#define AES_MR_CFBS_64b (0x1 << 16)
+#define AES_MR_CFBS_32b (0x2 << 16)
+#define AES_MR_CFBS_16b (0x3 << 16)
+#define AES_MR_CFBS_8b (0x4 << 16)
+#define AES_MR_CKEY_MASK (0xF << 20)
+#define AES_MR_CKEY_OFFSET 20
+#define AES_MR_CMTYP_MASK (0x1F << 24)
+#define AES_MR_CMTYP_OFFSET 24
+
+#define AES_IER 0x10
+#define AES_IDR 0x14
+#define AES_IMR 0x18
+#define AES_ISR 0x1C
+#define AES_INT_DATARDY (1 << 0)
+#define AES_INT_URAD (1 << 8)
+#define AES_ISR_URAT_MASK (0xF << 12)
+#define AES_ISR_URAT_IDR_WR_PROC (0x0 << 12)
+#define AES_ISR_URAT_ODR_RD_PROC (0x1 << 12)
+#define AES_ISR_URAT_MR_WR_PROC (0x2 << 12)
+#define AES_ISR_URAT_ODR_RD_SUBK (0x3 << 12)
+#define AES_ISR_URAT_MR_WR_SUBK (0x4 << 12)
+#define AES_ISR_URAT_WOR_RD (0x5 << 12)
+
+#define AES_KEYWR(x) (0x20 + ((x) * 0x04))
+#define AES_IDATAR(x) (0x40 + ((x) * 0x04))
+#define AES_ODATAR(x) (0x50 + ((x) * 0x04))
+#define AES_IVR(x) (0x60 + ((x) * 0x04))
+
+#define AES_HW_VERSION 0xFC
+
+#endif /* __ATMEL_AES_REGS_H__ */
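The mode register defined above is a bitfield: one value from each *_MASK group is OR-ed into AES_MR. As a minimal sketch (an editor's illustration, not part of the patch; the helper name example_aes_cbc128_mr is made up), this is how a mode word for AES-128 CBC encryption with automatic start could be composed from these macros. The driver does the equivalent later in atmel_aes_write_ctrl() in atmel-aes.c.

#include <linux/types.h>
#include "atmel-aes-regs.h"	/* the register macros introduced above */

/* Hypothetical helper, for illustration only. */
static u32 example_aes_cbc128_mr(void)
{
	return AES_MR_CYPHER_ENC |	/* bit 0: encrypt rather than decrypt */
	       AES_MR_KEYSIZE_128 |	/* bits 11:10 = 0: 128-bit key */
	       AES_MR_OPMOD_CBC |	/* bits 14:12 = 1: CBC mode */
	       AES_MR_SMOD_AUTO;	/* bits 9:8 = 1: auto start mode */
	/* == 0x00001101, to be written to AES_MR at offset 0x04 */
}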
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
new file mode 100644
index 000000000000..6bb20fffbf49
--- /dev/null
+++ b/drivers/crypto/atmel-aes.c
@@ -0,0 +1,1206 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL AES HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from omap-aes.c driver.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/platform_data/atmel-aes.h>
+#include "atmel-aes-regs.h"
+
+#define CFB8_BLOCK_SIZE 1
+#define CFB16_BLOCK_SIZE 2
+#define CFB32_BLOCK_SIZE 4
+#define CFB64_BLOCK_SIZE 8
+
+/* AES flags */
+#define AES_FLAGS_MODE_MASK 0x01ff
+#define AES_FLAGS_ENCRYPT BIT(0)
+#define AES_FLAGS_CBC BIT(1)
+#define AES_FLAGS_CFB BIT(2)
+#define AES_FLAGS_CFB8 BIT(3)
+#define AES_FLAGS_CFB16 BIT(4)
+#define AES_FLAGS_CFB32 BIT(5)
+#define AES_FLAGS_CFB64 BIT(6)
+#define AES_FLAGS_OFB BIT(7)
+#define AES_FLAGS_CTR BIT(8)
+
+#define AES_FLAGS_INIT BIT(16)
+#define AES_FLAGS_DMA BIT(17)
+#define AES_FLAGS_BUSY BIT(18)
+
+#define AES_FLAGS_DUALBUFF BIT(24)
+
+#define ATMEL_AES_QUEUE_LENGTH 1
+#define ATMEL_AES_CACHE_SIZE 0
+
+#define ATMEL_AES_DMA_THRESHOLD 16
+
+
+struct atmel_aes_dev;
+
+struct atmel_aes_ctx {
+ struct atmel_aes_dev *dd;
+
+ int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
+struct atmel_aes_reqctx {
+ unsigned long mode;
+};
+
+struct atmel_aes_dma {
+ struct dma_chan *chan;
+ struct dma_slave_config dma_conf;
+};
+
+struct atmel_aes_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ void __iomem *io_base;
+
+ struct atmel_aes_ctx *ctx;
+ struct device *dev;
+ struct clk *iclk;
+ int irq;
+
+ unsigned long flags;
+ int err;
+
+ spinlock_t lock;
+ struct crypto_queue queue;
+
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ struct ablkcipher_request *req;
+ size_t total;
+
+ struct scatterlist *in_sg;
+ unsigned int nb_in_sg;
+
+ struct scatterlist *out_sg;
+ unsigned int nb_out_sg;
+
+ size_t bufcnt;
+
+ u8 buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
+ int dma_in;
+ struct atmel_aes_dma dma_lch_in;
+
+ u8 buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
+ int dma_out;
+ struct atmel_aes_dma dma_lch_out;
+
+ u32 hw_version;
+};
+
+struct atmel_aes_drv {
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+static struct atmel_aes_drv atmel_aes = {
+ .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
+};
+
+static int atmel_aes_sg_length(struct ablkcipher_request *req,
+ struct scatterlist *sg)
+{
+ unsigned int total = req->nbytes;
+ int sg_nb;
+ unsigned int len;
+ struct scatterlist *sg_list;
+
+ sg_nb = 0;
+ sg_list = sg;
+ total = req->nbytes;
+
+ while (total) {
+ len = min(sg_list->length, total);
+
+ sg_nb++;
+ total -= len;
+
+ sg_list = sg_next(sg_list);
+ if (!sg_list)
+ total = 0;
+ }
+
+ return sg_nb;
+}
+
+static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
+{
+ return readl_relaxed(dd->io_base + offset);
+}
+
+static inline void atmel_aes_write(struct atmel_aes_dev *dd,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, dd->io_base + offset);
+}
+
+static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
+ u32 *value, int count)
+{
+ for (; count--; value++, offset += 4)
+ *value = atmel_aes_read(dd, offset);
+}
+
+static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
+ u32 *value, int count)
+{
+ for (; count--; value++, offset += 4)
+ atmel_aes_write(dd, offset, *value);
+}
+
+static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
+{
+ atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);
+
+ if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
+ dd->flags |= AES_FLAGS_DUALBUFF;
+}
+
+static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
+{
+ struct atmel_aes_dev *aes_dd = NULL;
+ struct atmel_aes_dev *tmp;
+
+ spin_lock_bh(&atmel_aes.lock);
+ if (!ctx->dd) {
+ list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
+ aes_dd = tmp;
+ break;
+ }
+ ctx->dd = aes_dd;
+ } else {
+ aes_dd = ctx->dd;
+ }
+
+ spin_unlock_bh(&atmel_aes.lock);
+
+ return aes_dd;
+}
+
+static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
+{
+ clk_prepare_enable(dd->iclk);
+
+ if (!(dd->flags & AES_FLAGS_INIT)) {
+ atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
+ atmel_aes_dualbuff_test(dd);
+ dd->flags |= AES_FLAGS_INIT;
+ dd->err = 0;
+ }
+
+ return 0;
+}
+
+static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
+{
+ atmel_aes_hw_init(dd);
+
+ dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);
+
+ clk_disable_unprepare(dd->iclk);
+}
+
+static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
+{
+ struct ablkcipher_request *req = dd->req;
+
+ clk_disable_unprepare(dd->iclk);
+ dd->flags &= ~AES_FLAGS_BUSY;
+
+ req->base.complete(&req->base, err);
+}
+
+static void atmel_aes_dma_callback(void *data)
+{
+ struct atmel_aes_dev *dd = data;
+
+ /* dma_lch_out - completed */
+ tasklet_schedule(&dd->done_task);
+}
+
+static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
+{
+ struct dma_async_tx_descriptor *in_desc, *out_desc;
+ int nb_dma_sg_in, nb_dma_sg_out;
+
+ dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
+ if (!dd->nb_in_sg)
+ goto exit_err;
+
+ nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
+ DMA_TO_DEVICE);
+ if (!nb_dma_sg_in)
+ goto exit_err;
+
+ in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
+ nb_dma_sg_in, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!in_desc)
+ goto unmap_in;
+
+ /* callback not needed */
+
+ dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
+ if (!dd->nb_out_sg)
+ goto unmap_in;
+
+ nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
+ DMA_FROM_DEVICE);
+ if (!nb_dma_sg_out)
+ goto unmap_out;
+
+ out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
+ nb_dma_sg_out, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!out_desc)
+ goto unmap_out;
+
+ out_desc->callback = atmel_aes_dma_callback;
+ out_desc->callback_param = dd;
+
+ dd->total -= dd->req->nbytes;
+
+ dmaengine_submit(out_desc);
+ dma_async_issue_pending(dd->dma_lch_out.chan);
+
+ dmaengine_submit(in_desc);
+ dma_async_issue_pending(dd->dma_lch_in.chan);
+
+ return 0;
+
+unmap_out:
+ dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
+ DMA_FROM_DEVICE);
+unmap_in:
+ dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
+ DMA_TO_DEVICE);
+exit_err:
+ return -EINVAL;
+}
+
+static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
+{
+ dd->flags &= ~AES_FLAGS_DMA;
+
+ /* use cache buffers */
+ dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
+ if (!dd->nb_in_sg)
+ return -EINVAL;
+
+ dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
+ if (!dd->nb_in_sg)
+ return -EINVAL;
+
+ dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
+ dd->buf_in, dd->total);
+
+ if (!dd->bufcnt)
+ return -EINVAL;
+
+ dd->total -= dd->bufcnt;
+
+ atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
+ atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
+ dd->bufcnt >> 2);
+
+ return 0;
+}
+
+static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
+{
+ int err;
+
+ if (dd->flags & AES_FLAGS_CFB8) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else if (dd->flags & AES_FLAGS_CFB16) {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ dd->dma_lch_in.dma_conf.dst_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dd->dma_lch_out.dma_conf.src_addr_width =
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
+
+ dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
+ dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
+
+ dd->flags |= AES_FLAGS_DMA;
+ err = atmel_aes_crypt_dma(dd);
+
+ return err;
+}
+
+static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
+{
+ int err;
+ u32 valcr = 0, valmr = 0;
+
+ err = atmel_aes_hw_init(dd);
+
+ if (err)
+ return err;
+
+ /* MR register must be set before IV registers */
+ if (dd->ctx->keylen == AES_KEYSIZE_128)
+ valmr |= AES_MR_KEYSIZE_128;
+ else if (dd->ctx->keylen == AES_KEYSIZE_192)
+ valmr |= AES_MR_KEYSIZE_192;
+ else
+ valmr |= AES_MR_KEYSIZE_256;
+
+ if (dd->flags & AES_FLAGS_CBC) {
+ valmr |= AES_MR_OPMOD_CBC;
+ } else if (dd->flags & AES_FLAGS_CFB) {
+ valmr |= AES_MR_OPMOD_CFB;
+ if (dd->flags & AES_FLAGS_CFB8)
+ valmr |= AES_MR_CFBS_8b;
+ else if (dd->flags & AES_FLAGS_CFB16)
+ valmr |= AES_MR_CFBS_16b;
+ else if (dd->flags & AES_FLAGS_CFB32)
+ valmr |= AES_MR_CFBS_32b;
+ else if (dd->flags & AES_FLAGS_CFB64)
+ valmr |= AES_MR_CFBS_64b;
+ } else if (dd->flags & AES_FLAGS_OFB) {
+ valmr |= AES_MR_OPMOD_OFB;
+ } else if (dd->flags & AES_FLAGS_CTR) {
+ valmr |= AES_MR_OPMOD_CTR;
+ } else {
+ valmr |= AES_MR_OPMOD_ECB;
+ }
+
+ if (dd->flags & AES_FLAGS_ENCRYPT)
+ valmr |= AES_MR_CYPHER_ENC;
+
+ if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
+ valmr |= AES_MR_SMOD_IDATAR0;
+ if (dd->flags & AES_FLAGS_DUALBUFF)
+ valmr |= AES_MR_DUALBUFF;
+ } else {
+ valmr |= AES_MR_SMOD_AUTO;
+ }
+
+ atmel_aes_write(dd, AES_CR, valcr);
+ atmel_aes_write(dd, AES_MR, valmr);
+
+ atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
+ dd->ctx->keylen >> 2);
+
+ if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
+ (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
+ dd->req->info) {
+ atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
+ }
+
+ return 0;
+}
+
+static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
+ struct ablkcipher_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct atmel_aes_ctx *ctx;
+ struct atmel_aes_reqctx *rctx;
+ unsigned long flags;
+ int err, ret = 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = ablkcipher_enqueue_request(&dd->queue, req);
+ if (dd->flags & AES_FLAGS_BUSY) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (async_req)
+ dd->flags |= AES_FLAGS_BUSY;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ /* assign new request to device */
+ dd->req = req;
+ dd->total = req->nbytes;
+ dd->in_sg = req->src;
+ dd->out_sg = req->dst;
+
+ rctx = ablkcipher_request_ctx(req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx->mode &= AES_FLAGS_MODE_MASK;
+ dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
+ dd->ctx = ctx;
+ ctx->dd = dd;
+
+ err = atmel_aes_write_ctrl(dd);
+ if (!err) {
+ if (dd->total > ATMEL_AES_DMA_THRESHOLD)
+ err = atmel_aes_crypt_dma_start(dd);
+ else
+ err = atmel_aes_crypt_cpu_start(dd);
+ }
+ if (err) {
+ /* aes_task will not finish it, so do it here */
+ atmel_aes_finish_req(dd, err);
+ tasklet_schedule(&dd->queue_task);
+ }
+
+ return ret;
+}
+
+static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
+{
+ int err = -EINVAL;
+
+ if (dd->flags & AES_FLAGS_DMA) {
+ dma_unmap_sg(dd->dev, dd->out_sg,
+ dd->nb_out_sg, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg,
+ dd->nb_in_sg, DMA_TO_DEVICE);
+ err = 0;
+ }
+
+ return err;
+}
+
+static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct atmel_aes_dev *dd;
+
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ pr_err("request size is not exact amount of AES blocks\n");
+ return -EINVAL;
+ }
+
+ dd = atmel_aes_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return atmel_aes_handle_queue(dd, req);
+}
+
+static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl && sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
+{
+ int err = -ENOMEM;
+ struct aes_platform_data *pdata;
+ dma_cap_mask_t mask_in, mask_out;
+
+ pdata = dd->dev->platform_data;
+
+ if (pdata && pdata->dma_slave->txdata.dma_dev &&
+ pdata->dma_slave->rxdata.dma_dev) {
+
+ /* Try to grab 2 DMA channels */
+ dma_cap_zero(mask_in);
+ dma_cap_set(DMA_SLAVE, mask_in);
+
+ dd->dma_lch_in.chan = dma_request_channel(mask_in,
+ atmel_aes_filter, &pdata->dma_slave->rxdata);
+ if (!dd->dma_lch_in.chan)
+ goto err_dma_in;
+
+ dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
+ dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
+ AES_IDATAR(0);
+ dd->dma_lch_in.dma_conf.src_maxburst = 1;
+ dd->dma_lch_in.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_in.dma_conf.device_fc = false;
+
+ dma_cap_zero(mask_out);
+ dma_cap_set(DMA_SLAVE, mask_out);
+ dd->dma_lch_out.chan = dma_request_channel(mask_out,
+ atmel_aes_filter, &pdata->dma_slave->txdata);
+ if (!dd->dma_lch_out.chan)
+ goto err_dma_out;
+
+ dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
+ dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
+ AES_ODATAR(0);
+ dd->dma_lch_out.dma_conf.src_maxburst = 1;
+ dd->dma_lch_out.dma_conf.dst_maxburst = 1;
+ dd->dma_lch_out.dma_conf.device_fc = false;
+
+ return 0;
+ } else {
+ return -ENODEV;
+ }
+
+err_dma_out:
+ dma_release_channel(dd->dma_lch_in.chan);
+err_dma_in:
+ return err;
+}
+
+static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
+{
+ dma_release_channel(dd->dma_lch_in.chan);
+ dma_release_channel(dd->dma_lch_out.chan);
+}
+
+static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ 0);
+}
+
+static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
+}
+
+static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_CBC);
+}
+
+static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
+}
+
+static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_OFB);
+}
+
+static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
+}
+
+static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_CFB);
+}
+
+static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
+}
+
+static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_CFB | AES_FLAGS_CFB64);
+}
+
+static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
+}
+
+static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_CFB | AES_FLAGS_CFB32);
+}
+
+static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
+}
+
+static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_CFB | AES_FLAGS_CFB16);
+}
+
+static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
+}
+
+static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_CFB | AES_FLAGS_CFB8);
+}
+
+static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
+}
+
+static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_aes_crypt(req,
+ AES_FLAGS_CTR);
+}
+
+static int atmel_aes_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+
+ return 0;
+}
+
+static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg aes_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "atmel-ecb-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ecb_encrypt,
+ .decrypt = atmel_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "atmel-cbc-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cbc_encrypt,
+ .decrypt = atmel_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "atmel-ofb-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ofb_encrypt,
+ .decrypt = atmel_aes_ofb_decrypt,
+ }
+},
+{
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "atmel-cfb-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb_encrypt,
+ .decrypt = atmel_aes_cfb_decrypt,
+ }
+},
+{
+ .cra_name = "cfb32(aes)",
+ .cra_driver_name = "atmel-cfb32-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB32_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb32_encrypt,
+ .decrypt = atmel_aes_cfb32_decrypt,
+ }
+},
+{
+ .cra_name = "cfb16(aes)",
+ .cra_driver_name = "atmel-cfb16-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB16_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb16_encrypt,
+ .decrypt = atmel_aes_cfb16_decrypt,
+ }
+},
+{
+ .cra_name = "cfb8(aes)",
+ .cra_driver_name = "atmel-cfb8-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB64_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb8_encrypt,
+ .decrypt = atmel_aes_cfb8_decrypt,
+ }
+},
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "atmel-ctr-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_ctr_encrypt,
+ .decrypt = atmel_aes_ctr_decrypt,
+ }
+},
+};
+
+static struct crypto_alg aes_cfb64_alg[] = {
+{
+ .cra_name = "cfb64(aes)",
+ .cra_driver_name = "atmel-cfb64-aes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB64_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_aes_ctx),
+ .cra_alignmask = 0x0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_aes_cra_init,
+ .cra_exit = atmel_aes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = atmel_aes_setkey,
+ .encrypt = atmel_aes_cfb64_encrypt,
+ .decrypt = atmel_aes_cfb64_decrypt,
+ }
+},
+};
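Because these algorithms are registered under generic names such as "cbc(aes)", a kernel caller reaches the hardware through the asynchronous block-cipher API rather than through any driver symbol. Below is a minimal sketch (an editor's addition, not part of the patch) of what such a caller looked like with the ablkcipher interface of this era; the names example_cbc_aes_encrypt and example_crypt_done are made up, and the error code delivered to the completion callback is ignored for brevity.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <crypto/aes.h>

static void example_crypt_done(struct crypto_async_request *req, int err)
{
	complete(req->data);		/* wake up the waiter below */
}

/* Encrypt 'len' bytes in place with AES-128-CBC; 'len' must be a
 * multiple of AES_BLOCK_SIZE, matching the check in atmel_aes_crypt(). */
static int example_cbc_aes_encrypt(void *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int ret;

	/* "cbc(aes)" may resolve to "atmel-cbc-aes" above when the
	 * hardware driver is registered and wins on priority. */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_callback(req, 0, example_crypt_done, &done);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS) {
		/* asynchronous hardware path: wait for the callback */
		wait_for_completion(&done);
		ret = 0;
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}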
+
+static void atmel_aes_queue_task(unsigned long data)
+{
+ struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
+
+ atmel_aes_handle_queue(dd, NULL);
+}
+
+static void atmel_aes_done_task(unsigned long data)
+{
+ struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
+ int err;
+
+ if (!(dd->flags & AES_FLAGS_DMA)) {
+ atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
+ dd->bufcnt >> 2);
+
+ if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
+ dd->buf_out, dd->bufcnt))
+ err = 0;
+ else
+ err = -EINVAL;
+
+ goto cpu_end;
+ }
+
+ err = atmel_aes_crypt_dma_stop(dd);
+
+ err = dd->err ? : err;
+
+ if (dd->total && !err) {
+ err = atmel_aes_crypt_dma_start(dd);
+ if (!err)
+ return; /* DMA started. Not finishing. */
+ }
+
+cpu_end:
+ atmel_aes_finish_req(dd, err);
+ atmel_aes_handle_queue(dd, NULL);
+}
+
+static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
+{
+ struct atmel_aes_dev *aes_dd = dev_id;
+ u32 reg;
+
+ reg = atmel_aes_read(aes_dd, AES_ISR);
+ if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
+ atmel_aes_write(aes_dd, AES_IDR, reg);
+ if (AES_FLAGS_BUSY & aes_dd->flags)
+ tasklet_schedule(&aes_dd->done_task);
+ else
+ dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
+ crypto_unregister_alg(&aes_algs[i]);
+ if (dd->hw_version >= 0x130)
+ crypto_unregister_alg(&aes_cfb64_alg[0]);
+}
+
+static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
+{
+ int err, i, j;
+
+ for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ INIT_LIST_HEAD(&aes_algs[i].cra_list);
+ err = crypto_register_alg(&aes_algs[i]);
+ if (err)
+ goto err_aes_algs;
+ }
+
+ atmel_aes_hw_version_init(dd);
+
+ if (dd->hw_version >= 0x130) {
+ INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list);
+ err = crypto_register_alg(&aes_cfb64_alg[0]);
+ if (err)
+ goto err_aes_cfb64_alg;
+ }
+
+ return 0;
+
+err_aes_cfb64_alg:
+ i = ARRAY_SIZE(aes_algs);
+err_aes_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_alg(&aes_algs[j]);
+
+ return err;
+}
+
+static int __devinit atmel_aes_probe(struct platform_device *pdev)
+{
+ struct atmel_aes_dev *aes_dd;
+ struct aes_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct resource *aes_res;
+ unsigned long aes_phys_size;
+ int err;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ err = -ENXIO;
+ goto aes_dd_err;
+ }
+
+ aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
+ if (aes_dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ err = -ENOMEM;
+ goto aes_dd_err;
+ }
+
+ aes_dd->dev = dev;
+
+ platform_set_drvdata(pdev, aes_dd);
+
+ INIT_LIST_HEAD(&aes_dd->list);
+
+ tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
+ (unsigned long)aes_dd);
+ tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
+ (unsigned long)aes_dd);
+
+ crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
+
+ aes_dd->irq = -1;
+
+ /* Get the base address */
+ aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!aes_res) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ aes_dd->phys_base = aes_res->start;
+ aes_phys_size = resource_size(aes_res);
+
+ /* Get the IRQ */
+ aes_dd->irq = platform_get_irq(pdev, 0);
+ if (aes_dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = aes_dd->irq;
+ goto aes_irq_err;
+ }
+
+ err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
+ aes_dd);
+ if (err) {
+ dev_err(dev, "unable to request aes irq.\n");
+ goto aes_irq_err;
+ }
+
+ /* Initializing the clock */
+ aes_dd->iclk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(aes_dd->iclk)) {
+ dev_err(dev, "clock intialization failed.\n");
+ err = PTR_ERR(aes_dd->iclk);
+ goto clk_err;
+ }
+
+ aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
+ if (!aes_dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
+ goto aes_io_err;
+ }
+
+ err = atmel_aes_dma_init(aes_dd);
+ if (err)
+ goto err_aes_dma;
+
+ spin_lock(&atmel_aes.lock);
+ list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
+ spin_unlock(&atmel_aes.lock);
+
+ err = atmel_aes_register_algs(aes_dd);
+ if (err)
+ goto err_algs;
+
+ dev_info(dev, "Atmel AES\n");
+
+ return 0;
+
+err_algs:
+ spin_lock(&atmel_aes.lock);
+ list_del(&aes_dd->list);
+ spin_unlock(&atmel_aes.lock);
+ atmel_aes_dma_cleanup(aes_dd);
+err_aes_dma:
+ iounmap(aes_dd->io_base);
+aes_io_err:
+ clk_put(aes_dd->iclk);
+clk_err:
+ free_irq(aes_dd->irq, aes_dd);
+aes_irq_err:
+res_err:
+ tasklet_kill(&aes_dd->done_task);
+ tasklet_kill(&aes_dd->queue_task);
+ kfree(aes_dd);
+ aes_dd = NULL;
+aes_dd_err:
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int __devexit atmel_aes_remove(struct platform_device *pdev)
+{
+ static struct atmel_aes_dev *aes_dd;
+
+ aes_dd = platform_get_drvdata(pdev);
+ if (!aes_dd)
+ return -ENODEV;
+ spin_lock(&atmel_aes.lock);
+ list_del(&aes_dd->list);
+ spin_unlock(&atmel_aes.lock);
+
+ atmel_aes_unregister_algs(aes_dd);
+
+ tasklet_kill(&aes_dd->done_task);
+ tasklet_kill(&aes_dd->queue_task);
+
+ atmel_aes_dma_cleanup(aes_dd);
+
+ iounmap(aes_dd->io_base);
+
+ clk_put(aes_dd->iclk);
+
+ if (aes_dd->irq > 0)
+ free_irq(aes_dd->irq, aes_dd);
+
+ kfree(aes_dd);
+ aes_dd = NULL;
+
+ return 0;
+}
+
+static struct platform_driver atmel_aes_driver = {
+ .probe = atmel_aes_probe,
+ .remove = __devexit_p(atmel_aes_remove),
+ .driver = {
+ .name = "atmel_aes",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(atmel_aes_driver);
+
+MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
new file mode 100644
index 000000000000..dc53a20d7da1
--- /dev/null
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -0,0 +1,46 @@
+#ifndef __ATMEL_SHA_REGS_H__
+#define __ATMEL_SHA_REGS_H__
+
+#define SHA_REG_DIGEST(x) (0x80 + ((x) * 0x04))
+#define SHA_REG_DIN(x) (0x40 + ((x) * 0x04))
+
+#define SHA_CR 0x00
+#define SHA_CR_START (1 << 0)
+#define SHA_CR_FIRST (1 << 4)
+#define SHA_CR_SWRST (1 << 8)
+
+#define SHA_MR 0x04
+#define SHA_MR_MODE_MASK (0x3 << 0)
+#define SHA_MR_MODE_MANUAL 0x0
+#define SHA_MR_MODE_AUTO 0x1
+#define SHA_MR_MODE_PDC 0x2
+#define SHA_MR_DUALBUFF (1 << 3)
+#define SHA_MR_PROCDLY (1 << 4)
+#define SHA_MR_ALGO_SHA1 (0 << 8)
+#define SHA_MR_ALGO_SHA256 (1 << 8)
+
+#define SHA_IER 0x10
+#define SHA_IDR 0x14
+#define SHA_IMR 0x18
+#define SHA_ISR 0x1C
+#define SHA_INT_DATARDY (1 << 0)
+#define SHA_INT_ENDTX (1 << 1)
+#define SHA_INT_TXBUFE (1 << 2)
+#define SHA_INT_URAD (1 << 8)
+#define SHA_ISR_URAT_MASK (0x7 << 12)
+#define SHA_ISR_URAT_IDR (0x0 << 12)
+#define SHA_ISR_URAT_ODR (0x1 << 12)
+#define SHA_ISR_URAT_MR (0x2 << 12)
+#define SHA_ISR_URAT_WO (0x5 << 12)
+
+#define SHA_TPR 0x108
+#define SHA_TCR 0x10C
+#define SHA_TNPR 0x118
+#define SHA_TNCR 0x11C
+#define SHA_PTCR 0x120
+#define SHA_PTCR_TXTEN (1 << 8)
+#define SHA_PTCR_TXTDIS (1 << 9)
+#define SHA_PTSR 0x124
+#define SHA_PTSR_TXTEN (1 << 8)
+
+#endif /* __ATMEL_SHA_REGS_H__ */
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
new file mode 100644
index 000000000000..f938b9d79b66
--- /dev/null
+++ b/drivers/crypto/atmel-sha.c
@@ -0,0 +1,1112 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL SHA1/SHA256 HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from omap-sham.c drivers.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include "atmel-sha-regs.h"
+
+/* SHA flags */
+#define SHA_FLAGS_BUSY BIT(0)
+#define SHA_FLAGS_FINAL BIT(1)
+#define SHA_FLAGS_DMA_ACTIVE BIT(2)
+#define SHA_FLAGS_OUTPUT_READY BIT(3)
+#define SHA_FLAGS_INIT BIT(4)
+#define SHA_FLAGS_CPU BIT(5)
+#define SHA_FLAGS_DMA_READY BIT(6)
+
+#define SHA_FLAGS_FINUP BIT(16)
+#define SHA_FLAGS_SG BIT(17)
+#define SHA_FLAGS_SHA1 BIT(18)
+#define SHA_FLAGS_SHA256 BIT(19)
+#define SHA_FLAGS_ERROR BIT(20)
+#define SHA_FLAGS_PAD BIT(21)
+
+#define SHA_FLAGS_DUALBUFF BIT(24)
+
+#define SHA_OP_UPDATE 1
+#define SHA_OP_FINAL 2
+
+#define SHA_BUFFER_LEN PAGE_SIZE
+
+#define ATMEL_SHA_DMA_THRESHOLD 56
+
+
+struct atmel_sha_dev;
+
+struct atmel_sha_reqctx {
+ struct atmel_sha_dev *dd;
+ unsigned long flags;
+ unsigned long op;
+
+ u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+ size_t digcnt;
+ size_t bufcnt;
+ size_t buflen;
+ dma_addr_t dma_addr;
+
+ /* walk state */
+ struct scatterlist *sg;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* total request */
+
+ u8 buffer[0] __aligned(sizeof(u32));
+};
+
+struct atmel_sha_ctx {
+ struct atmel_sha_dev *dd;
+
+ unsigned long flags;
+
+ /* fallback stuff */
+ struct crypto_shash *fallback;
+
+};
+
+#define ATMEL_SHA_QUEUE_LENGTH 1
+
+struct atmel_sha_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ struct device *dev;
+ struct clk *iclk;
+ int irq;
+ void __iomem *io_base;
+
+ spinlock_t lock;
+ int err;
+ struct tasklet_struct done_task;
+
+ unsigned long flags;
+ struct crypto_queue queue;
+ struct ahash_request *req;
+};
+
+struct atmel_sha_drv {
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+static struct atmel_sha_drv atmel_sha = {
+ .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
+};
+
+static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
+{
+ return readl_relaxed(dd->io_base + offset);
+}
+
+static inline void atmel_sha_write(struct atmel_sha_dev *dd,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, dd->io_base + offset);
+}
+
+static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
+{
+ atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);
+
+ if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
+ dd->flags |= SHA_FLAGS_DUALBUFF;
+}
+
+static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
+{
+ size_t count;
+
+ while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
+ count = min(ctx->sg->length - ctx->offset, ctx->total);
+ count = min(count, ctx->buflen - ctx->bufcnt);
+
+ if (count <= 0)
+ break;
+
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
+ ctx->offset, count, 0);
+
+ ctx->bufcnt += count;
+ ctx->offset += count;
+ ctx->total -= count;
+
+ if (ctx->offset == ctx->sg->length) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ else
+ ctx->total = 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The purpose of this padding is to ensure that the padded message
+ * is a multiple of 512 bits. The bit "1" is appended at the end of
+ * the message, followed by "padlen-1" zero bits. Then a 64-bit block
+ * equal to the message length in bits is appended.
+ *
+ * padlen is calculated as follows:
+ * - if message length < 56 bytes then padlen = 56 - message length
+ * - else padlen = 64 + 56 - message length
+ */
+static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
+{
+ unsigned int index, padlen;
+ u64 bits;
+ u64 size;
+
+ bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
+ size = cpu_to_be64(bits);
+
+ index = ctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+ *(ctx->buffer + ctx->bufcnt) = 0x80;
+ memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
+ memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
+ ctx->bufcnt += padlen + 8;
+ ctx->flags |= SHA_FLAGS_PAD;
+}
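To make the padlen rule above concrete, here are two worked cases (an editor's illustration with arbitrary byte counts, not part of the patch):

/*
 * bufcnt = 20: index = 20 & 0x3f = 20, padlen = 56 - 20 = 36,
 *              padded size = 20 + 36 + 8 = 64 bytes  (one 512-bit block)
 * bufcnt = 60: index = 60,             padlen = (64 + 56) - 60 = 60,
 *              padded size = 60 + 60 + 8 = 128 bytes (two 512-bit blocks)
 */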
+
+static int atmel_sha_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct atmel_sha_dev *dd = NULL;
+ struct atmel_sha_dev *tmp;
+
+ spin_lock_bh(&atmel_sha.lock);
+ if (!tctx->dd) {
+ list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
+ dd = tmp;
+ break;
+ }
+ tctx->dd = dd;
+ } else {
+ dd = tctx->dd;
+ }
+
+ spin_unlock_bh(&atmel_sha.lock);
+
+ ctx->dd = dd;
+
+ ctx->flags = 0;
+
+ dev_dbg(dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
+ ctx->flags |= SHA_FLAGS_SHA1;
+ else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
+ ctx->flags |= SHA_FLAGS_SHA256;
+
+ ctx->bufcnt = 0;
+ ctx->digcnt = 0;
+ ctx->buflen = SHA_BUFFER_LEN;
+
+ return 0;
+}
+
+static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
+
+ if (likely(dma)) {
+ atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
+ valmr = SHA_MR_MODE_PDC;
+ if (dd->flags & SHA_FLAGS_DUALBUFF)
+ valmr = SHA_MR_DUALBUFF;
+ } else {
+ atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
+ }
+
+ if (ctx->flags & SHA_FLAGS_SHA256)
+ valmr |= SHA_MR_ALGO_SHA256;
+
+ /* Setting CR_FIRST only for the first iteration */
+ if (!ctx->digcnt)
+ valcr = SHA_CR_FIRST;
+
+ atmel_sha_write(dd, SHA_CR, valcr);
+ atmel_sha_write(dd, SHA_MR, valmr);
+}
+
+static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
+ size_t length, int final)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ int count, len32;
+ const u32 *buffer = (const u32 *)buf;
+
+ dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length, final);
+
+ atmel_sha_write_ctrl(dd, 0);
+
+ /* should be non-zero before next lines to disable clocks later */
+ ctx->digcnt += length;
+
+ if (final)
+ dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ dd->flags |= SHA_FLAGS_CPU;
+
+ for (count = 0; count < len32; count++)
+ atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
+
+ return -EINPROGRESS;
+}
+
+static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
+ size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ int len32;
+
+ dev_dbg(dd->dev, "xmit_pdc: digcnt: %d, length: %d, final: %d\n",
+ ctx->digcnt, length1, final);
+
+ len32 = DIV_ROUND_UP(length1, sizeof(u32));
+ atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
+ atmel_sha_write(dd, SHA_TPR, dma_addr1);
+ atmel_sha_write(dd, SHA_TCR, len32);
+
+ len32 = DIV_ROUND_UP(length2, sizeof(u32));
+ atmel_sha_write(dd, SHA_TNPR, dma_addr2);
+ atmel_sha_write(dd, SHA_TNCR, len32);
+
+ atmel_sha_write_ctrl(dd, 1);
+
+ /* should be non-zero before next lines to disable clocks later */
+ ctx->digcnt += length1;
+
+ if (final)
+ dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
+
+ dd->flags |= SHA_FLAGS_DMA_ACTIVE;
+
+ /* Start DMA transfer */
+ atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
+
+ return -EINPROGRESS;
+}
+
+static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ int bufcnt;
+
+ atmel_sha_append_sg(ctx);
+ atmel_sha_fill_padding(ctx, 0);
+
+ bufcnt = ctx->bufcnt;
+ ctx->bufcnt = 0;
+
+ return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
+}
+
+static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
+ struct atmel_sha_reqctx *ctx,
+ size_t length, int final)
+{
+ ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
+ ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+ dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
+ SHA1_BLOCK_SIZE);
+ return -EINVAL;
+ }
+
+ ctx->flags &= ~SHA_FLAGS_SG;
+
+ /* next call does not fail... so no unmap in the case of error */
+ return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
+}
+
+static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int final;
+ size_t count;
+
+ atmel_sha_append_sg(ctx);
+
+ final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
+
+ dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
+ ctx->bufcnt, ctx->digcnt, final);
+
+ if (final)
+ atmel_sha_fill_padding(ctx, 0);
+
+ if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return atmel_sha_xmit_dma_map(dd, ctx, count, final);
+ }
+
+ return 0;
+}
+
+static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+ unsigned int length, final, tail;
+ struct scatterlist *sg;
+ unsigned int count;
+
+ if (!ctx->total)
+ return 0;
+
+ if (ctx->bufcnt || ctx->offset)
+ return atmel_sha_update_dma_slow(dd);
+
+ dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+ ctx->digcnt, ctx->bufcnt, ctx->total);
+
+ sg = ctx->sg;
+
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return atmel_sha_update_dma_slow(dd);
+
+ if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
+ /* size is not SHA1_BLOCK_SIZE aligned */
+ return atmel_sha_update_dma_slow(dd);
+
+ length = min(ctx->total, sg->length);
+
+ if (sg_is_last(sg)) {
+ if (!(ctx->flags & SHA_FLAGS_FINUP)) {
+ /* not last sg must be SHA1_BLOCK_SIZE aligned */
+ tail = length & (SHA1_BLOCK_SIZE - 1);
+ length -= tail;
+ if (length == 0) {
+ /* offset where to start slow */
+ ctx->offset = length;
+ return atmel_sha_update_dma_slow(dd);
+ }
+ }
+ }
+
+ ctx->total -= length;
+ ctx->offset = length; /* offset where to start slow */
+
+ final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
+
+ /* Add padding */
+ if (final) {
+ tail = length & (SHA1_BLOCK_SIZE - 1);
+ length -= tail;
+ ctx->total += tail;
+ ctx->offset = length; /* offset where to start slow */
+
+ sg = ctx->sg;
+ atmel_sha_append_sg(ctx);
+
+ atmel_sha_fill_padding(ctx, length);
+
+ ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
+ ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+ dev_err(dd->dev, "dma %u bytes error\n",
+ ctx->buflen + SHA1_BLOCK_SIZE);
+ return -EINVAL;
+ }
+
+ if (length == 0) {
+ ctx->flags &= ~SHA_FLAGS_SG;
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
+ 0, final);
+ } else {
+ ctx->sg = sg;
+ if (!dma_map_sg(dd->dev, ctx->sg, 1,
+ DMA_TO_DEVICE)) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ return -EINVAL;
+ }
+
+ ctx->flags |= SHA_FLAGS_SG;
+
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
+ length, ctx->dma_addr, count, final);
+ }
+ }
+
+ if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ return -EINVAL;
+ }
+
+ ctx->flags |= SHA_FLAGS_SG;
+
+ /* next call does not fail... so no unmap in the case of error */
+ return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
+ 0, final);
+}
+
+static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
+
+ if (ctx->flags & SHA_FLAGS_SG) {
+ dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+ if (ctx->sg->length == ctx->offset) {
+ ctx->sg = sg_next(ctx->sg);
+ if (ctx->sg)
+ ctx->offset = 0;
+ }
+ if (ctx->flags & SHA_FLAGS_PAD)
+ dma_unmap_single(dd->dev, ctx->dma_addr,
+ ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
+ SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
+static int atmel_sha_update_req(struct atmel_sha_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ int err;
+
+ dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
+ ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);
+
+ if (ctx->flags & SHA_FLAGS_CPU)
+ err = atmel_sha_update_cpu(dd);
+ else
+ err = atmel_sha_update_dma_start(dd);
+
+ /* wait for DMA completion before we can take more data */
+ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n",
+ err, ctx->digcnt);
+
+ return err;
+}
+
+static int atmel_sha_final_req(struct atmel_sha_dev *dd)
+{
+ struct ahash_request *req = dd->req;
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ int err = 0;
+ int count;
+
+ if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
+ atmel_sha_fill_padding(ctx, 0);
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
+ }
+ /* faster to handle last block with cpu */
+ else {
+ atmel_sha_fill_padding(ctx, 0);
+ count = ctx->bufcnt;
+ ctx->bufcnt = 0;
+ err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
+ }
+
+ dev_dbg(dd->dev, "final_req: err: %d\n", err);
+
+ return err;
+}
+
+static void atmel_sha_copy_hash(struct ahash_request *req)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)ctx->digest;
+ int i;
+
+ if (likely(ctx->flags & SHA_FLAGS_SHA1))
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ else
+ for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+}
+
+static void atmel_sha_copy_ready_hash(struct ahash_request *req)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return;
+
+ if (likely(ctx->flags & SHA_FLAGS_SHA1))
+ memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
+ else
+ memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
+}
+
+static int atmel_sha_finish(struct ahash_request *req)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct atmel_sha_dev *dd = ctx->dd;
+ int err = 0;
+
+ if (ctx->digcnt)
+ atmel_sha_copy_ready_hash(req);
+
+ dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt,
+ ctx->bufcnt);
+
+ return err;
+}
+
+static void atmel_sha_finish_req(struct ahash_request *req, int err)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct atmel_sha_dev *dd = ctx->dd;
+
+ if (!err) {
+ atmel_sha_copy_hash(req);
+ if (SHA_FLAGS_FINAL & dd->flags)
+ err = atmel_sha_finish(req);
+ } else {
+ ctx->flags |= SHA_FLAGS_ERROR;
+ }
+
+ /* atomic operation is not needed here */
+ dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
+ SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
+
+ clk_disable_unprepare(dd->iclk);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+
+ /* handle new request */
+ tasklet_schedule(&dd->done_task);
+}
+
+static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
+{
+ clk_prepare_enable(dd->iclk);
+
+ if (SHA_FLAGS_INIT & dd->flags) {
+ atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
+ atmel_sha_dualbuff_test(dd);
+ dd->flags |= SHA_FLAGS_INIT;
+ dd->err = 0;
+ }
+
+ return 0;
+}
+
+static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
+ struct ahash_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct atmel_sha_reqctx *ctx;
+ unsigned long flags;
+ int err = 0, ret = 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = ahash_enqueue_request(&dd->queue, req);
+
+ if (SHA_FLAGS_BUSY & dd->flags) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (async_req)
+ dd->flags |= SHA_FLAGS_BUSY;
+
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+ dd->req = req;
+ ctx = ahash_request_ctx(req);
+
+ dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+ ctx->op, req->nbytes);
+
+ err = atmel_sha_hw_init(dd);
+
+ if (err)
+ goto err1;
+
+ if (ctx->op == SHA_OP_UPDATE) {
+ err = atmel_sha_update_req(dd);
+ if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
+ /* no final() after finup() */
+ err = atmel_sha_final_req(dd);
+ }
+ } else if (ctx->op == SHA_OP_FINAL) {
+ err = atmel_sha_final_req(dd);
+ }
+
+err1:
+ if (err != -EINPROGRESS)
+ /* done_task will not finish it, so do it here */
+ atmel_sha_finish_req(req, err);
+
+ dev_dbg(dd->dev, "exit, err: %d\n", err);
+
+ return ret;
+}
+
+static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct atmel_sha_dev *dd = tctx->dd;
+
+ ctx->op = op;
+
+ return atmel_sha_handle_queue(dd, req);
+}
+
+static int atmel_sha_update(struct ahash_request *req)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ ctx->total = req->nbytes;
+ ctx->sg = req->src;
+ ctx->offset = 0;
+
+ if (ctx->flags & SHA_FLAGS_FINUP) {
+ if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
+ /* faster to use CPU for short transfers */
+ ctx->flags |= SHA_FLAGS_CPU;
+ } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
+ atmel_sha_append_sg(ctx);
+ return 0;
+ }
+ return atmel_sha_enqueue(req, SHA_OP_UPDATE);
+}
+
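+/*
+ * Three cases: data is still buffered (queue a FINAL operation), nothing is
+ * buffered and no padding has been appended yet (pad and process right away),
+ * or the hash is already complete (just copy out the result).
+ */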
+static int atmel_sha_final(struct ahash_request *req)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct atmel_sha_dev *dd = tctx->dd;
+
+ int err = 0;
+
+ ctx->flags |= SHA_FLAGS_FINUP;
+
+ if (ctx->flags & SHA_FLAGS_ERROR)
+ return 0; /* uncompleted hash is not needed */
+
+ if (ctx->bufcnt) {
+ return atmel_sha_enqueue(req, SHA_OP_FINAL);
+ } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
+ err = atmel_sha_hw_init(dd);
+ if (err)
+ goto err1;
+
+ dd->flags |= SHA_FLAGS_BUSY;
+ err = atmel_sha_final_req(dd);
+ } else {
+ /* copy ready hash (+ finalize hmac) */
+ return atmel_sha_finish(req);
+ }
+
+err1:
+ if (err != -EINPROGRESS)
+ /* done_task will not finish it, so do it here */
+ atmel_sha_finish_req(req, err);
+
+ return err;
+}
+
+static int atmel_sha_finup(struct ahash_request *req)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->flags |= SHA_FLAGS_FINUP;
+
+ err1 = atmel_sha_update(req);
+ if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ return err1;
+
+ /*
+	 * final() always has to be called to clean up resources,
+	 * even if update() failed, except in the -EINPROGRESS case
+ */
+ err2 = atmel_sha_final(req);
+
+ return err1 ?: err2;
+}
+
+static int atmel_sha_digest(struct ahash_request *req)
+{
+ return atmel_sha_init(req) ?: atmel_sha_finup(req);
+}
+
+static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+{
+ struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ /* Allocate a fallback and abort if it failed. */
+ tctx->fallback = crypto_alloc_shash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback)) {
+ pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
+ alg_name);
+ return PTR_ERR(tctx->fallback);
+ }
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct atmel_sha_reqctx) +
+ SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int atmel_sha_cra_init(struct crypto_tfm *tfm)
+{
+ return atmel_sha_cra_init_alg(tfm, NULL);
+}
+
+static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
+{
+ struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(tctx->fallback);
+ tctx->fallback = NULL;
+}
+
+static struct ahash_alg sha_algs[] = {
+{
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "atmel-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+},
+{
+ .init = atmel_sha_init,
+ .update = atmel_sha_update,
+ .final = atmel_sha_final,
+ .finup = atmel_sha_finup,
+ .digest = atmel_sha_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "atmel-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_sha_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_sha_cra_init,
+ .cra_exit = atmel_sha_cra_exit,
+ }
+ }
+},
+};
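+
+/*
+ * Illustrative usage sketch (not part of this driver): once registered, the
+ * transforms above are reached through the generic ahash API, for example:
+ *
+ *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
+ *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *
+ *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, done, priv);
+ *	ahash_request_set_crypt(req, sg, digest, nbytes);
+ *	crypto_ahash_digest(req);
+ *
+ * The digest call may return -EINPROGRESS and complete later via the
+ * caller-supplied "done"/"priv" callback; error handling is omitted here.
+ */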
+
+static void atmel_sha_done_task(unsigned long data)
+{
+ struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+ int err = 0;
+
+ if (!(SHA_FLAGS_BUSY & dd->flags)) {
+ atmel_sha_handle_queue(dd, NULL);
+ return;
+ }
+
+ if (SHA_FLAGS_CPU & dd->flags) {
+ if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
+ dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
+ goto finish;
+ }
+ } else if (SHA_FLAGS_DMA_READY & dd->flags) {
+ if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
+ dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
+ atmel_sha_update_dma_stop(dd);
+ if (dd->err) {
+ err = dd->err;
+ goto finish;
+ }
+ }
+ if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
+ /* hash or semi-hash ready */
+ dd->flags &= ~(SHA_FLAGS_DMA_READY |
+ SHA_FLAGS_OUTPUT_READY);
+ err = atmel_sha_update_dma_start(dd);
+ if (err != -EINPROGRESS)
+ goto finish;
+ }
+ }
+ return;
+
+finish:
+	/* finish current request */
+ atmel_sha_finish_req(dd->req, err);
+}
+
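+/*
+ * Interrupt handler: mask the signalled sources via SHA_IDR and defer the
+ * actual processing to the done_task tasklet.
+ */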
+static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
+{
+ struct atmel_sha_dev *sha_dd = dev_id;
+ u32 reg;
+
+ reg = atmel_sha_read(sha_dd, SHA_ISR);
+ if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
+ atmel_sha_write(sha_dd, SHA_IDR, reg);
+ if (SHA_FLAGS_BUSY & sha_dd->flags) {
+ sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
+ if (!(SHA_FLAGS_CPU & sha_dd->flags))
+ sha_dd->flags |= SHA_FLAGS_DMA_READY;
+ tasklet_schedule(&sha_dd->done_task);
+ } else {
+ dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
+ }
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
+ crypto_unregister_ahash(&sha_algs[i]);
+}
+
+static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
+{
+ int err, i, j;
+
+ for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+ err = crypto_register_ahash(&sha_algs[i]);
+ if (err)
+ goto err_sha_algs;
+ }
+
+ return 0;
+
+err_sha_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_ahash(&sha_algs[j]);
+
+ return err;
+}
+
+static int __devinit atmel_sha_probe(struct platform_device *pdev)
+{
+ struct atmel_sha_dev *sha_dd;
+ struct device *dev = &pdev->dev;
+ struct resource *sha_res;
+ unsigned long sha_phys_size;
+ int err;
+
+ sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
+ if (sha_dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ err = -ENOMEM;
+ goto sha_dd_err;
+ }
+
+ sha_dd->dev = dev;
+
+ platform_set_drvdata(pdev, sha_dd);
+
+ INIT_LIST_HEAD(&sha_dd->list);
+
+ tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
+ (unsigned long)sha_dd);
+
+ crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
+
+ sha_dd->irq = -1;
+
+ /* Get the base address */
+ sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!sha_res) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ sha_dd->phys_base = sha_res->start;
+ sha_phys_size = resource_size(sha_res);
+
+ /* Get the IRQ */
+ sha_dd->irq = platform_get_irq(pdev, 0);
+ if (sha_dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = sha_dd->irq;
+ goto res_err;
+ }
+
+ err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
+ sha_dd);
+ if (err) {
+ dev_err(dev, "unable to request sha irq.\n");
+ goto res_err;
+ }
+
+ /* Initializing the clock */
+ sha_dd->iclk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sha_dd->iclk)) {
+		dev_err(dev, "clock initialization failed.\n");
+ err = PTR_ERR(sha_dd->iclk);
+ goto clk_err;
+ }
+
+ sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
+ if (!sha_dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
+ goto sha_io_err;
+ }
+
+ spin_lock(&atmel_sha.lock);
+ list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
+ spin_unlock(&atmel_sha.lock);
+
+ err = atmel_sha_register_algs(sha_dd);
+ if (err)
+ goto err_algs;
+
+ dev_info(dev, "Atmel SHA1/SHA256\n");
+
+ return 0;
+
+err_algs:
+ spin_lock(&atmel_sha.lock);
+ list_del(&sha_dd->list);
+ spin_unlock(&atmel_sha.lock);
+ iounmap(sha_dd->io_base);
+sha_io_err:
+ clk_put(sha_dd->iclk);
+clk_err:
+ free_irq(sha_dd->irq, sha_dd);
+res_err:
+ tasklet_kill(&sha_dd->done_task);
+ kfree(sha_dd);
+ sha_dd = NULL;
+sha_dd_err:
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int __devexit atmel_sha_remove(struct platform_device *pdev)
+{
+	struct atmel_sha_dev *sha_dd;
+
+ sha_dd = platform_get_drvdata(pdev);
+ if (!sha_dd)
+ return -ENODEV;
+ spin_lock(&atmel_sha.lock);
+ list_del(&sha_dd->list);
+ spin_unlock(&atmel_sha.lock);
+
+ atmel_sha_unregister_algs(sha_dd);
+
+ tasklet_kill(&sha_dd->done_task);
+
+ iounmap(sha_dd->io_base);
+
+ clk_put(sha_dd->iclk);
+
+ if (sha_dd->irq >= 0)
+ free_irq(sha_dd->irq, sha_dd);
+
+ kfree(sha_dd);
+ sha_dd = NULL;
+
+ return 0;
+}
+
+static struct platform_driver atmel_sha_driver = {
+ .probe = atmel_sha_probe,
+ .remove = __devexit_p(atmel_sha_remove),
+ .driver = {
+ .name = "atmel_sha",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(atmel_sha_driver);
+
+MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h
new file mode 100644
index 000000000000..5ac2a900d80c
--- /dev/null
+++ b/drivers/crypto/atmel-tdes-regs.h
@@ -0,0 +1,89 @@
+#ifndef __ATMEL_TDES_REGS_H__
+#define __ATMEL_TDES_REGS_H__
+
+#define TDES_CR 0x00
+#define TDES_CR_START (1 << 0)
+#define TDES_CR_SWRST (1 << 8)
+#define TDES_CR_LOADSEED (1 << 16)
+
+#define TDES_MR 0x04
+#define TDES_MR_CYPHER_DEC (0 << 0)
+#define TDES_MR_CYPHER_ENC (1 << 0)
+#define TDES_MR_TDESMOD_MASK (0x3 << 1)
+#define TDES_MR_TDESMOD_DES (0x0 << 1)
+#define TDES_MR_TDESMOD_TDES (0x1 << 1)
+#define TDES_MR_TDESMOD_XTEA (0x2 << 1)
+#define TDES_MR_KEYMOD_3KEY (0 << 4)
+#define TDES_MR_KEYMOD_2KEY (1 << 4)
+#define TDES_MR_SMOD_MASK (0x3 << 8)
+#define TDES_MR_SMOD_MANUAL (0x0 << 8)
+#define TDES_MR_SMOD_AUTO (0x1 << 8)
+#define TDES_MR_SMOD_PDC (0x2 << 8)
+#define TDES_MR_OPMOD_MASK (0x3 << 12)
+#define TDES_MR_OPMOD_ECB (0x0 << 12)
+#define TDES_MR_OPMOD_CBC (0x1 << 12)
+#define TDES_MR_OPMOD_OFB (0x2 << 12)
+#define TDES_MR_OPMOD_CFB (0x3 << 12)
+#define TDES_MR_LOD (0x1 << 15)
+#define TDES_MR_CFBS_MASK (0x3 << 16)
+#define TDES_MR_CFBS_64b (0x0 << 16)
+#define TDES_MR_CFBS_32b (0x1 << 16)
+#define TDES_MR_CFBS_16b (0x2 << 16)
+#define TDES_MR_CFBS_8b (0x3 << 16)
+#define TDES_MR_CKEY_MASK (0xF << 20)
+#define TDES_MR_CKEY_OFFSET 20
+#define TDES_MR_CTYPE_MASK (0x3F << 24)
+#define TDES_MR_CTYPE_OFFSET 24
+
+#define TDES_IER 0x10
+#define TDES_IDR 0x14
+#define TDES_IMR 0x18
+#define TDES_ISR 0x1C
+#define TDES_INT_DATARDY (1 << 0)
+#define TDES_INT_ENDRX (1 << 1)
+#define TDES_INT_ENDTX (1 << 2)
+#define TDES_INT_RXBUFF (1 << 3)
+#define TDES_INT_TXBUFE (1 << 4)
+#define TDES_INT_URAD (1 << 8)
+#define TDES_ISR_URAT_MASK (0x3 << 12)
+#define TDES_ISR_URAT_IDR (0x0 << 12)
+#define TDES_ISR_URAT_ODR (0x1 << 12)
+#define TDES_ISR_URAT_MR (0x2 << 12)
+#define TDES_ISR_URAT_WO (0x3 << 12)
+
+
+#define TDES_KEY1W1R 0x20
+#define TDES_KEY1W2R 0x24
+#define TDES_KEY2W1R 0x28
+#define TDES_KEY2W2R 0x2C
+#define TDES_KEY3W1R 0x30
+#define TDES_KEY3W2R 0x34
+#define TDES_IDATA1R 0x40
+#define TDES_IDATA2R 0x44
+#define TDES_ODATA1R 0x50
+#define TDES_ODATA2R 0x54
+#define TDES_IV1R 0x60
+#define TDES_IV2R 0x64
+
+#define TDES_XTEARNDR 0x70
+#define TDES_XTEARNDR_XTEA_RNDS_MASK (0x3F << 0)
+#define TDES_XTEARNDR_XTEA_RNDS_OFFSET 0
+
+#define TDES_RPR 0x100
+#define TDES_RCR 0x104
+#define TDES_TPR 0x108
+#define TDES_TCR 0x10C
+#define TDES_RNPR		0x110
+#define TDES_RNCR		0x114
+#define TDES_TNPR 0x118
+#define TDES_TNCR 0x11C
+#define TDES_PTCR 0x120
+#define TDES_PTCR_RXTEN (1 << 0)
+#define TDES_PTCR_RXTDIS (1 << 1)
+#define TDES_PTCR_TXTEN (1 << 8)
+#define TDES_PTCR_TXTDIS (1 << 9)
+#define TDES_PTSR 0x124
+#define TDES_PTSR_RXTEN (1 << 0)
+#define TDES_PTSR_TXTEN (1 << 8)
+
+#endif /* __ATMEL_TDES_REGS_H__ */
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
new file mode 100644
index 000000000000..eb2b61e57e2d
--- /dev/null
+++ b/drivers/crypto/atmel-tdes.c
@@ -0,0 +1,1215 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for ATMEL DES/TDES HW acceleration.
+ *
+ * Copyright (c) 2012 Eukréa Electromatique - ATMEL
+ * Author: Nicolas Royer <nicolas@eukrea.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Some ideas are from omap-aes.c drivers.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include "atmel-tdes-regs.h"
+
+/* TDES flags */
+#define TDES_FLAGS_MODE_MASK 0x007f
+#define TDES_FLAGS_ENCRYPT BIT(0)
+#define TDES_FLAGS_CBC BIT(1)
+#define TDES_FLAGS_CFB BIT(2)
+#define TDES_FLAGS_CFB8 BIT(3)
+#define TDES_FLAGS_CFB16 BIT(4)
+#define TDES_FLAGS_CFB32 BIT(5)
+#define TDES_FLAGS_OFB BIT(6)
+
+#define TDES_FLAGS_INIT BIT(16)
+#define TDES_FLAGS_FAST BIT(17)
+#define TDES_FLAGS_BUSY BIT(18)
+
+#define ATMEL_TDES_QUEUE_LENGTH 1
+
+#define CFB8_BLOCK_SIZE 1
+#define CFB16_BLOCK_SIZE 2
+#define CFB32_BLOCK_SIZE 4
+#define CFB64_BLOCK_SIZE 8
+
+
+struct atmel_tdes_dev;
+
+struct atmel_tdes_ctx {
+ struct atmel_tdes_dev *dd;
+
+ int keylen;
+ u32 key[3*DES_KEY_SIZE / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct atmel_tdes_reqctx {
+ unsigned long mode;
+};
+
+struct atmel_tdes_dev {
+ struct list_head list;
+ unsigned long phys_base;
+ void __iomem *io_base;
+
+ struct atmel_tdes_ctx *ctx;
+ struct device *dev;
+ struct clk *iclk;
+ int irq;
+
+ unsigned long flags;
+ int err;
+
+ spinlock_t lock;
+ struct crypto_queue queue;
+
+ struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
+
+ struct ablkcipher_request *req;
+ size_t total;
+
+ struct scatterlist *in_sg;
+ size_t in_offset;
+ struct scatterlist *out_sg;
+ size_t out_offset;
+
+ size_t buflen;
+ size_t dma_size;
+
+ void *buf_in;
+ int dma_in;
+ dma_addr_t dma_addr_in;
+
+ void *buf_out;
+ int dma_out;
+ dma_addr_t dma_addr_out;
+};
+
+struct atmel_tdes_drv {
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+static struct atmel_tdes_drv atmel_tdes = {
+ .dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
+};
+
+static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
+ void *buf, size_t buflen, size_t total, int out)
+{
+ unsigned int count, off = 0;
+
+ while (buflen && total) {
+ count = min((*sg)->length - *offset, total);
+ count = min(count, buflen);
+
+ if (!count)
+ return off;
+
+ scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
+
+ off += count;
+ buflen -= count;
+ *offset += count;
+ total -= count;
+
+ if (*offset == (*sg)->length) {
+ *sg = sg_next(*sg);
+ if (*sg)
+ *offset = 0;
+ else
+ total = 0;
+ }
+ }
+
+ return off;
+}
+
+static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
+{
+ return readl_relaxed(dd->io_base + offset);
+}
+
+static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, dd->io_base + offset);
+}
+
+static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
+ u32 *value, int count)
+{
+ for (; count--; value++, offset += 4)
+ atmel_tdes_write(dd, offset, *value);
+}
+
+static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
+{
+ struct atmel_tdes_dev *tdes_dd = NULL;
+ struct atmel_tdes_dev *tmp;
+
+ spin_lock_bh(&atmel_tdes.lock);
+ if (!ctx->dd) {
+ list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
+ tdes_dd = tmp;
+ break;
+ }
+ ctx->dd = tdes_dd;
+ } else {
+ tdes_dd = ctx->dd;
+ }
+ spin_unlock_bh(&atmel_tdes.lock);
+
+ return tdes_dd;
+}
+
+static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
+{
+ clk_prepare_enable(dd->iclk);
+
+ if (!(dd->flags & TDES_FLAGS_INIT)) {
+ atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
+ dd->flags |= TDES_FLAGS_INIT;
+ dd->err = 0;
+ }
+
+ return 0;
+}
+
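+/*
+ * Program the mode register from the current request: DES vs TDES and 2/3-key
+ * operation are derived from the key length, the block mode (ECB/CBC/CFB/OFB)
+ * and CFB segment size from dd->flags.  MR must be written before the IV
+ * registers.
+ */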
+static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
+{
+ int err;
+ u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
+
+ err = atmel_tdes_hw_init(dd);
+
+ if (err)
+ return err;
+
+ atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+
+ /* MR register must be set before IV registers */
+ if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
+ valmr |= TDES_MR_KEYMOD_3KEY;
+ valmr |= TDES_MR_TDESMOD_TDES;
+ } else if (dd->ctx->keylen > DES_KEY_SIZE) {
+ valmr |= TDES_MR_KEYMOD_2KEY;
+ valmr |= TDES_MR_TDESMOD_TDES;
+ } else {
+ valmr |= TDES_MR_TDESMOD_DES;
+ }
+
+ if (dd->flags & TDES_FLAGS_CBC) {
+ valmr |= TDES_MR_OPMOD_CBC;
+ } else if (dd->flags & TDES_FLAGS_CFB) {
+ valmr |= TDES_MR_OPMOD_CFB;
+
+ if (dd->flags & TDES_FLAGS_CFB8)
+ valmr |= TDES_MR_CFBS_8b;
+ else if (dd->flags & TDES_FLAGS_CFB16)
+ valmr |= TDES_MR_CFBS_16b;
+ else if (dd->flags & TDES_FLAGS_CFB32)
+ valmr |= TDES_MR_CFBS_32b;
+ } else if (dd->flags & TDES_FLAGS_OFB) {
+ valmr |= TDES_MR_OPMOD_OFB;
+ }
+
+ if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
+ valmr |= TDES_MR_CYPHER_ENC;
+
+ atmel_tdes_write(dd, TDES_CR, valcr);
+ atmel_tdes_write(dd, TDES_MR, valmr);
+
+ atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
+ dd->ctx->keylen >> 2);
+
+ if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
+ (dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
+ atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
+ }
+
+ return 0;
+}
+
+static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
+{
+ int err = 0;
+ size_t count;
+
+ atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+
+ if (dd->flags & TDES_FLAGS_FAST) {
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
+
+ /* copy data */
+ count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
+ dd->buf_out, dd->buflen, dd->dma_size, 1);
+ if (count != dd->dma_size) {
+ err = -EINVAL;
+ pr_err("not all data converted: %u\n", count);
+ }
+ }
+
+ return err;
+}
+
+static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd)
+{
+ int err = -ENOMEM;
+
+ dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
+ dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
+ dd->buflen = PAGE_SIZE;
+ dd->buflen &= ~(DES_BLOCK_SIZE - 1);
+
+ if (!dd->buf_in || !dd->buf_out) {
+ dev_err(dd->dev, "unable to alloc pages.\n");
+ goto err_alloc;
+ }
+
+ /* MAP here */
+ dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
+ dd->buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
+ dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+ err = -EINVAL;
+ goto err_map_in;
+ }
+
+ dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
+ dd->buflen, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
+ dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
+ err = -EINVAL;
+ goto err_map_out;
+ }
+
+ return 0;
+
+err_map_out:
+ dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+ DMA_TO_DEVICE);
+err_map_in:
+ free_page((unsigned long)dd->buf_out);
+ free_page((unsigned long)dd->buf_in);
+err_alloc:
+ if (err)
+ pr_err("error: %d\n", err);
+ return err;
+}
+
+static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
+{
+ dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
+ DMA_TO_DEVICE);
+ free_page((unsigned long)dd->buf_out);
+ free_page((unsigned long)dd->buf_in);
+}
+
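+/*
+ * Program the PDC: the transmit channel (TPR/TCR) feeds input data to the
+ * engine, the receive channel (RPR/RCR) collects the output, and the transfer
+ * starts once both channels are enabled in PTCR.  Completion is signalled by
+ * the end-of-receive interrupt.
+ */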
+static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
+ dma_addr_t dma_addr_out, int length)
+{
+ struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct atmel_tdes_dev *dd = ctx->dd;
+ int len32;
+
+ dd->dma_size = length;
+
+ if (!(dd->flags & TDES_FLAGS_FAST)) {
+ dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+ DMA_TO_DEVICE);
+ }
+
+ if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
+ len32 = DIV_ROUND_UP(length, sizeof(u8));
+ else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
+ len32 = DIV_ROUND_UP(length, sizeof(u16));
+ else
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
+ atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
+ atmel_tdes_write(dd, TDES_TCR, len32);
+ atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
+ atmel_tdes_write(dd, TDES_RCR, len32);
+
+ /* Enable Interrupt */
+ atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
+
+ /* Start DMA transfer */
+ atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
+
+ return 0;
+}
+
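+/*
+ * Fast path: single, word-aligned src/dst scatterlist entries are DMA-mapped
+ * and used directly.  Slow path: data is staged chunk by chunk through the
+ * pre-mapped buf_in/buf_out bounce buffers.
+ */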
+static int atmel_tdes_crypt_dma_start(struct atmel_tdes_dev *dd)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
+ crypto_ablkcipher_reqtfm(dd->req));
+ int err, fast = 0, in, out;
+ size_t count;
+ dma_addr_t addr_in, addr_out;
+
+ if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
+ /* check for alignment */
+ in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
+ out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
+
+ fast = in && out;
+ }
+
+ if (fast) {
+ count = min(dd->total, sg_dma_len(dd->in_sg));
+ count = min(count, sg_dma_len(dd->out_sg));
+
+ if (count != dd->total) {
+ pr_err("request length != buffer length\n");
+ return -EINVAL;
+ }
+
+ err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ return -EINVAL;
+ }
+
+ err = dma_map_sg(dd->dev, dd->out_sg, 1,
+ DMA_FROM_DEVICE);
+ if (!err) {
+ dev_err(dd->dev, "dma_map_sg() error\n");
+ dma_unmap_sg(dd->dev, dd->in_sg, 1,
+ DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+
+ addr_in = sg_dma_address(dd->in_sg);
+ addr_out = sg_dma_address(dd->out_sg);
+
+ dd->flags |= TDES_FLAGS_FAST;
+
+ } else {
+ /* use cache buffers */
+ count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
+ dd->buf_in, dd->buflen, dd->total, 0);
+
+ addr_in = dd->dma_addr_in;
+ addr_out = dd->dma_addr_out;
+
+ dd->flags &= ~TDES_FLAGS_FAST;
+
+ }
+
+ dd->total -= count;
+
+ err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
+ if (err) {
+ dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+ }
+
+ return err;
+}
+
+
+static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
+{
+ struct ablkcipher_request *req = dd->req;
+
+ clk_disable_unprepare(dd->iclk);
+
+ dd->flags &= ~TDES_FLAGS_BUSY;
+
+ req->base.complete(&req->base, err);
+}
+
+static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
+ struct ablkcipher_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct atmel_tdes_ctx *ctx;
+ struct atmel_tdes_reqctx *rctx;
+ unsigned long flags;
+ int err, ret = 0;
+
+ spin_lock_irqsave(&dd->lock, flags);
+ if (req)
+ ret = ablkcipher_enqueue_request(&dd->queue, req);
+ if (dd->flags & TDES_FLAGS_BUSY) {
+ spin_unlock_irqrestore(&dd->lock, flags);
+ return ret;
+ }
+ backlog = crypto_get_backlog(&dd->queue);
+ async_req = crypto_dequeue_request(&dd->queue);
+ if (async_req)
+ dd->flags |= TDES_FLAGS_BUSY;
+ spin_unlock_irqrestore(&dd->lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ /* assign new request to device */
+ dd->req = req;
+ dd->total = req->nbytes;
+ dd->in_offset = 0;
+ dd->in_sg = req->src;
+ dd->out_offset = 0;
+ dd->out_sg = req->dst;
+
+ rctx = ablkcipher_request_ctx(req);
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ rctx->mode &= TDES_FLAGS_MODE_MASK;
+ dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
+ dd->ctx = ctx;
+ ctx->dd = dd;
+
+ err = atmel_tdes_write_ctrl(dd);
+ if (!err)
+ err = atmel_tdes_crypt_dma_start(dd);
+ if (err) {
+ /* des_task will not finish it, so do it here */
+ atmel_tdes_finish_req(dd, err);
+ tasklet_schedule(&dd->queue_task);
+ }
+
+ return ret;
+}
+
+
+static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct atmel_tdes_dev *dd;
+
+ if (mode & TDES_FLAGS_CFB8) {
+ if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
+			pr_err("request size is not a multiple of the CFB8 block size\n");
+ return -EINVAL;
+ }
+ } else if (mode & TDES_FLAGS_CFB16) {
+ if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
+			pr_err("request size is not a multiple of the CFB16 block size\n");
+ return -EINVAL;
+ }
+ } else if (mode & TDES_FLAGS_CFB32) {
+ if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
+			pr_err("request size is not a multiple of the CFB32 block size\n");
+ return -EINVAL;
+ }
+ } else if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
+		pr_err("request size is not a multiple of the DES block size\n");
+ return -EINVAL;
+ }
+
+ dd = atmel_tdes_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return atmel_tdes_handle_queue(dd, req);
+}
+
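+/* Single DES setkey: reject wrong key sizes and, if requested, weak keys. */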
+static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ u32 tmp[DES_EXPKEY_WORDS];
+ int err;
+ struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
+
+ struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ if (keylen != DES_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ err = des_ekey(tmp, key);
+ if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ const char *alg_name;
+
+ alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
+
+ /*
+ * HW bug in cfb 3-keys mode.
+ */
+ if (strstr(alg_name, "cfb") && (keylen != 2*DES_KEY_SIZE)) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
+}
+
+static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, 0);
+}
+
+static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
+}
+
+static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
+}
+
+static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
+}
+
+static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
+}
+
+static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+ TDES_FLAGS_CFB8);
+}
+
+static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
+}
+
+static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+ TDES_FLAGS_CFB16);
+}
+
+static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
+}
+
+static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
+ TDES_FLAGS_CFB32);
+}
+
+static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
+}
+
+static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
+}
+
+static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
+{
+ return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
+}
+
+static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
+
+ return 0;
+}
+
+static void atmel_tdes_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg tdes_algs[] = {
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "atmel-ecb-des",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_ecb_encrypt,
+ .decrypt = atmel_tdes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "atmel-cbc-des",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cbc_encrypt,
+ .decrypt = atmel_tdes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "cfb(des)",
+ .cra_driver_name = "atmel-cfb-des",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb_encrypt,
+ .decrypt = atmel_tdes_cfb_decrypt,
+ }
+},
+{
+ .cra_name = "cfb8(des)",
+ .cra_driver_name = "atmel-cfb8-des",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB8_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb8_encrypt,
+ .decrypt = atmel_tdes_cfb8_decrypt,
+ }
+},
+{
+ .cra_name = "cfb16(des)",
+ .cra_driver_name = "atmel-cfb16-des",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB16_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb16_encrypt,
+ .decrypt = atmel_tdes_cfb16_decrypt,
+ }
+},
+{
+ .cra_name = "cfb32(des)",
+ .cra_driver_name = "atmel-cfb32-des",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB32_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_cfb32_encrypt,
+ .decrypt = atmel_tdes_cfb32_decrypt,
+ }
+},
+{
+ .cra_name = "ofb(des)",
+ .cra_driver_name = "atmel-ofb-des",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_des_setkey,
+ .encrypt = atmel_tdes_ofb_encrypt,
+ .decrypt = atmel_tdes_ofb_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "atmel-ecb-tdes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2 * DES_KEY_SIZE,
+ .max_keysize = 3 * DES_KEY_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_ecb_encrypt,
+ .decrypt = atmel_tdes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "atmel-cbc-tdes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2*DES_KEY_SIZE,
+ .max_keysize = 3*DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_cbc_encrypt,
+ .decrypt = atmel_tdes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "cfb(des3_ede)",
+ .cra_driver_name = "atmel-cfb-tdes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2*DES_KEY_SIZE,
+ .max_keysize = 2*DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_cfb_encrypt,
+ .decrypt = atmel_tdes_cfb_decrypt,
+ }
+},
+{
+ .cra_name = "cfb8(des3_ede)",
+ .cra_driver_name = "atmel-cfb8-tdes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB8_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2*DES_KEY_SIZE,
+ .max_keysize = 2*DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_cfb8_encrypt,
+ .decrypt = atmel_tdes_cfb8_decrypt,
+ }
+},
+{
+ .cra_name = "cfb16(des3_ede)",
+ .cra_driver_name = "atmel-cfb16-tdes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB16_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2*DES_KEY_SIZE,
+ .max_keysize = 2*DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_cfb16_encrypt,
+ .decrypt = atmel_tdes_cfb16_decrypt,
+ }
+},
+{
+ .cra_name = "cfb32(des3_ede)",
+ .cra_driver_name = "atmel-cfb32-tdes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CFB32_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2*DES_KEY_SIZE,
+ .max_keysize = 2*DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_cfb32_encrypt,
+ .decrypt = atmel_tdes_cfb32_decrypt,
+ }
+},
+{
+ .cra_name = "ofb(des3_ede)",
+ .cra_driver_name = "atmel-ofb-tdes",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = atmel_tdes_cra_init,
+ .cra_exit = atmel_tdes_cra_exit,
+ .cra_u.ablkcipher = {
+ .min_keysize = 2*DES_KEY_SIZE,
+ .max_keysize = 3*DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = atmel_tdes_setkey,
+ .encrypt = atmel_tdes_ofb_encrypt,
+ .decrypt = atmel_tdes_ofb_decrypt,
+ }
+},
+};
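+
+/*
+ * Illustrative usage sketch (not part of this driver): the transforms above
+ * are used through the generic ablkcipher API, e.g. for "cbc(des3_ede)":
+ *
+ *	struct crypto_ablkcipher *tfm =
+ *			crypto_alloc_ablkcipher("cbc(des3_ede)", 0, 0);
+ *	struct ablkcipher_request *req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ *
+ *	crypto_ablkcipher_setkey(tfm, key, 3 * DES_KEY_SIZE);
+ *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, done, priv);
+ *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
+ *	crypto_ablkcipher_encrypt(req);
+ *
+ * The encrypt call may return -EINPROGRESS and complete later via the
+ * caller-supplied "done"/"priv" callback; error handling is omitted here.
+ */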
+
+static void atmel_tdes_queue_task(unsigned long data)
+{
+ struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
+
+ atmel_tdes_handle_queue(dd, NULL);
+}
+
+static void atmel_tdes_done_task(unsigned long data)
+{
+ struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
+ int err;
+
+ err = atmel_tdes_crypt_dma_stop(dd);
+
+ err = dd->err ? : err;
+
+ if (dd->total && !err) {
+ err = atmel_tdes_crypt_dma_start(dd);
+ if (!err)
+ return;
+ }
+
+ atmel_tdes_finish_req(dd, err);
+ atmel_tdes_handle_queue(dd, NULL);
+}
+
+static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
+{
+ struct atmel_tdes_dev *tdes_dd = dev_id;
+ u32 reg;
+
+ reg = atmel_tdes_read(tdes_dd, TDES_ISR);
+ if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
+ atmel_tdes_write(tdes_dd, TDES_IDR, reg);
+ if (TDES_FLAGS_BUSY & tdes_dd->flags)
+ tasklet_schedule(&tdes_dd->done_task);
+ else
+ dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
+ crypto_unregister_alg(&tdes_algs[i]);
+}
+
+static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
+{
+ int err, i, j;
+
+ for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
+ INIT_LIST_HEAD(&tdes_algs[i].cra_list);
+ err = crypto_register_alg(&tdes_algs[i]);
+ if (err)
+ goto err_tdes_algs;
+ }
+
+ return 0;
+
+err_tdes_algs:
+ for (j = 0; j < i; j++)
+ crypto_unregister_alg(&tdes_algs[j]);
+
+ return err;
+}
+
+static int __devinit atmel_tdes_probe(struct platform_device *pdev)
+{
+ struct atmel_tdes_dev *tdes_dd;
+ struct device *dev = &pdev->dev;
+ struct resource *tdes_res;
+ unsigned long tdes_phys_size;
+ int err;
+
+ tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
+ if (tdes_dd == NULL) {
+ dev_err(dev, "unable to alloc data struct.\n");
+ err = -ENOMEM;
+ goto tdes_dd_err;
+ }
+
+ tdes_dd->dev = dev;
+
+ platform_set_drvdata(pdev, tdes_dd);
+
+ INIT_LIST_HEAD(&tdes_dd->list);
+
+ tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
+ (unsigned long)tdes_dd);
+ tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
+ (unsigned long)tdes_dd);
+
+ crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
+
+ tdes_dd->irq = -1;
+
+ /* Get the base address */
+ tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!tdes_res) {
+ dev_err(dev, "no MEM resource info\n");
+ err = -ENODEV;
+ goto res_err;
+ }
+ tdes_dd->phys_base = tdes_res->start;
+ tdes_phys_size = resource_size(tdes_res);
+
+ /* Get the IRQ */
+ tdes_dd->irq = platform_get_irq(pdev, 0);
+ if (tdes_dd->irq < 0) {
+ dev_err(dev, "no IRQ resource info\n");
+ err = tdes_dd->irq;
+ goto res_err;
+ }
+
+ err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED,
+ "atmel-tdes", tdes_dd);
+ if (err) {
+ dev_err(dev, "unable to request tdes irq.\n");
+ goto tdes_irq_err;
+ }
+
+ /* Initializing the clock */
+ tdes_dd->iclk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tdes_dd->iclk)) {
+		dev_err(dev, "clock initialization failed.\n");
+ err = PTR_ERR(tdes_dd->iclk);
+ goto clk_err;
+ }
+
+ tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size);
+ if (!tdes_dd->io_base) {
+ dev_err(dev, "can't ioremap\n");
+ err = -ENOMEM;
+ goto tdes_io_err;
+ }
+
+ err = atmel_tdes_dma_init(tdes_dd);
+ if (err)
+ goto err_tdes_dma;
+
+ spin_lock(&atmel_tdes.lock);
+ list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
+ spin_unlock(&atmel_tdes.lock);
+
+ err = atmel_tdes_register_algs(tdes_dd);
+ if (err)
+ goto err_algs;
+
+ dev_info(dev, "Atmel DES/TDES\n");
+
+ return 0;
+
+err_algs:
+ spin_lock(&atmel_tdes.lock);
+ list_del(&tdes_dd->list);
+ spin_unlock(&atmel_tdes.lock);
+ atmel_tdes_dma_cleanup(tdes_dd);
+err_tdes_dma:
+ iounmap(tdes_dd->io_base);
+tdes_io_err:
+ clk_put(tdes_dd->iclk);
+clk_err:
+ free_irq(tdes_dd->irq, tdes_dd);
+tdes_irq_err:
+res_err:
+ tasklet_kill(&tdes_dd->done_task);
+ tasklet_kill(&tdes_dd->queue_task);
+ kfree(tdes_dd);
+ tdes_dd = NULL;
+tdes_dd_err:
+ dev_err(dev, "initialization failed.\n");
+
+ return err;
+}
+
+static int __devexit atmel_tdes_remove(struct platform_device *pdev)
+{
+	struct atmel_tdes_dev *tdes_dd;
+
+ tdes_dd = platform_get_drvdata(pdev);
+ if (!tdes_dd)
+ return -ENODEV;
+ spin_lock(&atmel_tdes.lock);
+ list_del(&tdes_dd->list);
+ spin_unlock(&atmel_tdes.lock);
+
+ atmel_tdes_unregister_algs(tdes_dd);
+
+ tasklet_kill(&tdes_dd->done_task);
+ tasklet_kill(&tdes_dd->queue_task);
+
+ atmel_tdes_dma_cleanup(tdes_dd);
+
+ iounmap(tdes_dd->io_base);
+
+ clk_put(tdes_dd->iclk);
+
+ if (tdes_dd->irq >= 0)
+ free_irq(tdes_dd->irq, tdes_dd);
+
+ kfree(tdes_dd);
+ tdes_dd = NULL;
+
+ return 0;
+}
+
+static struct platform_driver atmel_tdes_driver = {
+ .probe = atmel_tdes_probe,
+ .remove = __devexit_p(atmel_tdes_remove),
+ .driver = {
+ .name = "atmel_tdes",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(atmel_tdes_driver);
+
+MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
new file mode 100644
index 000000000000..5398580b4313
--- /dev/null
+++ b/drivers/crypto/bfin_crc.c
@@ -0,0 +1,780 @@
+/*
+ * Cryptographic API.
+ *
+ * Support Blackfin CRC HW acceleration.
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+
+#include <asm/blackfin.h>
+#include <asm/bfin_crc.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#define CRC_CCRYPTO_QUEUE_LENGTH 5
+
+#define DRIVER_NAME "bfin-hmac-crc"
+#define CHKSUM_DIGEST_SIZE 4
+#define CHKSUM_BLOCK_SIZE 1
+
+#define CRC_MAX_DMA_DESC 100
+
+#define CRC_CRYPTO_STATE_UPDATE 1
+#define CRC_CRYPTO_STATE_FINALUPDATE 2
+#define CRC_CRYPTO_STATE_FINISH 3
+
+struct bfin_crypto_crc {
+ struct list_head list;
+ struct device *dev;
+ spinlock_t lock;
+
+ int irq;
+ int dma_ch;
+ u32 poly;
+ volatile struct crc_register *regs;
+
+ struct ahash_request *req; /* current request in operation */
+ struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
+ dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
+ u8 *sg_mid_buf;
+
+ struct tasklet_struct done_task;
+ struct crypto_queue queue; /* waiting requests */
+
+ u8 busy:1; /* crc device in operation flag */
+};
+
+static struct bfin_crypto_crc_list {
+ struct list_head dev_list;
+ spinlock_t lock;
+} crc_list;
+
+struct bfin_crypto_crc_reqctx {
+ struct bfin_crypto_crc *crc;
+
+ unsigned int total; /* total request bytes */
+ size_t sg_buflen; /* bytes for this update */
+ unsigned int sg_nents;
+ struct scatterlist *sg; /* sg list head for this update*/
+ struct scatterlist bufsl[2]; /* chained sg list */
+
+ size_t bufnext_len;
+ size_t buflast_len;
+	u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */
+	u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */
+
+ u8 flag;
+};
+
+struct bfin_crypto_crc_ctx {
+ struct bfin_crypto_crc *crc;
+ u32 key;
+};
+
+
+/*
+ * derive number of elements in scatterlist
+ */
+static int sg_count(struct scatterlist *sg_list)
+{
+ struct scatterlist *sg = sg_list;
+ int sg_nents = 1;
+
+ if (sg_list == NULL)
+ return 0;
+
+ while (!sg_is_last(sg)) {
+ sg_nents++;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return sg_nents;
+}
+
+/*
+ * get element in scatter list by given index
+ */
+static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
+ unsigned int index)
+{
+ struct scatterlist *sg = NULL;
+ int i;
+
+ for_each_sg(sg_list, sg, nents, i)
+ if (i == index)
+ break;
+
+ return sg;
+}
+
+static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
+{
+ crc->regs->datacntrld = 0;
+ crc->regs->control = MODE_CALC_CRC << OPMODE_OFFSET;
+ crc->regs->curresult = key;
+
+ /* setup CRC interrupts */
+ crc->regs->status = CMPERRI | DCNTEXPI;
+ crc->regs->intrenset = CMPERRI | DCNTEXPI;
+ SSYNC();
+
+ return 0;
+}
+
+static int bfin_crypto_crc_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
+ struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
+ struct bfin_crypto_crc *crc;
+
+	spin_lock_bh(&crc_list.lock);
+	list_for_each_entry(crc, &crc_list.dev_list, list) {
+		crc_ctx->crc = crc;
+		break;
+	}
+	spin_unlock_bh(&crc_list.lock);
+
+	dev_dbg(crc->dev, "crc_init\n");
+
+ if (sg_count(req->src) > CRC_MAX_DMA_DESC) {
+ dev_dbg(crc->dev, "init: requested sg list is too big > %d\n",
+ CRC_MAX_DMA_DESC);
+ return -EINVAL;
+ }
+
+ ctx->crc = crc;
+ ctx->bufnext_len = 0;
+ ctx->buflast_len = 0;
+ ctx->sg_buflen = 0;
+ ctx->total = 0;
+ ctx->flag = 0;
+
+ /* init crc results */
+ put_unaligned_le32(crc_ctx->key, req->result);
+
+ dev_dbg(crc->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
+}
+
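+/*
+ * Build one descriptor-array DMA entry per scatterlist element.  The CRC
+ * block only consumes 32-bit words, so any sub-word tail of an element is
+ * staged in sg_mid_buf, merged with the head of the next element and emitted
+ * as an extra one-word descriptor.
+ */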
+static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
+{
+ struct scatterlist *sg;
+ struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
+ int i = 0, j = 0;
+ unsigned long dma_config;
+ unsigned int dma_count;
+ unsigned int dma_addr;
+ unsigned int mid_dma_count = 0;
+ int dma_mod;
+
+ dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
+
+ for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
+ dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
+ dma_addr = sg_dma_address(sg);
+ /* deduce extra bytes in last sg */
+ if (sg_is_last(sg))
+ dma_count = sg_dma_len(sg) - ctx->bufnext_len;
+ else
+ dma_count = sg_dma_len(sg);
+
+ if (mid_dma_count) {
+ /* Append last middle dma buffer to 4 bytes with first
+ bytes in current sg buffer. Move addr of current
+ sg and deduce the length of current sg.
+ */
+ memcpy(crc->sg_mid_buf +((i-1) << 2) + mid_dma_count,
+ (void *)dma_addr,
+ CHKSUM_DIGEST_SIZE - mid_dma_count);
+ dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
+ dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
+ }
+ /* chop current sg dma len to multiple of 32 bits */
+ mid_dma_count = dma_count % 4;
+ dma_count &= ~0x3;
+
+ if (dma_addr % 4 == 0) {
+ dma_config |= WDSIZE_32;
+ dma_count >>= 2;
+ dma_mod = 4;
+ } else if (dma_addr % 2 == 0) {
+ dma_config |= WDSIZE_16;
+ dma_count >>= 1;
+ dma_mod = 2;
+ } else {
+ dma_config |= WDSIZE_8;
+ dma_mod = 1;
+ }
+
+ crc->sg_cpu[i].start_addr = dma_addr;
+ crc->sg_cpu[i].cfg = dma_config;
+ crc->sg_cpu[i].x_count = dma_count;
+ crc->sg_cpu[i].x_modify = dma_mod;
+ dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
+ "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
+ i, crc->sg_cpu[i].start_addr,
+ crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
+ crc->sg_cpu[i].x_modify);
+ i++;
+
+ if (mid_dma_count) {
+ /* copy extra bytes to next middle dma buffer */
+ dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
+ DMAEN | PSIZE_32 | WDSIZE_32;
+ memcpy(crc->sg_mid_buf + (i << 2),
+ (void *)(dma_addr + (dma_count << 2)),
+ mid_dma_count);
+ /* setup new dma descriptor for next middle dma */
+ crc->sg_cpu[i].start_addr = dma_map_single(crc->dev,
+ crc->sg_mid_buf + (i << 2),
+ CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
+ crc->sg_cpu[i].cfg = dma_config;
+ crc->sg_cpu[i].x_count = 1;
+ crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
+ dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
+ "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
+ i, crc->sg_cpu[i].start_addr,
+ crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
+ crc->sg_cpu[i].x_modify);
+ i++;
+ }
+ }
+
+ dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
+ /* For final update req, append the buffer for next update as well*/
+ if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
+ ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
+ crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
+ CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
+ crc->sg_cpu[i].cfg = dma_config;
+ crc->sg_cpu[i].x_count = 1;
+ crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
+ dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
+ "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
+ i, crc->sg_cpu[i].start_addr,
+ crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
+ crc->sg_cpu[i].x_modify);
+ i++;
+ }
+
+ if (i == 0)
+ return;
+
+ flush_dcache_range((unsigned int)crc->sg_cpu,
+ (unsigned int)crc->sg_cpu +
+ i * sizeof(struct dma_desc_array));
+
+ /* Set the last descriptor to stop mode */
+ crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
+ crc->sg_cpu[i - 1].cfg |= DI_EN;
+ set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
+ set_dma_x_count(crc->dma_ch, 0);
+ set_dma_x_modify(crc->dma_ch, 0);
+ SSYNC();
+ set_dma_config(crc->dma_ch, dma_config);
+}
+
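+/*
+ * Dispatch the next queued request.  Data is always fed to the CRC block in
+ * 32-bit units: a sub-word remainder is kept in bufnext and either chained in
+ * front of the next update or zero-padded and appended on final/finish.
+ */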
+static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
+ struct ahash_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct bfin_crypto_crc_reqctx *ctx;
+ struct scatterlist *sg;
+ int ret = 0;
+ int nsg, i, j;
+ unsigned int nextlen;
+ unsigned long flags;
+
+ spin_lock_irqsave(&crc->lock, flags);
+ if (req)
+ ret = ahash_enqueue_request(&crc->queue, req);
+ if (crc->busy) {
+ spin_unlock_irqrestore(&crc->lock, flags);
+ return ret;
+ }
+ backlog = crypto_get_backlog(&crc->queue);
+ async_req = crypto_dequeue_request(&crc->queue);
+ if (async_req)
+ crc->busy = 1;
+ spin_unlock_irqrestore(&crc->lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+ crc->req = req;
+ ctx = ahash_request_ctx(req);
+ ctx->sg = NULL;
+ ctx->sg_buflen = 0;
+ ctx->sg_nents = 0;
+
+ dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
+ ctx->flag, req->nbytes);
+
+ if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
+ if (ctx->bufnext_len == 0) {
+ crc->busy = 0;
+ return 0;
+ }
+
+ /* Pack last crc update buffer to 32bit */
+ memset(ctx->bufnext + ctx->bufnext_len, 0,
+ CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
+ } else {
+ /* Pack small data which is less than 32bit to buffer for next update. */
+ if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
+ memcpy(ctx->bufnext + ctx->bufnext_len,
+ sg_virt(req->src), req->nbytes);
+ ctx->bufnext_len += req->nbytes;
+ if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
+ ctx->bufnext_len) {
+ goto finish_update;
+ } else {
+ crc->busy = 0;
+ return 0;
+ }
+ }
+
+ if (ctx->bufnext_len) {
+ /* Chain in extra bytes of last update */
+ ctx->buflast_len = ctx->bufnext_len;
+ memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);
+
+ nsg = ctx->sg_buflen ? 2 : 1;
+ sg_init_table(ctx->bufsl, nsg);
+ sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
+ if (nsg > 1)
+ scatterwalk_sg_chain(ctx->bufsl, nsg,
+ req->src);
+ ctx->sg = ctx->bufsl;
+ } else
+ ctx->sg = req->src;
+
+ /* Chop crc buffer size to multiple of 32 bit */
+ nsg = ctx->sg_nents = sg_count(ctx->sg);
+ ctx->sg_buflen = ctx->buflast_len + req->nbytes;
+ ctx->bufnext_len = ctx->sg_buflen % 4;
+ ctx->sg_buflen &= ~0x3;
+
+ if (ctx->bufnext_len) {
+ /* copy extra bytes to buffer for next update */
+ memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
+ nextlen = ctx->bufnext_len;
+ for (i = nsg - 1; i >= 0; i--) {
+ sg = sg_get(ctx->sg, nsg, i);
+ j = min(nextlen, sg_dma_len(sg));
+ memcpy(ctx->bufnext + nextlen - j,
+ sg_virt(sg) + sg_dma_len(sg) - j, j);
+ if (j == sg_dma_len(sg))
+ ctx->sg_nents--;
+ nextlen -= j;
+ if (nextlen == 0)
+ break;
+ }
+ }
+ }
+
+finish_update:
+ if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
+ ctx->flag == CRC_CRYPTO_STATE_FINISH))
+ ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
+
+ /* set CRC data count before start DMA */
+ crc->regs->datacnt = ctx->sg_buflen >> 2;
+
+ /* setup and enable CRC DMA */
+ bfin_crypto_crc_config_dma(crc);
+
+ /* finally kick off CRC operation */
+ crc->regs->control |= BLKEN;
+ SSYNC();
+
+ return -EINPROGRESS;
+}
+
+static int bfin_crypto_crc_update(struct ahash_request *req)
+{
+ struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ dev_dbg(ctx->crc->dev, "crc_update\n");
+ ctx->total += req->nbytes;
+ ctx->flag = CRC_CRYPTO_STATE_UPDATE;
+
+ return bfin_crypto_crc_handle_queue(ctx->crc, req);
+}
+
+static int bfin_crypto_crc_final(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
+ struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
+
+ dev_dbg(ctx->crc->dev, "crc_final\n");
+ ctx->flag = CRC_CRYPTO_STATE_FINISH;
+ crc_ctx->key = 0;
+
+ return bfin_crypto_crc_handle_queue(ctx->crc, req);
+}
+
+static int bfin_crypto_crc_finup(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
+ struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
+
+ dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
+ ctx->total += req->nbytes;
+ ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
+ crc_ctx->key = 0;
+
+ return bfin_crypto_crc_handle_queue(ctx->crc, req);
+}
+
+static int bfin_crypto_crc_digest(struct ahash_request *req)
+{
+ int ret;
+
+ ret = bfin_crypto_crc_init(req);
+ if (ret)
+ return ret;
+
+ return bfin_crypto_crc_finup(req);
+}
+
+static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
+
+ dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
+ if (keylen != CHKSUM_DIGEST_SIZE) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ crc_ctx->key = get_unaligned_le32(key);
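+ /* The 4-byte key is taken as a little-endian 32-bit value and used to seed the CRC computation. */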
+
+ return 0;
+}
+
+static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
+{
+ struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);
+
+ crc_ctx->key = 0;
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct bfin_crypto_crc_reqctx));
+
+ return 0;
+}
+
+static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct ahash_alg algs = {
+ .init = bfin_crypto_crc_init,
+ .update = bfin_crypto_crc_update,
+ .final = bfin_crypto_crc_final,
+ .finup = bfin_crypto_crc_finup,
+ .digest = bfin_crypto_crc_digest,
+ .setkey = bfin_crypto_crc_setkey,
+ .halg.digestsize = CHKSUM_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(crc32)",
+ .cra_driver_name = DRIVER_NAME,
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = bfin_crypto_crc_cra_init,
+ .cra_exit = bfin_crypto_crc_cra_exit,
+ }
+};
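+/*
+ * For reference, a consumer would typically obtain this transform through
+ * the generic ahash API, roughly as sketched below (illustrative only;
+ * "seed" stands for a caller-supplied 4-byte buffer):
+ *
+ *	struct crypto_ahash *tfm;
+ *
+ *	tfm = crypto_alloc_ahash("hmac(crc32)", 0, 0);
+ *	if (!IS_ERR(tfm)) {
+ *		crypto_ahash_setkey(tfm, seed, CHKSUM_DIGEST_SIZE);
+ *		...
+ *		crypto_free_ahash(tfm);
+ *	}
+ */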
+
+static void bfin_crypto_crc_done_task(unsigned long data)
+{
+ struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;
+
+ bfin_crypto_crc_handle_queue(crc, NULL);
+}
+
+static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
+{
+ struct bfin_crypto_crc *crc = dev_id;
+
+ if (crc->regs->status & DCNTEXP) {
+ crc->regs->status = DCNTEXP;
+ SSYNC();
+
+ /* prepare results */
+ put_unaligned_le32(crc->regs->result, crc->req->result);
+
+ crc->regs->control &= ~BLKEN;
+ crc->busy = 0;
+
+ if (crc->req->base.complete)
+ crc->req->base.complete(&crc->req->base, 0);
+
+ tasklet_schedule(&crc->done_task);
+
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+#ifdef CONFIG_PM
+/**
+ * bfin_crypto_crc_suspend - suspend crc device
+ * @pdev: device being suspended
+ * @state: requested suspend state
+ */
+static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
+ int i = 100000;
+
+ while ((crc->regs->control & BLKEN) && --i)
+ cpu_relax();
+
+ if (i == 0)
+ return -EBUSY;
+
+ return 0;
+}
+#else
+# define bfin_crypto_crc_suspend NULL
+#endif
+
+#define bfin_crypto_crc_resume NULL
+
+/**
+ * bfin_crypto_crc_probe - Initialize the CRC device
+ *
+ */
+static int __devinit bfin_crypto_crc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct bfin_crypto_crc *crc;
+ unsigned int timeout = 100000;
+ int ret;
+
+ crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+ if (!crc) {
+ dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
+ return -ENOMEM;
+ }
+
+ crc->dev = dev;
+
+ INIT_LIST_HEAD(&crc->list);
+ spin_lock_init(&crc->lock);
+ tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
+ crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_mem;
+ }
+
+ crc->regs = ioremap(res->start, resource_size(res));
+ if (!crc->regs) {
+ dev_err(&pdev->dev, "Cannot map CRC IO\n");
+ ret = -ENXIO;
+ goto out_error_free_mem;
+ }
+
+ crc->irq = platform_get_irq(pdev, 0);
+ if (crc->irq < 0) {
+ dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+ ret = request_irq(crc->irq, bfin_crypto_crc_handler, IRQF_SHARED, dev_name(dev), crc);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
+ goto out_error_unmap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No CRC DMA channel specified\n");
+ ret = -ENOENT;
+ goto out_error_irq;
+ }
+ crc->dma_ch = res->start;
+
+ ret = request_dma(crc->dma_ch, dev_name(dev));
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
+ goto out_error_irq;
+ }
+
+ crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
+ if (crc->sg_cpu == NULL) {
+ ret = -ENOMEM;
+ goto out_error_dma;
+ }
+ /*
+ * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
+ * 1 last + 1 next dma descriptors
+ */
+ crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
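+ /*
+ * sg_mid_buf therefore begins right after the 2 * (CRC_MAX_DMA_DESC + 1)
+ * descriptor slots reserved at the start of the coherent page.
+ */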
+
+ crc->regs->control = 0;
+ SSYNC();
+ crc->regs->poly = crc->poly = (u32)pdev->dev.platform_data;
+ SSYNC();
+
+ while (!(crc->regs->status & LUTDONE) && (--timeout) > 0)
+ cpu_relax();
+
+ if (timeout == 0)
+ dev_info(&pdev->dev, "init crc poly timeout\n");
+
+ spin_lock(&crc_list.lock);
+ list_add(&crc->list, &crc_list.dev_list);
+ spin_unlock(&crc_list.lock);
+
+ platform_set_drvdata(pdev, crc);
+
+ ret = crypto_register_ahash(&algs);
+ if (ret) {
+ spin_lock(&crc_list.lock);
+ list_del(&crc->list);
+ spin_unlock(&crc_list.lock);
+ dev_err(&pdev->dev, "Cann't register crypto ahash device\n");
+ goto out_error_dma;
+ }
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+
+out_error_dma:
+ if (crc->sg_cpu)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
+ free_dma(crc->dma_ch);
+out_error_irq:
+ free_irq(crc->irq, crc);
+out_error_unmap:
+ iounmap((void *)crc->regs);
+out_error_free_mem:
+ kfree(crc);
+
+ return ret;
+}
+
+/**
+ * bfin_crypto_crc_remove - Remove the CRC device
+ *
+ */
+static int __devexit bfin_crypto_crc_remove(struct platform_device *pdev)
+{
+ struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
+
+ if (!crc)
+ return -ENODEV;
+
+ spin_lock(&crc_list.lock);
+ list_del(&crc->list);
+ spin_unlock(&crc_list.lock);
+
+ crypto_unregister_ahash(&algs);
+ tasklet_kill(&crc->done_task);
+ iounmap((void *)crc->regs);
+ free_dma(crc->dma_ch);
+ if (crc->irq > 0)
+ free_irq(crc->irq, crc);
+ kfree(crc);
+
+ return 0;
+}
+
+static struct platform_driver bfin_crypto_crc_driver = {
+ .probe = bfin_crypto_crc_probe,
+ .remove = __devexit_p(bfin_crypto_crc_remove),
+ .suspend = bfin_crypto_crc_suspend,
+ .resume = bfin_crypto_crc_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/**
+ * bfin_crypto_crc_mod_init - Initialize module
+ *
+ * Checks the module params and registers the platform driver.
+ * Real work is in the platform probe function.
+ */
+static int __init bfin_crypto_crc_mod_init(void)
+{
+ int ret;
+
+ pr_info("Blackfin hardware CRC crypto driver\n");
+
+ INIT_LIST_HEAD(&crc_list.dev_list);
+ spin_lock_init(&crc_list.lock);
+
+ ret = platform_driver_register(&bfin_crypto_crc_driver);
+ if (ret) {
+ pr_err("unable to register driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * bfin_crypto_crc_mod_exit - Deinitialize module
+ */
+static void __exit bfin_crypto_crc_mod_exit(void)
+{
+ platform_driver_unregister(&bfin_crypto_crc_driver);
+}
+
+module_init(bfin_crypto_crc_mod_init);
+module_exit(bfin_crypto_crc_mod_exit);
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 2d876bb98ff4..65c7668614ab 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -32,10 +32,13 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
depends on CRYPTO_DEV_FSL_CAAM
- default y
+ default n
help
Enable the Job Ring's interrupt coalescing feature.
+ Note: the driver already provides adequate
+ interrupt coalescing in software.
+
config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
int "Job Ring interrupt coalescing count threshold"
depends on CRYPTO_DEV_FSL_CAAM_INTC
@@ -70,3 +73,28 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
To compile this as a module, choose M here: the module
will be called caamalg.
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API
+ tristate "Register hash algorithm implementations with Crypto API"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ select CRYPTO_AHASH
+ help
+ Selecting this will offload ahash operations for users of the
+ scatterlist crypto API to the SEC4 via the job ring.
+
+ To compile this as a module, choose M here: the module
+ will be called caamhash.
+
+config CRYPTO_DEV_FSL_CAAM_RNG_API
+ tristate "Register caam device for hwrng API"
+ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ select CRYPTO_RNG
+ select HW_RANDOM
+ help
+ Selecting this will register the SEC4 hardware RNG with the
+ hw_random API for supplying the kernel entropy pool.
+
+ To compile this as a module, choose M here: the module
+ will be called caamrng.
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index ef39011b4505..b1eb44838db5 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -4,5 +4,7 @@
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
-caam-objs := ctrl.o jr.o error.o
+caam-objs := ctrl.o jr.o error.o key_gen.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4eec389184d3..0c1ea8492eff 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -37,9 +37,10 @@
* | ShareDesc Pointer |
* | SEQ_OUT_PTR |
* | (output buffer) |
+ * | (output length) |
* | SEQ_IN_PTR |
* | (input buffer) |
- * | LOAD (to DECO) |
+ * | (input length) |
* ---------------------
*/
@@ -50,6 +51,8 @@
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
/*
* crypto alg
@@ -62,7 +65,7 @@
#define CAAM_MAX_IV_LENGTH 16
/* length of descriptors text */
-#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
@@ -143,11 +146,11 @@ static inline void aead_append_ld_iv(u32 *desc, int ivsize)
*/
static inline void ablkcipher_append_src_dst(u32 *desc)
{
- append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); \
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | \
- KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); \
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); \
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
@@ -452,121 +455,12 @@ static int aead_setauthsize(struct crypto_aead *authenc,
return 0;
}
-struct split_key_result {
- struct completion completion;
- int err;
-};
-
-static void split_key_done(struct device *dev, u32 *desc, u32 err,
- void *context)
+static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
+ u32 authkeylen)
{
- struct split_key_result *res = context;
-
-#ifdef DEBUG
- dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
- if (err) {
- char tmp[CAAM_ERROR_STR_MAX];
-
- dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
- }
-
- res->err = err;
-
- complete(&res->completion);
-}
-
-/*
-get a split ipad/opad key
-
-Split key generation-----------------------------------------------
-
-[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
-[01] 0x04000014 key: class2->keyreg len=20
- @0xffe01000
-[03] 0x84410014 operation: cls2-op sha1 hmac init dec
-[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
-[05] 0xa4000001 jump: class2 local all ->1 [06]
-[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
- @0xffe04000
-*/
-static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
-{
- struct device *jrdev = ctx->jrdev;
- u32 *desc;
- struct split_key_result result;
- dma_addr_t dma_addr_in, dma_addr_out;
- int ret = 0;
-
- desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
-
- init_job_desc(desc, 0);
-
- dma_addr_in = dma_map_single(jrdev, (void *)key_in, authkeylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_in)) {
- dev_err(jrdev, "unable to map key input memory\n");
- kfree(desc);
- return -ENOMEM;
- }
- append_key(desc, dma_addr_in, authkeylen, CLASS_2 |
- KEY_DEST_CLASS_REG);
-
- /* Sets MDHA up into an HMAC-INIT */
- append_operation(desc, ctx->alg_op | OP_ALG_DECRYPT |
- OP_ALG_AS_INIT);
-
- /*
- * do a FIFO_LOAD of zero, this will trigger the internal key expansion
- into both pads inside MDHA
- */
- append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
-
- /*
- * FIFO_STORE with the explicit split-key content store
- * (0x26 output type)
- */
- dma_addr_out = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_out)) {
- dev_err(jrdev, "unable to map key output memory\n");
- kfree(desc);
- return -ENOMEM;
- }
- append_fifo_store(desc, dma_addr_out, ctx->split_key_len,
- LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, authkeylen, 1);
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-
- result.err = 0;
- init_completion(&result.completion);
-
- ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
- if (!ret) {
- /* in progress */
- wait_for_completion_interruptible(&result.completion);
- ret = result.err;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
-#endif
- }
-
- dma_unmap_single(jrdev, dma_addr_out, ctx->split_key_pad_len,
- DMA_FROM_DEVICE);
- dma_unmap_single(jrdev, dma_addr_in, authkeylen, DMA_TO_DEVICE);
-
- kfree(desc);
-
- return ret;
+ return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+ ctx->split_key_pad_len, key_in, authkeylen,
+ ctx->alg_op);
}
static int aead_setkey(struct crypto_aead *aead,
@@ -610,7 +504,7 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ret = gen_split_key(ctx, key, authkeylen);
+ ret = gen_split_aead_key(ctx, key, authkeylen);
if (ret) {
goto badkey;
}
@@ -757,72 +651,78 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return ret;
}
-struct link_tbl_entry {
- u64 ptr;
- u32 len;
- u8 reserved;
- u8 buf_pool_id;
- u16 offset;
-};
-
/*
* aead_edesc - s/w-extended aead descriptor
* @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @assoc_chained: if source is chained
* @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
* @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
* @iv_dma: dma address of iv for checking continuity and link table
* @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
- * @link_tbl_bytes: length of dma mapped link_tbl space
- * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct aead_edesc {
int assoc_nents;
+ bool assoc_chained;
int src_nents;
+ bool src_chained;
int dst_nents;
+ bool dst_chained;
dma_addr_t iv_dma;
- int link_tbl_bytes;
- dma_addr_t link_tbl_dma;
- struct link_tbl_entry *link_tbl;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
u32 hw_desc[0];
};
/*
* ablkcipher_edesc - s/w-extended ablkcipher descriptor
* @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
* @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
* @iv_dma: dma address of iv for checking continuity and link table
* @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
- * @link_tbl_bytes: length of dma mapped link_tbl space
- * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
int src_nents;
+ bool src_chained;
int dst_nents;
+ bool dst_chained;
dma_addr_t iv_dma;
- int link_tbl_bytes;
- dma_addr_t link_tbl_dma;
- struct link_tbl_entry *link_tbl;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
u32 hw_desc[0];
};
static void caam_unmap(struct device *dev, struct scatterlist *src,
- struct scatterlist *dst, int src_nents, int dst_nents,
- dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
- int link_tbl_bytes)
+ struct scatterlist *dst, int src_nents,
+ bool src_chained, int dst_nents, bool dst_chained,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+ int sec4_sg_bytes)
{
- if (unlikely(dst != src)) {
- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ if (dst != src) {
+ dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
+ src_chained);
+ dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
+ dst_chained);
} else {
- dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sg_chained(dev, src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
}
if (iv_dma)
dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
- if (link_tbl_bytes)
- dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
+ if (sec4_sg_bytes)
+ dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
}
@@ -833,12 +733,13 @@ static void aead_unmap(struct device *dev,
struct crypto_aead *aead = crypto_aead_reqtfm(req);
int ivsize = crypto_aead_ivsize(aead);
- dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+ dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
+ DMA_TO_DEVICE, edesc->assoc_chained);
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->link_tbl_dma,
- edesc->link_tbl_bytes);
+ edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+ edesc->dst_chained, edesc->iv_dma, ivsize,
+ edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void ablkcipher_unmap(struct device *dev,
@@ -849,9 +750,9 @@ static void ablkcipher_unmap(struct device *dev,
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->link_tbl_dma,
- edesc->link_tbl_bytes);
+ edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+ edesc->dst_chained, edesc->iv_dma, ivsize,
+ edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -942,7 +843,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
sizeof(struct iphdr) + req->assoclen +
((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
ctx->authsize + 36, 1);
- if (!err && edesc->link_tbl_bytes) {
+ if (!err && edesc->sec4_sg_bytes) {
struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
@@ -1026,50 +927,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
ablkcipher_request_complete(req, err);
}
-static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
- dma_addr_t dma, u32 len, u32 offset)
-{
- link_tbl_ptr->ptr = dma;
- link_tbl_ptr->len = len;
- link_tbl_ptr->reserved = 0;
- link_tbl_ptr->buf_pool_id = 0;
- link_tbl_ptr->offset = offset;
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
- sizeof(struct link_tbl_entry), 1);
-#endif
-}
-
-/*
- * convert scatterlist to h/w link table format
- * but does not have final bit; instead, returns last entry
- */
-static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
- int sg_count, struct link_tbl_entry
- *link_tbl_ptr, u32 offset)
-{
- while (sg_count) {
- sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
- sg_dma_len(sg), offset);
- link_tbl_ptr++;
- sg = sg_next(sg);
- sg_count--;
- }
- return link_tbl_ptr - 1;
-}
-
-/*
- * convert scatterlist to h/w link table format
- * scatterlist must have been previously dma mapped
- */
-static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
- struct link_tbl_entry *link_tbl_ptr, u32 offset)
-{
- link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
- link_tbl_ptr->len |= 0x40000000;
-}
-
/*
* Fill in aead job descriptor
*/
@@ -1085,7 +942,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
u32 *desc = edesc->hw_desc;
u32 out_options = 0, in_options;
dma_addr_t dst_dma, src_dma;
- int len, link_tbl_index = 0;
+ int len, sec4_sg_index = 0;
#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1111,9 +968,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
src_dma = sg_dma_address(req->assoc);
in_options = 0;
} else {
- src_dma = edesc->link_tbl_dma;
- link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
- (edesc->src_nents ? : 1);
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
+ (edesc->src_nents ? : 1);
in_options = LDST_SGF;
}
if (encrypt)
@@ -1127,7 +984,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
if (all_contig) {
dst_dma = sg_dma_address(req->src);
} else {
- dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+ dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
((edesc->assoc_nents ? : 1) + 1);
out_options = LDST_SGF;
}
@@ -1135,9 +992,9 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
if (!edesc->dst_nents) {
dst_dma = sg_dma_address(req->dst);
} else {
- dst_dma = edesc->link_tbl_dma +
- link_tbl_index *
- sizeof(struct link_tbl_entry);
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
out_options = LDST_SGF;
}
}
@@ -1163,7 +1020,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
u32 *desc = edesc->hw_desc;
u32 out_options = 0, in_options;
dma_addr_t dst_dma, src_dma;
- int len, link_tbl_index = 0;
+ int len, sec4_sg_index = 0;
#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1188,8 +1045,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
src_dma = sg_dma_address(req->assoc);
in_options = 0;
} else {
- src_dma = edesc->link_tbl_dma;
- link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
in_options = LDST_SGF;
}
append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
@@ -1199,13 +1056,13 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
dst_dma = edesc->iv_dma;
} else {
if (likely(req->src == req->dst)) {
- dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+ dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
edesc->assoc_nents;
out_options = LDST_SGF;
} else {
- dst_dma = edesc->link_tbl_dma +
- link_tbl_index *
- sizeof(struct link_tbl_entry);
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
out_options = LDST_SGF;
}
}
@@ -1226,7 +1083,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
u32 *desc = edesc->hw_desc;
u32 out_options = 0, in_options;
dma_addr_t dst_dma, src_dma;
- int len, link_tbl_index = 0;
+ int len, sec4_sg_index = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
@@ -1244,8 +1101,8 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
src_dma = edesc->iv_dma;
in_options = 0;
} else {
- src_dma = edesc->link_tbl_dma;
- link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
in_options = LDST_SGF;
}
append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
@@ -1254,16 +1111,16 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
if (!edesc->src_nents && iv_contig) {
dst_dma = sg_dma_address(req->src);
} else {
- dst_dma = edesc->link_tbl_dma +
- sizeof(struct link_tbl_entry);
+ dst_dma = edesc->sec4_sg_dma +
+ sizeof(struct sec4_sg_entry);
out_options = LDST_SGF;
}
} else {
if (!edesc->dst_nents) {
dst_dma = sg_dma_address(req->dst);
} else {
- dst_dma = edesc->link_tbl_dma +
- link_tbl_index * sizeof(struct link_tbl_entry);
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
out_options = LDST_SGF;
}
}
@@ -1271,28 +1128,6 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
}
/*
- * derive number of elements in scatterlist
- */
-static int sg_count(struct scatterlist *sg_list, int nbytes)
-{
- struct scatterlist *sg = sg_list;
- int sg_nents = 0;
-
- while (nbytes > 0) {
- sg_nents++;
- nbytes -= sg->length;
- if (!sg_is_last(sg) && (sg + 1)->length == 0)
- BUG(); /* Not support chaining */
- sg = scatterwalk_sg_next(sg);
- }
-
- if (likely(sg_nents == 1))
- return 0;
-
- return sg_nents;
-}
-
-/*
* allocate and map the aead extended descriptor
*/
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -1308,25 +1143,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dma_addr_t iv_dma = 0;
int sgc;
bool all_contig = true;
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
int ivsize = crypto_aead_ivsize(aead);
- int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
- assoc_nents = sg_count(req->assoc, req->assoclen);
- src_nents = sg_count(req->src, req->cryptlen);
+ assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen);
+ dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
- sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL);
+ sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL, assoc_chained);
if (likely(req->src == req->dst)) {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL);
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
} else {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE);
- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE);
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
}
/* Check if data are contiguous */
@@ -1337,50 +1173,53 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
all_contig = false;
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
- link_tbl_len = assoc_nents + 1 + src_nents;
+ sec4_sg_len = assoc_nents + 1 + src_nents;
}
- link_tbl_len += dst_nents;
+ sec4_sg_len += dst_nents;
- link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- link_tbl_bytes, GFP_DMA | flags);
+ sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->assoc_nents = assoc_nents;
+ edesc->assoc_chained = assoc_chained;
edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
edesc->iv_dma = iv_dma;
- edesc->link_tbl_bytes = link_tbl_bytes;
- edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
- desc_bytes;
- edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
- link_tbl_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
*all_contig_ptr = all_contig;
- link_tbl_index = 0;
+ sec4_sg_index = 0;
if (!all_contig) {
- sg_to_link_tbl(req->assoc,
- (assoc_nents ? : 1),
- edesc->link_tbl +
- link_tbl_index, 0);
- link_tbl_index += assoc_nents ? : 1;
- sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ sg_to_sec4_sg(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents ? : 1;
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
- link_tbl_index += 1;
- sg_to_link_tbl_last(req->src,
- (src_nents ? : 1),
- edesc->link_tbl +
- link_tbl_index, 0);
- link_tbl_index += src_nents ? : 1;
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->src,
+ (src_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += src_nents ? : 1;
}
if (dst_nents) {
- sg_to_link_tbl_last(req->dst, dst_nents,
- edesc->link_tbl + link_tbl_index, 0);
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
}
return edesc;
@@ -1487,24 +1326,25 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
int sgc;
u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
int ivsize = crypto_aead_ivsize(aead);
- int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
- assoc_nents = sg_count(req->assoc, req->assoclen);
- src_nents = sg_count(req->src, req->cryptlen);
+ assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->cryptlen);
+ dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
- sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
- DMA_BIDIRECTIONAL);
+ sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL, assoc_chained);
if (likely(req->src == req->dst)) {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL);
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
} else {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE);
- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE);
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
}
/* Check if data are contiguous */
@@ -1516,58 +1356,61 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
contig &= ~GIV_DST_CONTIG;
if (unlikely(req->src != req->dst)) {
dst_nents = dst_nents ? : 1;
- link_tbl_len += 1;
+ sec4_sg_len += 1;
}
if (!(contig & GIV_SRC_CONTIG)) {
assoc_nents = assoc_nents ? : 1;
src_nents = src_nents ? : 1;
- link_tbl_len += assoc_nents + 1 + src_nents;
+ sec4_sg_len += assoc_nents + 1 + src_nents;
if (likely(req->src == req->dst))
contig &= ~GIV_DST_CONTIG;
}
- link_tbl_len += dst_nents;
+ sec4_sg_len += dst_nents;
- link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
- link_tbl_bytes, GFP_DMA | flags);
+ sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->assoc_nents = assoc_nents;
+ edesc->assoc_chained = assoc_chained;
edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
edesc->iv_dma = iv_dma;
- edesc->link_tbl_bytes = link_tbl_bytes;
- edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
- desc_bytes;
- edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
- link_tbl_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
*contig_ptr = contig;
- link_tbl_index = 0;
+ sec4_sg_index = 0;
if (!(contig & GIV_SRC_CONTIG)) {
- sg_to_link_tbl(req->assoc, assoc_nents,
- edesc->link_tbl +
- link_tbl_index, 0);
- link_tbl_index += assoc_nents;
- sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ sg_to_sec4_sg(req->assoc, assoc_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += assoc_nents;
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
- link_tbl_index += 1;
- sg_to_link_tbl_last(req->src, src_nents,
- edesc->link_tbl +
- link_tbl_index, 0);
- link_tbl_index += src_nents;
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += src_nents;
}
if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
- sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
iv_dma, ivsize, 0);
- link_tbl_index += 1;
- sg_to_link_tbl_last(req->dst, dst_nents,
- edesc->link_tbl + link_tbl_index, 0);
+ sec4_sg_index += 1;
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
}
return edesc;
@@ -1633,27 +1476,28 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP)) ?
GFP_KERNEL : GFP_ATOMIC;
- int src_nents, dst_nents = 0, link_tbl_bytes;
+ int src_nents, dst_nents = 0, sec4_sg_bytes;
struct ablkcipher_edesc *edesc;
dma_addr_t iv_dma = 0;
bool iv_contig = false;
int sgc;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int link_tbl_index;
+ bool src_chained = false, dst_chained = false;
+ int sec4_sg_index;
- src_nents = sg_count(req->src, req->nbytes);
+ src_nents = sg_count(req->src, req->nbytes, &src_chained);
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->nbytes);
+ if (req->dst != req->src)
+ dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
if (likely(req->src == req->dst)) {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_BIDIRECTIONAL);
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
} else {
- sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
- DMA_TO_DEVICE);
- sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
- DMA_FROM_DEVICE);
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
}
/*
@@ -1665,44 +1509,46 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
iv_contig = true;
else
src_nents = src_nents ? : 1;
- link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
- sizeof(struct link_tbl_entry);
+ sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
- link_tbl_bytes, GFP_DMA | flags);
+ sec4_sg_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
edesc->dst_nents = dst_nents;
- edesc->link_tbl_bytes = link_tbl_bytes;
- edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
- desc_bytes;
+ edesc->dst_chained = dst_chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
- link_tbl_index = 0;
+ sec4_sg_index = 0;
if (!iv_contig) {
- sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
- sg_to_link_tbl_last(req->src, src_nents,
- edesc->link_tbl + 1, 0);
- link_tbl_index += 1 + src_nents;
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg + 1, 0);
+ sec4_sg_index += 1 + src_nents;
}
- if (unlikely(dst_nents)) {
- sg_to_link_tbl_last(req->dst, dst_nents,
- edesc->link_tbl + link_tbl_index, 0);
+ if (dst_nents) {
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
}
- edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
- link_tbl_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
edesc->iv_dma = iv_dma;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
- link_tbl_bytes, 1);
+ print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
#endif
*iv_contig_out = iv_contig;
@@ -2227,7 +2073,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
* distribute tfms across job rings to ensure in-order
* crypto request processing per tfm
*/
- ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
+ ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
/* copy descriptor header template value */
ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
@@ -2264,7 +2110,6 @@ static void __exit caam_algapi_exit(void)
struct device *ctrldev;
struct caam_drv_private *priv;
struct caam_crypto_alg *t_alg, *n;
- int i, err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -2289,13 +2134,6 @@ static void __exit caam_algapi_exit(void)
list_del(&t_alg->entry);
kfree(t_alg);
}
-
- for (i = 0; i < priv->total_jobrs; i++) {
- err = caam_jr_deregister(priv->algapi_jr[i]);
- if (err < 0)
- break;
- }
- kfree(priv->algapi_jr);
}
static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
@@ -2348,7 +2186,7 @@ static int __init caam_algapi_init(void)
{
struct device_node *dev_node;
struct platform_device *pdev;
- struct device *ctrldev, **jrdev;
+ struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
@@ -2369,24 +2207,6 @@ static int __init caam_algapi_init(void)
INIT_LIST_HEAD(&priv->alg_list);
- jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_KERNEL);
- if (!jrdev)
- return -ENOMEM;
-
- for (i = 0; i < priv->total_jobrs; i++) {
- err = caam_jr_register(ctrldev, &jrdev[i]);
- if (err < 0)
- break;
- }
- if (err < 0 && i == 0) {
- dev_err(ctrldev, "algapi error in job ring registration: %d\n",
- err);
- kfree(jrdev);
- return err;
- }
-
- priv->num_jrs_for_algapi = i;
- priv->algapi_jr = jrdev;
atomic_set(&priv->tfm_count, -1);
/* register crypto algorithms the device supports */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
new file mode 100644
index 000000000000..895aaf2bca92
--- /dev/null
+++ b/drivers/crypto/caam/caamhash.c
@@ -0,0 +1,1878 @@
+/*
+ * caam - Freescale FSL CAAM support for ahash functions of crypto API
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship of digest job descriptor or first job descriptor after init to
+ * shared descriptors:
+ *
+ * --------------- ---------------
+ * | JobDesc #1 |-------------------->| ShareDesc |
+ * | *(packet 1) | | (hashKey) |
+ * --------------- | (operation) |
+ * ---------------
+ *
+ * relationship of subsequent job descriptors to shared descriptors:
+ *
+ * --------------- ---------------
+ * | JobDesc #2 |-------------------->| ShareDesc |
+ * | *(packet 2) | |------------->| (hashKey) |
+ * --------------- | |-------->| (operation) |
+ * . | | | (load ctx2) |
+ * . | | ---------------
+ * --------------- | |
+ * | JobDesc #3 |------| |
+ * | *(packet 3) | |
+ * --------------- |
+ * . |
+ * . |
+ * --------------- |
+ * | JobDesc #4 |------------
+ * | *(packet 4) |
+ * ---------------
+ *
+ * The SharedDesc never changes for a connection unless rekeyed, but
+ * each packet will likely be in a different place. So all we need
+ * to know to process the packet is where the input is, where the
+ * output goes, and what context we want to process with. Context is
+ * in the SharedDesc, packet references in the JobDesc.
+ *
+ * So, a job desc looks like:
+ *
+ * ---------------------
+ * | Header |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR |
+ * | (output buffer) |
+ * | (output length) |
+ * | SEQ_IN_PTR |
+ * | (input buffer) |
+ * | (input length) |
+ * ---------------------
+ */
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "key_gen.h"
+
+#define CAAM_CRA_PRIORITY 3000
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* length of descriptors text */
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
+ CAAM_MAX_HASH_KEY_SIZE)
+#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+/* caam context sizes for hashes: running digest + 8 */
+#define HASH_MSG_LEN 8
+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
+#ifdef DEBUG
+/* for print_hex_dumps with line references */
+#define xstr(s) str(s)
+#define str(s) #s
+#define debug(format, arg...) printk(format, arg)
+#else
+#define debug(format, arg...)
+#endif
+
+/* ahash per-session context */
+struct caam_hash_ctx {
+ struct device *jrdev;
+ u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
+ u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
+ dma_addr_t sh_desc_update_dma;
+ dma_addr_t sh_desc_update_first_dma;
+ dma_addr_t sh_desc_fin_dma;
+ dma_addr_t sh_desc_digest_dma;
+ dma_addr_t sh_desc_finup_dma;
+ u32 alg_type;
+ u32 alg_op;
+ u8 key[CAAM_MAX_HASH_KEY_SIZE];
+ dma_addr_t key_dma;
+ int ctx_len;
+ unsigned int split_key_len;
+ unsigned int split_key_pad_len;
+};
+
+/* ahash state */
+struct caam_hash_state {
+ dma_addr_t buf_dma;
+ dma_addr_t ctx_dma;
+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_0;
+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_1;
+ u8 caam_ctx[MAX_CTX_LEN];
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int current_buf;
+};
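+/*
+ * buf_0 and buf_1 are presumably used alternately (selected by current_buf)
+ * so that partial data for the next request can be collected while the
+ * previously filled buffer may still be referenced by an in-flight job.
+ */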
+
+/* Common job descriptor seq in/out ptr routines */
+
+/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
+static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state,
+ int ctx_len)
+{
+ state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
+ ctx_len, DMA_FROM_DEVICE);
+ append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+}
+
+/* Map req->result, and append seq_out_ptr command that points to it */
+static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
+ u8 *result, int digestsize)
+{
+ dma_addr_t dst_dma;
+
+ dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
+ append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+
+ return dst_dma;
+}
+
+/* Map current buffer in state and put it in link table */
+static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
+ struct sec4_sg_entry *sec4_sg,
+ u8 *buf, int buflen)
+{
+ dma_addr_t buf_dma;
+
+ buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+ dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
+
+ return buf_dma;
+}
+
+/* Map req->src and put it in link table */
+static inline void src_map_to_sec4_sg(struct device *jrdev,
+ struct scatterlist *src, int src_nents,
+ struct sec4_sg_entry *sec4_sg,
+ bool chained)
+{
+ dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
+ sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
+}
+
+/*
+ * Unmap any previously used buffer, and only put the current buffer in the
+ * link table if it actually contains data.
+ */
+static inline dma_addr_t
+try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
+ u8 *buf, dma_addr_t buf_dma, int buflen,
+ int last_buflen)
+{
+ if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
+ dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
+ if (buflen)
+ buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
+ else
+ buf_dma = 0;
+
+ return buf_dma;
+}
+
+/* Map state->caam_ctx, and add it to link table */
+static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+ struct caam_hash_state *state,
+ int ctx_len,
+ struct sec4_sg_entry *sec4_sg,
+ u32 flag)
+{
+ state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+ dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+}
+
+/* Common shared descriptor commands */
+static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+}
+
+/* Append key if it has been set */
+static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ if (ctx->split_key_len) {
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_ahash(desc, ctx);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+ }
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For ahash read data from seqin following state->caam_ctx,
+ * and write resulting class2 context to seqout, which may be state->caam_ctx
+ * or req->result
+ */
+static inline void ahash_append_load_str(u32 *desc, int digestsize)
+{
+ /* Calculate remaining bytes to read */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+
+/*
+ * For ahash update, final and finup, import context, read and write to seqout
+ */
+static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize,
+ struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_ahash(desc, ctx);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_2_CCB | ctx->ctx_len);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ ahash_append_load_str(desc, digestsize);
+}
+
+/* For ahash first update and digest, read and write to seqout */
+static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize, struct caam_hash_ctx *ctx)
+{
+ init_sh_desc_key_ahash(desc, ctx);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ */
+ ahash_append_load_str(desc, digestsize);
+}
+
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct device *jrdev = ctx->jrdev;
+ u32 have_key = 0;
+ u32 *desc;
+
+ if (ctx->split_key_len)
+ have_key = OP_ALG_AAI_HMAC_PRECOMP;
+
+ /* ahash_update shared descriptor */
+ desc = ctx->sh_desc_update;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ /* Import context from software */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_2_CCB | ctx->ctx_len);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
+ OP_ALG_ENCRYPT);
+
+ /* Load data and write to result or context */
+ ahash_append_load_str(desc, ctx->ctx_len);
+
+ ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ /* ahash_update_first shared descriptor */
+ desc = ctx->sh_desc_update_first;
+
+ ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
+ ctx->ctx_len, ctx);
+
+ ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ /* ahash_final shared descriptor */
+ desc = ctx->sh_desc_fin;
+
+ ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /* ahash_finup shared descriptor */
+ desc = ctx->sh_desc_finup;
+
+ ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+ OP_ALG_AS_FINALIZE, digestsize, ctx);
+
+ ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /* ahash_digest shared descriptor */
+ desc = ctx->sh_desc_digest;
+
+ ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
+ digestsize, ctx);
+
+ ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 keylen)
+{
+ return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+ ctx->split_key_pad_len, key_in, keylen,
+ ctx->alg_op);
+}
+
+/* Digest the key if it is longer than the algorithm block size */
+static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 *keylen, u8 *key_out, u32 digestsize)
+{
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc;
+ struct split_key_result result;
+ dma_addr_t src_dma, dst_dma;
+ int ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+
+ init_job_desc(desc, 0);
+
+ src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, src_dma)) {
+ dev_err(jrdev, "unable to map key input memory\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+ dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dst_dma)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+ dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ /* Job descriptor to perform unkeyed hash on key_in */
+ append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_in_ptr(desc, src_dma, *keylen, 0);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+ append_seq_out_ptr(desc, dst_dma, digestsize, 0);
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ result.err = 0;
+ init_completion(&result.completion);
+
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ if (!ret) {
+ /* in progress */
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.err;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ digestsize, 1);
+#endif
+ }
+ dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
+ dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+
+ /* Only update the caller's key length after the original length was used to unmap */
+ *keylen = digestsize;
+
+ kfree(desc);
+
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+ int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ret = 0;
+ u8 *hashed_key = NULL;
+
+#ifdef DEBUG
+ printk(KERN_ERR "keylen %d\n", keylen);
+#endif
+
+ if (keylen > blocksize) {
+ hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
+ GFP_DMA);
+ if (!hashed_key)
+ return -ENOMEM;
+ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+ digestsize);
+ if (ret)
+ goto badkey;
+ key = hashed_key;
+ }
+
+ /* Pick class 2 key length from algorithm submask */
+ ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT] * 2;
+ ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
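+ /*
+ * The split key holds both the inner and outer MDHA pad states (hence
+ * the "* 2" above); it is padded up to a 16-byte multiple, apparently
+ * to satisfy the alignment the CAAM expects for encrypted split keys.
+ */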
+
+#ifdef DEBUG
+ printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+ ctx->split_key_len, ctx->split_key_pad_len);
+ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ ret = gen_split_hash_key(ctx, key, keylen);
+ if (ret)
+ goto badkey;
+
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ kfree(hashed_key);
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->split_key_pad_len, 1);
+#endif
+
+ ret = ahash_set_sh_desc(ahash);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
+ }
+
+ kfree(hashed_key);
+ return ret;
+badkey:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: physical mapped address of req->result
+ * @sec4_sg_dma: physical mapped address of h/w link table
+ * @chained: if source is chained
+ * @src_nents: number of segments in input scatterlist
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg: pointer to h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct ahash_edesc {
+ dma_addr_t dst_dma;
+ dma_addr_t sec4_sg_dma;
+ bool chained;
+ int src_nents;
+ int sec4_sg_bytes;
+ struct sec4_sg_entry *sec4_sg;
+ u32 hw_desc[0];
+};
+
+static inline void ahash_unmap(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len)
+{
+ if (edesc->src_nents)
+ dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
+ DMA_TO_DEVICE, edesc->chained);
+ if (edesc->dst_dma)
+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+
+ if (edesc->sec4_sg_bytes)
+ dma_unmap_single(dev, edesc->sec4_sg_dma,
+ edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len, u32 flag)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (state->ctx_dma)
+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ ahash_unmap(dev, edesc, req, dst_len);
+}
+
+static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+#ifdef DEBUG
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+#ifdef DEBUG
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ahash_request *req = context;
+ struct ahash_edesc *edesc;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+#ifdef DEBUG
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ int digestsize = crypto_ahash_digestsize(ahash);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ kfree(edesc);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+#endif
+
+ req->base.complete(&req->base, err);
+}
+
+/* submit update job descriptor */
+static int ahash_update_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
+ u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+ int *next_buflen = state->current_buf ? &state->buflen_0 :
+ &state->buflen_1, last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ u32 *sh_desc = ctx->sh_desc_update, *desc;
+ dma_addr_t ptr = ctx->sh_desc_update_dma;
+ int src_nents, sec4_sg_bytes, sec4_sg_src_index;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ last_buflen = *next_buflen;
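+ /* buffer any partial trailing block so only whole blocks are hashed in this pass */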
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
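+ /* link table: one entry for the running context, an optional one for buffered data, then the source segments */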
+ sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+ ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
+ edesc->sec4_sg + 1,
+ buf, state->buf_dma,
+ *buflen, last_buflen);
+
+ if (src_nents) {
+ src_map_to_sec4_sg(jrdev, req->src, src_nents,
+ edesc->sec4_sg + sec4_sg_src_index,
+ chained);
+ if (*next_buflen) {
+ sg_copy_part(next_buf, req->src, to_hash -
+ *buflen, req->nbytes);
+ state->current_buf = !state->current_buf;
+ }
+ } else {
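+ /* no scatterlist data to add: mark the last populated link-table entry as final */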
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
+ SEC4_SG_LEN_FIN;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+ to_hash, LDST_SGF);
+
+ append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_BIDIRECTIONAL);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ sg_copy(buf + *buflen, req->src, req->nbytes);
+ *buflen = *next_buflen;
+ *next_buflen = last_buflen;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+static int ahash_final_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_fin, *desc;
+ dma_addr_t ptr = ctx->sh_desc_fin_dma;
+ int sec4_sg_bytes;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = 0;
+ int sh_len;
+
+ sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->src_nents = 0;
+
+ ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
+ DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ buf, state->buf_dma, buflen,
+ last_buflen);
+ (edesc->sec4_sg + (buflen ? 1 : 0))->len |= SEC4_SG_LEN_FIN;
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
+ LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_finup, *desc;
+ dma_addr_t ptr = ctx->sh_desc_finup_dma;
+ int sec4_sg_bytes, sec4_sg_src_index;
+ int src_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ src_nents = __sg_count(req->src, req->nbytes, &chained);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
+ ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
+ DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ buf, state->buf_dma, buflen,
+ last_buflen);
+
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
+ sec4_sg_src_index, chained);
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+ buflen + req->nbytes, LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, sec4_sg_bytes;
+ dma_addr_t src_dma;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ u32 options;
+ int sh_len;
+
+ src_nents = sg_count(req->src, req->nbytes, &chained);
+ dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
+ chained);
+ sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
+ DESC_JOB_IO_LEN, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
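+ /* a single contiguous source segment is referenced directly; otherwise describe it with a link table */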
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ src_dma = edesc->sec4_sg_dma;
+ options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ options = 0;
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit ahash final if it is the first job descriptor */
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = 0;
+ int sh_len;
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
+ GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+ edesc->src_nents = 0;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit ahash update if it is the first job descriptor after update */
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
+ u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
+ int *next_buflen = state->current_buf ? &state->buflen_0 :
+ &state->buflen_1;
+ int in_len = *buflen + req->nbytes, to_hash;
+ int sec4_sg_bytes, src_nents;
+ struct ahash_edesc *edesc;
+ u32 *desc, *sh_desc = ctx->sh_desc_update_first;
+ dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
+ sec4_sg_bytes = (1 + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+ state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
+ buf, *buflen);
+ src_map_to_sec4_sg(jrdev, req->src, src_nents,
+ edesc->sec4_sg + 1, chained);
+ if (*next_buflen) {
+ sg_copy_part(next_buf, req->src, to_hash - *buflen,
+ req->nbytes);
+ state->current_buf = !state->current_buf;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
+
+ map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_TO_DEVICE);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ sg_copy(buf + *buflen, req->src, req->nbytes);
+ *buflen = *next_buflen;
+ *next_buflen = 0;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+/* submit ahash finup if it is the first job descriptor after update */
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
+ int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+ int last_buflen = state->current_buf ? state->buflen_0 :
+ state->buflen_1;
+ u32 *sh_desc = ctx->sh_desc_digest, *desc;
+ dma_addr_t ptr = ctx->sh_desc_digest_dma;
+ int sec4_sg_bytes, sec4_sg_src_index, src_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int sh_len;
+ int ret = 0;
+
+ src_nents = __sg_count(req->src, req->nbytes, &chained);
+ sec4_sg_src_index = 2;
+ sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
+ state->buf_dma, buflen,
+ last_buflen);
+
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
+ chained);
+
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
+ req->nbytes, LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+/* submit first update job descriptor after init */
+static int ahash_update_first(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
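+ /* this indexing relies on buf_1/buflen_1 immediately following buf_0/buflen_0 in caam_hash_state */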
+ u8 *next_buf = state->buf_0 + state->current_buf *
+ CAAM_MAX_HASH_BLOCK_SIZE;
+ int *next_buflen = &state->buflen_0 + state->current_buf;
+ int to_hash;
+ u32 *sh_desc = ctx->sh_desc_update_first, *desc;
+ dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+ int sec4_sg_bytes, src_nents;
+ dma_addr_t src_dma;
+ u32 options;
+ struct ahash_edesc *edesc;
+ bool chained = false;
+ int ret = 0;
+ int sh_len;
+
+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+ 1);
+ to_hash = req->nbytes - *next_buflen;
+
+ if (to_hash) {
+ src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
+ &chained);
+ dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, chained);
+ sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+ edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+ "could not allocate extended descriptor\n");
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes,
+ DMA_TO_DEVICE);
+
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg, 0);
+ src_dma = edesc->sec4_sg_dma;
+ options = LDST_SGF;
+ } else {
+ src_dma = sg_dma_address(req->src);
+ options = 0;
+ }
+
+ if (*next_buflen)
+ sg_copy_part(next_buf, req->src, to_hash, req->nbytes);
+
+ sh_len = desc_len(sh_desc);
+ desc = edesc->hw_desc;
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ append_seq_in_ptr(desc, src_dma, to_hash, options);
+
+ map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
+ req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else {
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
+ DMA_TO_DEVICE);
+ kfree(edesc);
+ }
+ } else if (*next_buflen) {
+ state->update = ahash_update_no_ctx;
+ state->finup = ahash_finup_no_ctx;
+ state->final = ahash_final_no_ctx;
+ sg_copy(next_buf, req->src, req->nbytes);
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+#endif
+
+ return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
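+ /* nothing has been hashed yet, so finup of the whole request reduces to a one-shot digest */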
+ return ahash_digest(req);
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ state->update = ahash_update_first;
+ state->finup = ahash_finup_first;
+ state->final = ahash_final_no_ctx;
+
+ state->current_buf = 0;
+
+ return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->final(req);
+}
+
+static int ahash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(struct caam_hash_ctx));
+ memcpy(out + sizeof(struct caam_hash_ctx), state,
+ sizeof(struct caam_hash_state));
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(struct caam_hash_ctx));
+ memcpy(state, in + sizeof(struct caam_hash_ctx),
+ sizeof(struct caam_hash_state));
+ return 0;
+}
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+ u32 alg_op;
+};
+
+/* ahash descriptors */
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .driver_name = "sha1-caam",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-caam",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-caam",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-caam",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-caam",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-caam",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-caam",
+ .blocksize = MD5_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+};
+
+struct caam_hash_alg {
+ struct list_head entry;
+ struct device *ctrldev;
+ int alg_type;
+ int alg_op;
+ struct ahash_alg ahash_alg;
+};
+
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+ HASH_MSG_LEN + 32,
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ int tgt_jr = atomic_inc_return(&priv->tfm_count);
+ int ret = 0;
+
+ /*
+ * distribute tfms across job rings to ensure in-order
+ * crypto request processing per tfm
+ */
+ ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
+
+ /* copy descriptor header template value */
+ ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+ ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+
+ ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ ret = ahash_set_sh_desc(ahash);
+
+ return ret;
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->sh_desc_update_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(ctx->sh_desc_update),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_update_first_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(ctx->sh_desc_update_first),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_fin_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
+ if (ctx->sh_desc_digest_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(ctx->sh_desc_digest),
+ DMA_TO_DEVICE);
+ if (ctx->sh_desc_finup_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+}
+
+static void __exit caam_algapi_hash_exit(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+ struct caam_hash_alg *t_alg, *n;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node)
+ return;
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+ return;
+
+ ctrldev = &pdev->dev;
+ of_node_put(dev_node);
+ priv = dev_get_drvdata(ctrldev);
+
+ if (!priv->hash_list.next)
+ return;
+
+ list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
+ crypto_unregister_ahash(&t_alg->ahash_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+}
+
+static struct caam_hash_alg *
+caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+ bool keyed)
+{
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+ if (!t_alg) {
+ dev_err(ctrldev, "failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ t_alg->ahash_alg = template->template_ahash;
+ halg = &t_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
+ alg->cra_type = &crypto_ahash_type;
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->alg_op = template->alg_op;
+ t_alg->ctrldev = ctrldev;
+
+ return t_alg;
+}
+
+static int __init caam_algapi_hash_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+ int i = 0, err = 0;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node)
+ return -ENODEV;
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+ return -ENODEV;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ INIT_LIST_HEAD(&priv->hash_list);
+
+ atomic_set(&priv->tfm_count, -1);
+
+ /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+ /* TODO: check if h/w supports alg */
+ struct caam_hash_alg *t_alg;
+
+ /* register hmac version */
+ t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(ctrldev, "%s alg allocation failed\n",
+ driver_hash[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ } else
+ list_add_tail(&t_alg->entry, &priv->hash_list);
+
+ /* register unkeyed version */
+ t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(ctrldev, "%s alg allocation failed\n",
+ driver_hash[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ } else
+ list_add_tail(&t_alg->entry, &priv->hash_list);
+ }
+
+ return err;
+}
+
+module_init(caam_algapi_hash_init);
+module_exit(caam_algapi_hash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
new file mode 100644
index 000000000000..e2bfe161dece
--- /dev/null
+++ b/drivers/crypto/caam/caamrng.c
@@ -0,0 +1,309 @@
+/*
+ * caam - Freescale FSL CAAM support for hw_random
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+ * relationship between job descriptors and shared descriptors:
+ *
+ * --------------- --------------
+ * | JobDesc #0 |-------------------->| ShareDesc |
+ * | *(buffer 0) | |------------->| (generate) |
+ * --------------- | | (move) |
+ * | | (store) |
+ * --------------- | --------------
+ * | JobDesc #1 |------|
+ * | *(buffer 1) |
+ * ---------------
+ *
+ * A job desc looks like this:
+ *
+ * ---------------------
+ * | Header |
+ * | ShareDesc Pointer |
+ * | SEQ_OUT_PTR |
+ * | (output buffer) |
+ * ---------------------
+ *
+ * The SharedDesc never changes, and each job descriptor points to one of two
+ * buffers for each device, from which the data will be copied into the
+ * requested destination.
+ */
+
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+
+#include "compat.h"
+
+#include "regs.h"
+#include "intern.h"
+#include "desc_constr.h"
+#include "jr.h"
+#include "error.h"
+
+/*
+ * Maximum buffer size: maximum number of random, cache-aligned bytes that
+ * will be generated and moved to seq out ptr (extlen not allowed)
+ */
+#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
+ L1_CACHE_BYTES)
+
+/* length of descriptors */
+#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
+#define DESC_RNG_LEN (10 * CAAM_CMD_SZ)
+
+/* Buffer, its dma address and lock */
+struct buf_data {
+ u8 buf[RN_BUF_SIZE];
+ dma_addr_t addr;
+ struct completion filled;
+ u32 hw_desc[DESC_JOB_O_LEN];
+#define BUF_NOT_EMPTY 0
+#define BUF_EMPTY 1
+#define BUF_PENDING 2 /* Empty, but with a job pending -- don't submit another */
+ atomic_t empty;
+};
+
+/* rng per-device context */
+struct caam_rng_ctx {
+ struct device *jrdev;
+ dma_addr_t sh_desc_dma;
+ u32 sh_desc[DESC_RNG_LEN];
+ unsigned int cur_buf_idx;
+ int current_buf;
+ struct buf_data bufs[2];
+};
+
+static struct caam_rng_ctx rng_ctx;
+
+static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
+{
+ if (bd->addr)
+ dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
+ DMA_FROM_DEVICE);
+}
+
+static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+{
+ struct device *jrdev = ctx->jrdev;
+
+ if (ctx->sh_desc_dma)
+ dma_unmap_single(jrdev, ctx->sh_desc_dma, DESC_RNG_LEN,
+ DMA_TO_DEVICE);
+ rng_unmap_buf(jrdev, &ctx->bufs[0]);
+ rng_unmap_buf(jrdev, &ctx->bufs[1]);
+}
+
+static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
+{
+ struct buf_data *bd;
+
+ bd = (struct buf_data *)((char *)desc -
+ offsetof(struct buf_data, hw_desc));
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ atomic_set(&bd->empty, BUF_NOT_EMPTY);
+ complete(&bd->filled);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
+#endif
+}
+
+static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
+{
+ struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = bd->hw_desc;
+ int err;
+
+ dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
+ init_completion(&bd->filled);
+ err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
+ if (err)
+ complete(&bd->filled); /* don't wait on a failed job */
+ else
+ atomic_inc(&bd->empty); /* note if pending */
+
+ return err;
+}
+
+static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct caam_rng_ctx *ctx = &rng_ctx;
+ struct buf_data *bd = &ctx->bufs[ctx->current_buf];
+ int next_buf_idx, copied_idx;
+ int err;
+
+ if (atomic_read(&bd->empty)) {
+ /* try to submit job if there wasn't one */
+ if (atomic_read(&bd->empty) == BUF_EMPTY) {
+ err = submit_job(ctx, 1);
+ /* if can't submit job, can't even wait */
+ if (err)
+ return 0;
+ }
+ /* no immediate data, so exit if not waiting */
+ if (!wait)
+ return 0;
+
+ /* waiting for pending job */
+ if (atomic_read(&bd->empty))
+ wait_for_completion(&bd->filled);
+ }
+
+ next_buf_idx = ctx->cur_buf_idx + max;
+ dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
+ __func__, ctx->current_buf, ctx->cur_buf_idx);
+
+ /* if enough data in current buffer */
+ if (next_buf_idx < RN_BUF_SIZE) {
+ memcpy(data, bd->buf + ctx->cur_buf_idx, max);
+ ctx->cur_buf_idx = next_buf_idx;
+ return max;
+ }
+
+ /* else, copy what's left... */
+ copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
+ memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
+ ctx->cur_buf_idx = 0;
+ atomic_set(&bd->empty, BUF_EMPTY);
+
+ /* ...refill... */
+ submit_job(ctx, 1);
+
+ /* and use next buffer */
+ ctx->current_buf = !ctx->current_buf;
+ dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
+
+ /* since there already is some data read, don't wait */
+ return copied_idx + caam_read(rng, data + copied_idx,
+ max - copied_idx, false);
+}
+
+static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+{
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = ctx->sh_desc;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+
+ /* Generate random bytes */
+ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
+
+ /* Store bytes */
+ append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
+
+ ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+#endif
+}
+
+static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+{
+ struct device *jrdev = ctx->jrdev;
+ struct buf_data *bd = &ctx->bufs[buf_id];
+ u32 *desc = bd->hw_desc;
+ int sh_len = desc_len(ctx->sh_desc);
+
+ init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+ bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
+ append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+#endif
+}
+
+static void caam_cleanup(struct hwrng *rng)
+{
+ int i;
+ struct buf_data *bd;
+
+ for (i = 0; i < 2; i++) {
+ bd = &rng_ctx.bufs[i];
+ if (atomic_read(&bd->empty) == BUF_PENDING)
+ wait_for_completion(&bd->filled);
+ }
+
+ rng_unmap_ctx(&rng_ctx);
+}
+
+static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+{
+ struct buf_data *bd = &ctx->bufs[buf_id];
+
+ rng_create_job_desc(ctx, buf_id);
+ atomic_set(&bd->empty, BUF_EMPTY);
+ submit_job(ctx, buf_id == ctx->current_buf);
+ wait_for_completion(&bd->filled);
+}
+
+static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+{
+ ctx->jrdev = jrdev;
+ rng_create_sh_desc(ctx);
+ ctx->current_buf = 0;
+ ctx->cur_buf_idx = 0;
+ caam_init_buf(ctx, 0);
+ caam_init_buf(ctx, 1);
+}
+
+static struct hwrng caam_rng = {
+ .name = "rng-caam",
+ .cleanup = caam_cleanup,
+ .read = caam_read,
+};
+
+static void __exit caam_rng_exit(void)
+{
+ hwrng_unregister(&caam_rng);
+}
+
+static int __init caam_rng_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node)
+ return -ENODEV;
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev)
+ return -ENODEV;
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ caam_init_rng(&rng_ctx, priv->jrdev[0]);
+
+ dev_info(priv->jrdev[0], "registering rng-caam\n");
+ return hwrng_register(&caam_rng);
+}
+
+module_init(caam_rng_init);
+module_exit(caam_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
+MODULE_AUTHOR("Freescale Semiconductor - NMG");
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index a63bc65fae86..762aeff626ac 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
+#include <linux/hash.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
@@ -33,5 +34,6 @@
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 77557ebcd337..414ba20c05a1 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -2,13 +2,16 @@
* CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "ctrl.h"
static int caam_remove(struct platform_device *pdev)
{
@@ -43,10 +46,154 @@ static int caam_remove(struct platform_device *pdev)
return ret;
}
+/*
+ * Descriptor to instantiate RNG State Handle 0 in normal mode and
+ * load the JDKEK, TDKEK and TDSK registers
+ */
+static void build_instantiation_desc(u32 *desc)
+{
+ u32 *jump_cmd;
+
+ init_job_desc(desc, 0);
+
+ /* INIT RNG in non-test mode */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_AS_INIT);
+
+ /* wait for done */
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /*
+ * load 1 to clear written reg:
+ * resets the done interrupt and returns the RNG to idle.
+ */
+ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+ /* generate secure keys (non-test) */
+ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+ OP_ALG_RNG4_SK);
+}
+
+struct instantiate_result {
+ struct completion completion;
+ int err;
+};
+
+static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct instantiate_result *instantiation = context;
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ instantiation->err = err;
+ complete(&instantiation->completion);
+}
+
+static int instantiate_rng(struct device *jrdev)
+{
+ struct instantiate_result instantiation;
+
+ dma_addr_t desc_dma;
+ u32 *desc;
+ int ret;
+
+ desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
+ return -ENOMEM;
+ }
+
+ build_instantiation_desc(desc);
+ desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
+ init_completion(&instantiation.completion);
+ ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
+ if (!ret) {
+ wait_for_completion_interruptible(&instantiation.completion);
+ ret = instantiation.err;
+ if (ret)
+ dev_err(jrdev, "unable to instantiate RNG\n");
+ }
+
+ dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);
+
+ kfree(desc);
+
+ return ret;
+}
+
+/*
+ * By default, the TRNG runs for 200 clocks per sample;
+ * 800 clocks per sample generates better entropy.
+ */
+static void kick_trng(struct platform_device *pdev)
+{
+ struct device *ctrldev = &pdev->dev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+ struct caam_full __iomem *topregs;
+ struct rng4tst __iomem *r4tst;
+ u32 val;
+
+ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+ r4tst = &topregs->ctrl.r4tst[0];
+
+ /* put RNG4 into program mode */
+ setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ /* 800 clocks per sample */
+ val = rd_reg32(&r4tst->rtsdctl);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) | (800 << RTSDCTL_ENT_DLY_SHIFT);
+ wr_reg32(&r4tst->rtsdctl, val);
+ /* min. freq. count */
+ wr_reg32(&r4tst->rtfrqmin, 400);
+ /* max. freq. count */
+ wr_reg32(&r4tst->rtfrqmax, 6400);
+ /* put RNG4 into run mode */
+ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+}
+
+/**
+ * caam_get_era() - Return the ERA of the SEC on the SoC, based on
+ * the SEC_VID register.
+ * @caam_id: the value of the SEC_VID register
+ * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
+ */
+int caam_get_era(u64 caam_id)
+{
+ struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
+ static const struct {
+ u16 ip_id;
+ u8 maj_rev;
+ u8 era;
+ } caam_eras[] = {
+ {0x0A10, 1, 1},
+ {0x0A10, 2, 2},
+ {0x0A12, 1, 3},
+ {0x0A14, 1, 3},
+ {0x0A14, 2, 4},
+ {0x0A16, 1, 4},
+ {0x0A11, 1, 4}
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
+ if (caam_eras[i].ip_id == sec_vid->ip_id &&
+ caam_eras[i].maj_rev == sec_vid->maj_rev)
+ return caam_eras[i].era;
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(caam_get_era);
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
- int ring, rspec;
+ int ret, ring, rspec;
+ u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
@@ -82,13 +229,18 @@ static int caam_probe(struct platform_device *pdev)
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
- * 36-bit pointers in master configuration register
+ * long pointers in master configuration register
*/
setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
if (sizeof(dma_addr_t) == sizeof(u64))
- dma_set_mask(dev, DMA_BIT_MASK(36));
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+ dma_set_mask(dev, DMA_BIT_MASK(40));
+ else
+ dma_set_mask(dev, DMA_BIT_MASK(36));
+ else
+ dma_set_mask(dev, DMA_BIT_MASK(32));
/*
* Detect and enable JobRs
@@ -141,14 +293,29 @@ static int caam_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ /*
+ * RNG4 based SECs (v5+) need special initialization prior
+ * to executing any descriptors
+ */
+ if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) {
+ kick_trng(pdev);
+ ret = instantiate_rng(ctrlpriv->jrdev[0]);
+ if (ret) {
+ caam_remove(pdev);
+ return ret;
+ }
+ }
+
/* NOTE: RTIC detection ought to go here, around Si time */
/* Initialize queue allocator lock */
spin_lock_init(&ctrlpriv->jr_alloc_lock);
+ caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
+
/* Report "alive" for developer to see */
- dev_info(dev, "device ID = 0x%016llx\n",
- rd_reg64(&topregs->ctrl.perfmon.caam_id));
+ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
+ caam_get_era(caam_id));
dev_info(dev, "job rings = %d, qi = %d\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
new file mode 100644
index 000000000000..980d44eaaf40
--- /dev/null
+++ b/drivers/crypto/caam/ctrl.h
@@ -0,0 +1,13 @@
+/*
+ * CAAM control-plane driver backend public-level include definitions
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ */
+
+#ifndef CTRL_H
+#define CTRL_H
+
+/* Prototypes for backend-level services exposed to APIs */
+int caam_get_era(u64 caam_id);
+
+#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index a17c2958dab1..f7f833be8c67 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -8,6 +8,16 @@
#ifndef DESC_H
#define DESC_H
+struct sec4_sg_entry {
+ u64 ptr;
+#define SEC4_SG_LEN_FIN 0x40000000
+#define SEC4_SG_LEN_EXT 0x80000000
+ u32 len;
+ u8 reserved;
+ u8 buf_pool_id;
+ u16 offset;
+};
+
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
#define MAX_CAAM_DESCSIZE 64
@@ -1162,6 +1172,11 @@
#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+/* RNG4 set */
+#define OP_ALG_RNG4_SHIFT 4
+#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
+
+#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
#define OP_ALG_AS_SHIFT 2
#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
@@ -1585,20 +1600,4 @@
#define NFIFOENTRY_PLEN_SHIFT 0
#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
-/*
- * PDB internal definitions
- */
-
-/* IPSec ESP CBC Encap/Decap Options */
-#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
-#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
-#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
-#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
-#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
-#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
-#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
-#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
-#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
-#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
-
#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 348b882275f0..c85c1f058401 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -1,7 +1,7 @@
/*
* caam descriptor construction helper functions
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#include "desc.h"
@@ -51,7 +51,7 @@ static inline void *sh_desc_pdb(u32 *desc)
static inline void init_desc(u32 *desc, u32 options)
{
- *desc = options | HDR_ONE | 1;
+ *desc = (options | HDR_ONE) + 1;
}
static inline void init_sh_desc(u32 *desc, u32 options)
@@ -62,9 +62,9 @@ static inline void init_sh_desc(u32 *desc, u32 options)
static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
{
- u32 pdb_len = pdb_bytes / CAAM_CMD_SZ + 1;
+ u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
- init_sh_desc(desc, ((pdb_len << HDR_START_IDX_SHIFT) + pdb_len) |
+ init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) |
options);
}
@@ -117,6 +117,15 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
append_ptr(desc, ptr);
}
+/* Write length after pointer, rather than inside command */
+static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+ unsigned int len, u32 command)
+{
+ append_cmd(desc, command);
+ append_ptr(desc, ptr);
+ append_cmd(desc, len);
+}
+
static inline void append_cmd_data(u32 *desc, void *data, int len,
u32 command)
{
@@ -166,13 +175,22 @@ static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
}
APPEND_CMD_PTR(key, KEY)
-APPEND_CMD_PTR(seq_in_ptr, SEQ_IN_PTR)
-APPEND_CMD_PTR(seq_out_ptr, SEQ_OUT_PTR)
APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(store, STORE)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
+static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+ unsigned int len, \
+ u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+}
+APPEND_SEQ_PTR_INTLEN(in, IN)
+APPEND_SEQ_PTR_INTLEN(out, OUT)
+
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
unsigned int len, u32 options) \
@@ -183,6 +201,33 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
+#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
+static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+ unsigned int len, u32 options) \
+{ \
+ PRINT_POS; \
+ append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
+}
+APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR)
+APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
+
+/*
+ * Determine whether to store length internally or externally depending on
+ * the size of its type
+ */
+#define APPEND_CMD_PTR_LEN(cmd, op, type) \
+static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+ type len, u32 options) \
+{ \
+ PRINT_POS; \
+ if (sizeof(type) > sizeof(u16)) \
+ append_##cmd##_extlen(desc, ptr, len, options); \
+ else \
+ append_##cmd##_intlen(desc, ptr, len, options); \
+}
+APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32)
+APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
+
/*
* 2nd variant for commands whose specified immediate length differs
* from length of immediate data provided, e.g., split keys
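
The init_sh_desc_pdb() hunk above replaces a length formula that overshot by one command word whenever the PDB size was already a multiple of CAAM_CMD_SZ, and moves the execution start index one word past the PDB. A minimal stand-alone sketch of that arithmetic follows; CAAM_CMD_SZ is assumed to be 4 bytes here purely for illustration and is not taken from the headers.

#include <stdio.h>

#define CAAM_CMD_SZ 4	/* assumed: one 32-bit descriptor command word */

/* old formula: overshoots by one word when pdb_bytes is already a multiple */
static unsigned int pdb_len_old(size_t pdb_bytes)
{
	return pdb_bytes / CAAM_CMD_SZ + 1;
}

/* new formula: plain round-up to whole command words */
static unsigned int pdb_len_new(size_t pdb_bytes)
{
	return (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
}

int main(void)
{
	size_t sizes[] = { 8, 10, 16, 31 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("pdb_bytes=%2zu old=%u new=%u start_idx=%u\n",
		       sizes[i], pdb_len_old(sizes[i]), pdb_len_new(sizes[i]),
		       pdb_len_new(sizes[i]) + 1);
	return 0;
}
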
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 7e2d54bffad6..9955ed9643e6 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -39,18 +39,20 @@ static void report_ccb_status(u32 status, char *outstr)
char *cha_id_list[] = {
"",
"AES",
- "DES, 3DES",
+ "DES",
"ARC4",
- "MD5, SHA-1, SH-224, SHA-256, SHA-384, SHA-512",
+ "MDHA",
"RNG",
"SNOW f8",
- "Kasumi f8, f9",
- "All Public Key Algorithms",
- "CRC",
+ "Kasumi f8/9",
+ "PKHA",
+ "CRCA",
"SNOW f9",
+ "ZUCE",
+ "ZUCA",
};
char *err_id_list[] = {
- "None. No error.",
+ "No error.",
"Mode error.",
"Data size error.",
"Key size error.",
@@ -67,6 +69,20 @@ static void report_ccb_status(u32 status, char *outstr)
"Invalid CHA combination was selected",
"Invalid CHA selected.",
};
+ char *rng_err_id_list[] = {
+ "",
+ "",
+ "",
+ "Instantiate",
+ "Not instantiated",
+ "Test instantiate",
+ "Prediction resistance",
+ "",
+ "Prediction resistance and test request",
+ "Uninstantiate",
+ "",
+ "Secure key generation",
+ };
u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
JRSTA_CCBERR_CHAID_SHIFT;
u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
@@ -81,7 +97,13 @@ static void report_ccb_status(u32 status, char *outstr)
cha_id, sizeof("ff"));
}
- if (err_id < ARRAY_SIZE(err_id_list)) {
+ if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
+ err_id < ARRAY_SIZE(rng_err_id_list) &&
+ strlen(rng_err_id_list[err_id])) {
+ /* RNG-only error */
+ SPRINTFCAT(outstr, "%s", rng_err_id_list[err_id],
+ strlen(rng_err_id_list[err_id]));
+ } else if (err_id < ARRAY_SIZE(err_id_list)) {
SPRINTFCAT(outstr, "%s", err_id_list[err_id],
strlen(err_id_list[err_id]));
} else {
@@ -101,10 +123,10 @@ static void report_deco_status(u32 status, char *outstr)
u8 value;
char *error_text;
} desc_error_list[] = {
- { 0x00, "None. No error." },
+ { 0x00, "No error." },
{ 0x01, "SGT Length Error. The descriptor is trying to read "
"more data than is contained in the SGT table." },
- { 0x02, "Reserved." },
+ { 0x02, "SGT Null Entry Error." },
{ 0x03, "Job Ring Control Error. There is a bad value in the "
"Job Ring Control register." },
{ 0x04, "Invalid Descriptor Command. The Descriptor Command "
@@ -116,7 +138,7 @@ static void report_deco_status(u32 status, char *outstr)
{ 0x09, "Invalid OPERATION Command" },
{ 0x0A, "Invalid FIFO LOAD Command" },
{ 0x0B, "Invalid FIFO STORE Command" },
- { 0x0C, "Invalid MOVE Command" },
+ { 0x0C, "Invalid MOVE/MOVE_LEN Command" },
{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
"invalid because the target is not a Job Header "
"Command, or the jump is from a Trusted Descriptor to "
@@ -166,6 +188,8 @@ static void report_deco_status(u32 status, char *outstr)
"(input frame; block ciphers) and IPsec decap (output "
"frame, when doing the next header byte update) and "
"DCRC (output frame)." },
+ { 0x23, "Read Input Frame error" },
+ { 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
{ 0x80, "DNR (do not run) error" },
{ 0x81, "undefined protocol command" },
{ 0x82, "invalid setting in PDB" },
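
A stand-alone model of the dispatch added to report_ccb_status() above: when the failing CHA is the RNG and the RNG-specific table has a non-empty entry for the error ID, that string is used, otherwise the generic error-ID table applies. The table contents, shift, mask and CHA ID below are illustrative stand-ins, not the JRSTA_CCBERR_* hardware values.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* illustrative stand-ins for the JRSTA_CCBERR_* layout */
#define CHAID_SHIFT	4
#define CHAID_MASK	0xf0u
#define ERRID_MASK	0x0fu
#define CHAID_RNG	(0x5u << CHAID_SHIFT)	/* assumed RNG CHA id */

static const char *err_id_list[] = { "No error.", "Mode error.", "Data size error." };
static const char *rng_err_id_list[] = { "", "", "", "Instantiate" };

static const char *ccb_error_string(uint32_t status)
{
	uint8_t cha_id = (status & CHAID_MASK) >> CHAID_SHIFT;
	uint8_t err_id = status & ERRID_MASK;

	/* RNG-only error: a non-empty entry in the RNG table wins */
	if ((cha_id << CHAID_SHIFT) == CHAID_RNG &&
	    err_id < ARRAY_SIZE(rng_err_id_list) &&
	    strlen(rng_err_id_list[err_id]))
		return rng_err_id_list[err_id];

	if (err_id < ARRAY_SIZE(err_id_list))
		return err_id_list[err_id];

	return "unknown error id";
}

int main(void)
{
	printf("%s\n", ccb_error_string(0x53));	/* RNG CHA, err 3 */
	printf("%s\n", ccb_error_string(0x12));	/* non-RNG CHA, err 2 */
	return 0;
}
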
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index a34be01b0b29..5cd4c1b268a1 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -43,7 +43,7 @@ struct caam_drv_private_jr {
struct device *parentdev; /* points back to controller dev */
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
- struct tasklet_struct irqtask[NR_CPUS];
+ struct tasklet_struct irqtask;
int irq; /* One per queue */
int assign; /* busy/free */
@@ -86,10 +86,10 @@ struct caam_drv_private {
/* which jr allocated to scatterlist crypto */
atomic_t tfm_count ____cacheline_aligned;
- int num_jrs_for_algapi;
- struct device **algapi_jr;
/* list of registered crypto algorithms (mk generic context handle?) */
struct list_head alg_list;
+ /* list of registered hash algorithms (mk generic context handle?) */
+ struct list_head hash_list;
/*
* debugfs entries for developer view into driver/device
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 340fa322c0f0..53c8c51d5881 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -2,7 +2,7 @@
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
*
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#include "compat.h"
@@ -43,7 +43,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
preempt_disable();
- tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
+ tasklet_schedule(&jrp->irqtask);
preempt_enable();
return IRQ_HANDLED;
@@ -58,17 +58,16 @@ static void caam_jr_dequeue(unsigned long devarg)
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
- unsigned long flags;
- spin_lock_irqsave(&jrp->outlock, flags);
+ while (rd_reg32(&jrp->rregs->outring_used)) {
- head = ACCESS_ONCE(jrp->head);
- sw_idx = tail = jrp->tail;
+ head = ACCESS_ONCE(jrp->head);
- while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
- rd_reg32(&jrp->rregs->outring_used)) {
+ spin_lock_bh(&jrp->outlock);
+ sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
+
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
@@ -95,7 +94,8 @@ static void caam_jr_dequeue(unsigned long devarg)
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
userstatus = jrp->outring[hw_idx].jrstatus;
- smp_mb();
+ /* set done */
+ wr_reg32(&jrp->rregs->outring_rmvd, 1);
jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
(JOBR_DEPTH - 1);
@@ -115,22 +115,12 @@ static void caam_jr_dequeue(unsigned long devarg)
jrp->tail = tail;
}
- /* set done */
- wr_reg32(&jrp->rregs->outring_rmvd, 1);
-
- spin_unlock_irqrestore(&jrp->outlock, flags);
+ spin_unlock_bh(&jrp->outlock);
/* Finally, execute user's callback */
usercall(dev, userdesc, userstatus, userarg);
-
- spin_lock_irqsave(&jrp->outlock, flags);
-
- head = ACCESS_ONCE(jrp->head);
- sw_idx = tail = jrp->tail;
}
- spin_unlock_irqrestore(&jrp->outlock, flags);
-
/* reenable / unmask IRQs */
clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}
@@ -148,23 +138,22 @@ int caam_jr_register(struct device *ctrldev, struct device **rdev)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_drv_private_jr *jrpriv = NULL;
- unsigned long flags;
int ring;
/* Lock, if free ring - assign, unlock */
- spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
+ spin_lock(&ctrlpriv->jr_alloc_lock);
for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
if (jrpriv->assign == JOBR_UNASSIGNED) {
jrpriv->assign = JOBR_ASSIGNED;
*rdev = ctrlpriv->jrdev[ring];
- spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+ spin_unlock(&ctrlpriv->jr_alloc_lock);
return ring;
}
}
/* If assigned, write dev where caller needs it */
- spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+ spin_unlock(&ctrlpriv->jr_alloc_lock);
*rdev = NULL;
return -ENODEV;
@@ -182,7 +171,6 @@ int caam_jr_deregister(struct device *rdev)
{
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
struct caam_drv_private *ctrlpriv;
- unsigned long flags;
/* Get the owning controller's private space */
ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
@@ -195,9 +183,9 @@ int caam_jr_deregister(struct device *rdev)
return -EBUSY;
/* Release ring */
- spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
+ spin_lock(&ctrlpriv->jr_alloc_lock);
jrpriv->assign = JOBR_UNASSIGNED;
- spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
+ spin_unlock(&ctrlpriv->jr_alloc_lock);
return 0;
}
@@ -238,7 +226,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct caam_jrentry_info *head_entry;
- unsigned long flags;
int head, tail, desc_size;
dma_addr_t desc_dma;
@@ -249,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
return -EIO;
}
- spin_lock_irqsave(&jrp->inplock, flags);
+ spin_lock(&jrp->inplock);
head = jrp->head;
tail = ACCESS_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
- spin_unlock_irqrestore(&jrp->inplock, flags);
+ spin_unlock(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
return -EBUSY;
}
@@ -276,11 +263,9 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
- wmb();
-
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
- spin_unlock_irqrestore(&jrp->inplock, flags);
+ spin_unlock(&jrp->inplock);
return 0;
}
@@ -337,11 +322,9 @@ static int caam_jr_init(struct device *dev)
jrp = dev_get_drvdata(dev);
- /* Connect job ring interrupt handler. */
- for_each_possible_cpu(i)
- tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
- (unsigned long)dev);
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+ /* Connect job ring interrupt handler. */
error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
"caam-jobr", dev);
if (error) {
@@ -356,10 +339,11 @@ static int caam_jr_init(struct device *dev)
if (error)
return error;
- jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
- GFP_KERNEL | GFP_DMA);
- jrp->outring = kzalloc(sizeof(struct jr_outentry) *
- JOBR_DEPTH, GFP_KERNEL | GFP_DMA);
+ jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ &inpbusaddr, GFP_KERNEL);
+
+ jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
+ JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
GFP_KERNEL);
@@ -375,31 +359,6 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
- inpbusaddr = dma_map_single(dev, jrp->inpring,
- sizeof(u32 *) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, inpbusaddr)) {
- dev_err(dev, "caam_jr_init(): can't map input ring\n");
- kfree(jrp->inpring);
- kfree(jrp->outring);
- kfree(jrp->entinfo);
- return -EIO;
- }
-
- outbusaddr = dma_map_single(dev, jrp->outring,
- sizeof(struct jr_outentry) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, outbusaddr)) {
- dev_err(dev, "caam_jr_init(): can't map output ring\n");
- dma_unmap_single(dev, inpbusaddr,
- sizeof(u32 *) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
- kfree(jrp->inpring);
- kfree(jrp->outring);
- kfree(jrp->entinfo);
- return -EIO;
- }
-
jrp->inp_ring_write_index = 0;
jrp->out_ring_read_index = 0;
jrp->head = 0;
@@ -431,12 +390,11 @@ int caam_jr_shutdown(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
dma_addr_t inpbusaddr, outbusaddr;
- int ret, i;
+ int ret;
ret = caam_reset_hw_jr(dev);
- for_each_possible_cpu(i)
- tasklet_kill(&jrp->irqtask[i]);
+ tasklet_kill(&jrp->irqtask);
/* Release interrupt */
free_irq(jrp->irq, dev);
@@ -444,13 +402,10 @@ int caam_jr_shutdown(struct device *dev)
/* Free rings */
inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
outbusaddr = rd_reg64(&jrp->rregs->outring_base);
- dma_unmap_single(dev, outbusaddr,
- sizeof(struct jr_outentry) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
- dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
- DMA_BIDIRECTIONAL);
- kfree(jrp->outring);
- kfree(jrp->inpring);
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+ jrp->outring, outbusaddr);
kfree(jrp->entinfo);
return ret;
@@ -503,6 +458,14 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
dev_set_drvdata(jrdev, jrpriv);
ctrlpriv->jrdev[ring] = jrdev;
+ if (sizeof(dma_addr_t) == sizeof(u64))
+ if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
+ dma_set_mask(jrdev, DMA_BIT_MASK(40));
+ else
+ dma_set_mask(jrdev, DMA_BIT_MASK(36));
+ else
+ dma_set_mask(jrdev, DMA_BIT_MASK(32));
+
/* Identify the interrupt */
jrpriv->irq = of_irq_to_resource(np, 0, NULL);
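
The job-ring rework above keeps the head/tail ring bookkeeping but drains completions inside a while (outring_used) loop under a BH-disabling lock. The fragment below is only a user-space model of the index arithmetic involved: with JOBR_DEPTH a power of two, CIRC_CNT/CIRC_SPACE and the '& (JOBR_DEPTH - 1)' wrap behave as in the driver. The depth of 16 is an arbitrary example value.

#include <stdio.h>

#define JOBR_DEPTH 16					/* must be a power of two */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	int head = 0, tail = 0;

	/* enqueue three jobs */
	for (int i = 0; i < 3; i++) {
		if (CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
			printf("ring full\n");
			break;
		}
		head = (head + 1) & (JOBR_DEPTH - 1);
	}

	/* dequeue while anything is outstanding */
	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1) {
		tail = (tail + 1) & (JOBR_DEPTH - 1);
		printf("completed one job, %d still pending\n",
		       CIRC_CNT(head, tail, JOBR_DEPTH));
	}
	return 0;
}
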
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
new file mode 100644
index 000000000000..002888185f17
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.c
@@ -0,0 +1,122 @@
+/*
+ * CAAM/SEC 4.x functions for handling key-generation jobs
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+#include "compat.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "key_gen.h"
+
+void split_key_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct split_key_result *res = context;
+
+#ifdef DEBUG
+ dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+ res->err = err;
+
+ complete(&res->completion);
+}
+EXPORT_SYMBOL(split_key_done);
+/*
+ * get a split ipad/opad key
+ *
+ * Split key generation-----------------------------------------------
+ *
+ * [00] 0xb0810008 jobdesc: stidx=1 share=never len=8
+ * [01] 0x04000014 key: class2->keyreg len=20
+ *      @0xffe01000
+ * [03] 0x84410014 operation: cls2-op sha1 hmac init dec
+ * [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
+ * [05] 0xa4000001 jump: class2 local all ->1 [06]
+ * [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
+ *      @0xffe04000
+ */
+u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ int split_key_pad_len, const u8 *key_in, u32 keylen,
+ u32 alg_op)
+{
+ u32 *desc;
+ struct split_key_result result;
+ dma_addr_t dma_addr_in, dma_addr_out;
+ int ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+
+ init_job_desc(desc, 0);
+
+ dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_in)) {
+ dev_err(jrdev, "unable to map key input memory\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /* Sets MDHA up into an HMAC-INIT */
+ append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+
+ /*
+ * do a FIFO_LOAD of zero, this will trigger the internal key expansion
+ * into both pads inside MDHA
+ */
+ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
+
+ /*
+ * FIFO_STORE with the explicit split-key content store
+ * (0x26 output type)
+ */
+ dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_out)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+ append_fifo_store(desc, dma_addr_out, split_key_len,
+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+
+ result.err = 0;
+ init_completion(&result.completion);
+
+ ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ if (!ret) {
+ /* in progress */
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.err;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ split_key_pad_len, 1);
+#endif
+ }
+
+ dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+
+ kfree(desc);
+
+ return ret;
+}
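
gen_split_key() above is asynchronous: split_key_done() records the job status and completes a struct completion that the submitting thread sleeps on. The sketch below is a plain pthreads analogue of that handshake, not CAAM code; the names only mirror split_key_result to make the correspondence easy to see.

#include <pthread.h>
#include <stdio.h>

/* user-space stand-in for struct split_key_result plus struct completion */
struct split_key_result {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int err;
};

/* analogue of split_key_done(): record the status, wake the submitter */
static void report_done(struct split_key_result *res, int err)
{
	pthread_mutex_lock(&res->lock);
	res->err = err;
	res->done = 1;
	pthread_cond_signal(&res->cond);
	pthread_mutex_unlock(&res->lock);
}

/* stand-in for the job ring finishing the descriptor asynchronously */
static void *fake_job_ring(void *arg)
{
	report_done(arg, 0);			/* report success */
	return NULL;
}

int main(void)
{
	struct split_key_result result = { .done = 0, .err = 0 };
	pthread_t thr;

	pthread_mutex_init(&result.lock, NULL);
	pthread_cond_init(&result.cond, NULL);

	/* "enqueue" the job, then sleep until the callback signals completion */
	pthread_create(&thr, NULL, fake_job_ring, &result);

	pthread_mutex_lock(&result.lock);
	while (!result.done)
		pthread_cond_wait(&result.cond, &result.lock);
	pthread_mutex_unlock(&result.lock);

	pthread_join(thr, NULL);
	printf("split-key job finished, err=%d\n", result.err);
	return 0;
}
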
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
new file mode 100644
index 000000000000..d95d290c6e8b
--- /dev/null
+++ b/drivers/crypto/caam/key_gen.h
@@ -0,0 +1,17 @@
+/*
+ * CAAM/SEC 4.x definitions for handling key-generation jobs
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+struct split_key_result {
+ struct completion completion;
+ int err;
+};
+
+void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
+
+u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ int split_key_pad_len, const u8 *key_in, u32 keylen,
+ u32 alg_op);
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
new file mode 100644
index 000000000000..62950d22ac13
--- /dev/null
+++ b/drivers/crypto/caam/pdb.h
@@ -0,0 +1,401 @@
+/*
+ * CAAM Protocol Data Block (PDB) definition header file
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ */
+
+#ifndef CAAM_PDB_H
+#define CAAM_PDB_H
+
+/*
+ * PDB- IPSec ESP Header Modification Options
+ */
+#define PDBHMO_ESP_DECAP_SHIFT 12
+#define PDBHMO_ESP_ENCAP_SHIFT 4
+/*
+ * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
+ * Options Byte IP version (IPvsn) field:
+ * if IPv4, decrement the inner IP header TTL field (byte 8);
+ * if IPv6 decrement the inner IP header Hop Limit field (byte 7).
+ */
+#define PDBHMO_ESP_DECAP_DEC_TTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
+#define PDBHMO_ESP_ENCAP_DEC_TTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+/*
+ * Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte
+ * from the outer IP header to the inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
+/*
+ * Encap - Copy DF bit - if an IPv4 tunnel mode outer IP header is coming from
+ * the PDB, copy the DF bit from the inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/*
+ * PDB - IPSec ESP Encap/Decap Options
+ */
+#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */
+#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */
+#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */
+#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */
+#define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
+#define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */
+#define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */
+#define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */
+#define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */
+#define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */
+
+/*
+ * General IPSec encap/decap PDB definitions
+ */
+struct ipsec_encap_cbc {
+ u32 iv[4];
+};
+
+struct ipsec_encap_ctr {
+ u32 ctr_nonce;
+ u32 ctr_initial;
+ u32 iv[2];
+};
+
+struct ipsec_encap_ccm {
+ u32 salt; /* lower 24 bits */
+ u8 b0_flags;
+ u8 ctr_flags;
+ u16 ctr_initial;
+ u32 iv[2];
+};
+
+struct ipsec_encap_gcm {
+ u32 salt; /* lower 24 bits */
+ u32 rsvd1;
+ u32 iv[2];
+};
+
+struct ipsec_encap_pdb {
+ u8 hmo_rsvd;
+ u8 ip_nh;
+ u8 ip_nh_offset;
+ u8 options;
+ u32 seq_num_ext_hi;
+ u32 seq_num;
+ union {
+ struct ipsec_encap_cbc cbc;
+ struct ipsec_encap_ctr ctr;
+ struct ipsec_encap_ccm ccm;
+ struct ipsec_encap_gcm gcm;
+ };
+ u32 spi;
+ u16 rsvd1;
+ u16 ip_hdr_len;
+ u32 ip_hdr[0]; /* optional IP Header content */
+};
+
+struct ipsec_decap_cbc {
+ u32 rsvd[2];
+};
+
+struct ipsec_decap_ctr {
+ u32 salt;
+ u32 ctr_initial;
+};
+
+struct ipsec_decap_ccm {
+ u32 salt;
+ u8 iv_flags;
+ u8 ctr_flags;
+ u16 ctr_initial;
+};
+
+struct ipsec_decap_gcm {
+ u32 salt;
+ u32 resvd;
+};
+
+struct ipsec_decap_pdb {
+ u16 hmo_ip_hdr_len;
+ u8 ip_nh_offset;
+ u8 options;
+ union {
+ struct ipsec_decap_cbc cbc;
+ struct ipsec_decap_ctr ctr;
+ struct ipsec_decap_ccm ccm;
+ struct ipsec_decap_gcm gcm;
+ };
+ u32 seq_num_ext_hi;
+ u32 seq_num;
+ u32 anti_replay[2];
+ u32 end_index[0];
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ */
+struct ipsec_deco_dpovrd {
+#define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80
+ u8 ovrd_ecn;
+ u8 ip_hdr_len;
+ u8 nh_offset;
+ u8 next_header; /* reserved if decap */
+};
+
+/*
+ * IEEE 802.11i WiFi Protocol Data Block
+ */
+#define WIFI_PDBOPTS_FCS 0x01
+#define WIFI_PDBOPTS_AR 0x40
+
+struct wifi_encap_pdb {
+ u16 mac_hdr_len;
+ u8 rsvd;
+ u8 options;
+ u8 iv_flags;
+ u8 pri;
+ u16 pn1;
+ u32 pn2;
+ u16 frm_ctrl_mask;
+ u16 seq_ctrl_mask;
+ u8 rsvd1[2];
+ u8 cnst;
+ u8 key_id;
+ u8 ctr_flags;
+ u8 rsvd2;
+ u16 ctr_init;
+};
+
+struct wifi_decap_pdb {
+ u16 mac_hdr_len;
+ u8 rsvd;
+ u8 options;
+ u8 iv_flags;
+ u8 pri;
+ u16 pn1;
+ u32 pn2;
+ u16 frm_ctrl_mask;
+ u16 seq_ctrl_mask;
+ u8 rsvd1[4];
+ u8 ctr_flags;
+ u8 rsvd2;
+ u16 ctr_init;
+};
+
+/*
+ * IEEE 802.16 WiMAX Protocol Data Block
+ */
+#define WIMAX_PDBOPTS_FCS 0x01
+#define WIMAX_PDBOPTS_AR 0x40 /* decap only */
+
+struct wimax_encap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u32 nonce;
+ u8 b0_flags;
+ u8 ctr_flags;
+ u16 ctr_init;
+ /* begin DECO writeback region */
+ u32 pn;
+ /* end DECO writeback region */
+};
+
+struct wimax_decap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u32 nonce;
+ u8 iv_flags;
+ u8 ctr_flags;
+ u16 ctr_init;
+ /* begin DECO writeback region */
+ u32 pn;
+ u8 rsvd1[2];
+ u16 antireplay_len;
+ u64 antireplay_scorecard;
+ /* end DECO writeback region */
+};
+
+/*
+ * IEEE 802.1AE MACsec Protocol Data Block
+ */
+#define MACSEC_PDBOPTS_FCS 0x01
+#define MACSEC_PDBOPTS_AR 0x40 /* used in decap only */
+
+struct macsec_encap_pdb {
+ u16 aad_len;
+ u8 rsvd;
+ u8 options;
+ u64 sci;
+ u16 ethertype;
+ u8 tci_an;
+ u8 rsvd1;
+ /* begin DECO writeback region */
+ u32 pn;
+ /* end DECO writeback region */
+};
+
+struct macsec_decap_pdb {
+ u16 aad_len;
+ u8 rsvd;
+ u8 options;
+ u64 sci;
+ u8 rsvd1[3];
+ /* begin DECO writeback region */
+ u8 antireplay_len;
+ u32 pn;
+ u64 antireplay_scorecard;
+ /* end DECO writeback region */
+};
+
+/*
+ * SSL/TLS/DTLS Protocol Data Blocks
+ */
+
+#define TLS_PDBOPTS_ARS32 0x40
+#define TLS_PDBOPTS_ARS64 0xc0
+#define TLS_PDBOPTS_OUTFMT 0x08
+#define TLS_PDBOPTS_IV_WRTBK 0x02 /* 1.1/1.2/DTLS only */
+#define TLS_PDBOPTS_EXP_RND_IV 0x01 /* 1.1/1.2/DTLS only */
+
+struct tls_block_encap_pdb {
+ u8 type;
+ u8 version[2];
+ u8 options;
+ u64 seq_num;
+ u32 iv[4];
+};
+
+struct tls_stream_encap_pdb {
+ u8 type;
+ u8 version[2];
+ u8 options;
+ u64 seq_num;
+ u8 i;
+ u8 j;
+ u8 rsvd1[2];
+};
+
+struct dtls_block_encap_pdb {
+ u8 type;
+ u8 version[2];
+ u8 options;
+ u16 epoch;
+ u16 seq_num[3];
+ u32 iv[4];
+};
+
+struct tls_block_decap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u64 seq_num;
+ u32 iv[4];
+};
+
+struct tls_stream_decap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u64 seq_num;
+ u8 i;
+ u8 j;
+ u8 rsvd1[2];
+};
+
+struct dtls_block_decap_pdb {
+ u8 rsvd[3];
+ u8 options;
+ u16 epoch;
+ u16 seq_num[3];
+ u32 iv[4];
+ u64 antireplay_scorecard;
+};
+
+/*
+ * SRTP Protocol Data Blocks
+ */
+#define SRTP_PDBOPTS_MKI 0x08
+#define SRTP_PDBOPTS_AR 0x40
+
+struct srtp_encap_pdb {
+ u8 x_len;
+ u8 mki_len;
+ u8 n_tag;
+ u8 options;
+ u32 cnst0;
+ u8 rsvd[2];
+ u16 cnst1;
+ u16 salt[7];
+ u16 cnst2;
+ u32 rsvd1;
+ u32 roc;
+ u32 opt_mki;
+};
+
+struct srtp_decap_pdb {
+ u8 x_len;
+ u8 mki_len;
+ u8 n_tag;
+ u8 options;
+ u32 cnst0;
+ u8 rsvd[2];
+ u16 cnst1;
+ u16 salt[7];
+ u16 cnst2;
+ u16 rsvd1;
+ u16 seq_num;
+ u32 roc;
+ u64 antireplay_scorecard;
+};
+
+/*
+ * DSA/ECDSA Protocol Data Blocks
+ * Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar
+ * except for the treatment of "w" for verify, "s" for sign,
+ * and the placement of "a,b".
+ */
+#define DSA_PDB_SGF_SHIFT 24
+#define DSA_PDB_SGF_MASK (0xff << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_Q (0x80 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_R (0x40 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_G (0x20 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_W (0x10 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_S (0x10 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_F (0x08 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_C (0x04 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_D (0x02 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_AB_SIGN (0x02 << DSA_PDB_SGF_SHIFT)
+#define DSA_PDB_SGF_AB_VERIFY (0x01 << DSA_PDB_SGF_SHIFT)
+
+#define DSA_PDB_L_SHIFT 7
+#define DSA_PDB_L_MASK (0x3ff << DSA_PDB_L_SHIFT)
+
+#define DSA_PDB_N_MASK 0x7f
+
+struct dsa_sign_pdb {
+ u32 sgf_ln; /* Use DSA_PDB_ definitions per above */
+ u8 *q;
+ u8 *r;
+ u8 *g; /* or Gx,y */
+ u8 *s;
+ u8 *f;
+ u8 *c;
+ u8 *d;
+ u8 *ab; /* ECC only */
+ u8 *u;
+};
+
+struct dsa_verify_pdb {
+ u32 sgf_ln;
+ u8 *q;
+ u8 *r;
+ u8 *g; /* or Gx,y */
+ u8 *w; /* or Wx,y */
+ u8 *f;
+ u8 *c;
+ u8 *d;
+ u8 *tmp; /* temporary data block */
+ u8 *ab; /* only used if ECC processing */
+};
+
+#endif
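
The PDBOPTS_ESP_* values above are single-byte flags OR-ed into the options field of the ESP encap/decap PDBs. A trivial stand-alone illustration of composing an encap options byte follows; the particular combination is an arbitrary example, not a recommended configuration.

#include <stdio.h>
#include <stdint.h>

/* values copied from drivers/crypto/caam/pdb.h above */
#define PDBOPTS_ESP_IVSRC	0x20	/* IV comes from internal random gen */
#define PDBOPTS_ESP_IPHDRSRC	0x08	/* IP header comes from PDB (encap) */
#define PDBOPTS_ESP_INCIPHDR	0x04	/* prepend IP header to output frame */
#define PDBOPTS_ESP_TUNNEL	0x01	/* tunnel mode next-header byte */

int main(void)
{
	/* example only: tunnel-mode encap, header from PDB, prepended to output */
	uint8_t options = PDBOPTS_ESP_TUNNEL | PDBOPTS_ESP_IPHDRSRC |
			  PDBOPTS_ESP_INCIPHDR | PDBOPTS_ESP_IVSRC;

	printf("encap PDB options byte: 0x%02x\n", options);
	return 0;
}
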
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index e9f7a70cdd5e..3223fc6d647c 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -117,6 +117,12 @@ struct jr_outentry {
#define CHA_NUM_DECONUM_SHIFT 56
#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
+struct sec_vid {
+ u16 ip_id;
+ u8 maj_rev;
+ u8 min_rev;
+};
+
struct caam_perfmon {
/* Performance Monitor Registers f00-f9f */
u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
@@ -167,7 +173,7 @@ struct partid {
u32 pidr; /* partition ID, DECO */
};
-/* RNG test mode (replicated twice in some configurations) */
+/* RNGB test mode (replicated twice in some configurations) */
/* Padded out to 0x100 */
struct rngtst {
u32 mode; /* RTSTMODEx - Test mode */
@@ -200,6 +206,31 @@ struct rngtst {
u32 rsvd14[15];
};
+/* RNG4 TRNG test registers */
+struct rng4tst {
+#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+ u32 rtmctl; /* misc. control register */
+ u32 rtscmisc; /* statistical check misc. register */
+ u32 rtpkrrng; /* poker range register */
+ union {
+ u32 rtpkrmax; /* PRGM=1: poker max. limit register */
+ u32 rtpkrsq; /* PRGM=0: poker square calc. result register */
+ };
+#define RTSDCTL_ENT_DLY_SHIFT 16
+#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+ u32 rtsdctl; /* seed control register */
+ union {
+ u32 rtsblim; /* PRGM=1: sparse bit limit register */
+ u32 rttotsam; /* PRGM=0: total samples register */
+ };
+ u32 rtfrqmin; /* frequency count min. limit register */
+ union {
+ u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
+ u32 rtfrqcnt; /* PRGM=0: freq. count register */
+ };
+ u32 rsvd1[56];
+};
+
/*
* caam_ctrl - basic core configuration
* starts base + 0x0000 padded out to 0x1000
@@ -249,7 +280,10 @@ struct caam_ctrl {
/* RNG Test/Verification/Debug Access 600-7ff */
/* (Useful in Test/Debug modes only...) */
- struct rngtst rtst[2];
+ union {
+ struct rngtst rtst[2];
+ struct rng4tst r4tst[2];
+ };
u32 rsvd9[448];
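
Several words in the rng4tst block above are dual-purpose: they hold a programmed limit while RTMCTL_PRGM is set and a measured result once the TRNG is running. The toy below mirrors that with a C union and a mode flag; the field names are reused only as labels and nothing here is hardware-accurate.

#include <stdio.h>
#include <stdint.h>

#define RTMCTL_PRGM 0x00010000	/* 1 -> program mode, 0 -> run mode */

struct rng4tst_model {
	uint32_t rtmctl;
	union {
		uint32_t rtpkrmax;	/* PRGM=1: poker max. limit */
		uint32_t rtpkrsq;	/* PRGM=0: poker square result */
	};
};

static void show(const struct rng4tst_model *r)
{
	if (r->rtmctl & RTMCTL_PRGM)
		printf("program mode: poker max limit = 0x%x\n",
		       (unsigned int)r->rtpkrmax);
	else
		printf("run mode: poker square result = 0x%x\n",
		       (unsigned int)r->rtpkrsq);
}

int main(void)
{
	struct rng4tst_model r = { .rtmctl = RTMCTL_PRGM, .rtpkrmax = 0x6400 };

	show(&r);			/* interpreted as a limit while programming */

	r.rtmctl &= ~RTMCTL_PRGM;	/* switch to run mode */
	show(&r);			/* same storage now read as a result */
	return 0;
}
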
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
new file mode 100644
index 000000000000..e0037c8ee243
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -0,0 +1,156 @@
+/*
+ * CAAM/SEC 4.x functions for using scatterlists in caam driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ */
+
+struct sec4_sg_entry;
+
+/*
+ * convert single dma address to h/w link table format
+ */
+static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
+ dma_addr_t dma, u32 len, u32 offset)
+{
+ sec4_sg_ptr->ptr = dma;
+ sec4_sg_ptr->len = len;
+ sec4_sg_ptr->reserved = 0;
+ sec4_sg_ptr->buf_pool_id = 0;
+ sec4_sg_ptr->offset = offset;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
+ sizeof(struct sec4_sg_entry), 1);
+#endif
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not have final bit; instead, returns last entry
+ */
+static inline struct sec4_sg_entry *
+sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+ struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
+{
+ while (sg_count) {
+ dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ sec4_sg_ptr++;
+ sg = scatterwalk_sg_next(sg);
+ sg_count--;
+ }
+ return sec4_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+ struct sec4_sg_entry *sec4_sg_ptr,
+ u32 offset)
+{
+ sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+ sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
+}
+
+/* count number of elements in scatterlist */
+static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
+ bool *chained)
+{
+ struct scatterlist *sg = sg_list;
+ int sg_nents = 0;
+
+ while (nbytes > 0) {
+ sg_nents++;
+ nbytes -= sg->length;
+ if (!sg_is_last(sg) && (sg + 1)->length == 0)
+ *chained = true;
+ sg = scatterwalk_sg_next(sg);
+ }
+
+ return sg_nents;
+}
+
+/* derive number of elements in scatterlist, but return 0 for 1 */
+static inline int sg_count(struct scatterlist *sg_list, int nbytes,
+ bool *chained)
+{
+ int sg_nents = __sg_count(sg_list, nbytes, chained);
+
+ if (likely(sg_nents == 1))
+ return 0;
+
+ return sg_nents;
+}
+
+static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
+ unsigned int nents, enum dma_data_direction dir,
+ bool chained)
+{
+ if (unlikely(chained)) {
+ int i;
+ for (i = 0; i < nents; i++) {
+ dma_map_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+ } else {
+ dma_map_sg(dev, sg, nents, dir);
+ }
+ return nents;
+}
+
+static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
+ unsigned int nents, enum dma_data_direction dir,
+ bool chained)
+{
+ if (unlikely(chained)) {
+ int i;
+ for (i = 0; i < nents; i++) {
+ dma_unmap_sg(dev, sg, 1, dir);
+ sg = scatterwalk_sg_next(sg);
+ }
+ } else {
+ dma_unmap_sg(dev, sg, nents, dir);
+ }
+ return nents;
+}
+
+/* Copy from len bytes of sg to dest, starting from beginning */
+static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
+{
+ struct scatterlist *current_sg = sg;
+ int cpy_index = 0, next_cpy_index = current_sg->length;
+
+ while (next_cpy_index < len) {
+ memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
+ current_sg->length);
+ current_sg = scatterwalk_sg_next(current_sg);
+ cpy_index = next_cpy_index;
+ next_cpy_index += current_sg->length;
+ }
+ if (cpy_index < len)
+ memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
+ len - cpy_index);
+}
+
+/* Copy sg data, from to_skip to end, to dest */
+static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
+ int to_skip, unsigned int end)
+{
+ struct scatterlist *current_sg = sg;
+ int sg_index, cpy_index;
+
+ sg_index = current_sg->length;
+ while (sg_index <= to_skip) {
+ current_sg = scatterwalk_sg_next(current_sg);
+ sg_index += current_sg->length;
+ }
+ cpy_index = sg_index - to_skip;
+ memcpy(dest, (u8 *) sg_virt(current_sg) +
+ current_sg->length - cpy_index, cpy_index);
+ current_sg = scatterwalk_sg_next(current_sg);
+ if (end - sg_index)
+ sg_copy(dest + cpy_index, current_sg, end - sg_index);
+}
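
sg_copy()/sg_copy_part() above gather bytes segment by segment out of a mapped scatterlist. The fragment below models the same walk over a plain array of buffers (no scatterlist, no DMA) just to show how the first len bytes are collected across segment boundaries.

#include <stdio.h>
#include <string.h>

struct seg {
	const char *buf;
	unsigned int length;
};

/* gather the first len bytes of the segment chain into dest */
static void seg_copy(char *dest, const struct seg *sg, unsigned int nsegs,
		     unsigned int len)
{
	unsigned int copied = 0;

	for (unsigned int i = 0; i < nsegs && copied < len; i++) {
		unsigned int n = sg[i].length;

		if (n > len - copied)
			n = len - copied;	/* trim the last segment */
		memcpy(dest + copied, sg[i].buf, n);
		copied += n;
	}
}

int main(void)
{
	struct seg sg[] = {
		{ "hello ", 6 },
		{ "caam ", 5 },
		{ "world", 5 },
	};
	char out[17] = { 0 };

	seg_copy(out, sg, 3, 11);
	printf("%s\n", out);		/* prints "hello caam " */
	return 0;
}
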
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 1cc6b3f3e262..0d4071754352 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -24,6 +24,7 @@
#define MV_CESA "MV-CESA:"
#define MAX_HW_HASH_SIZE 0xFFFF
+#define MV_CESA_EXPIRE 500 /* msec */
/*
* STM:
@@ -87,6 +88,7 @@ struct crypto_priv {
spinlock_t lock;
struct crypto_queue queue;
enum engine_status eng_st;
+ struct timer_list completion_timer;
struct crypto_async_request *cur_req;
struct req_progress p;
int max_req_size;
@@ -138,6 +140,29 @@ struct mv_req_hash_ctx {
int count_add;
};
+static void mv_completion_timer_callback(unsigned long unused)
+{
+ int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
+
+ printk(KERN_ERR MV_CESA
+ "completion timer expired (CESA %sactive), cleaning up.\n",
+ active ? "" : "in");
+
+ del_timer(&cpg->completion_timer);
+ writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
+ while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
+ printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n", __func__);
+ cpg->eng_st = ENGINE_W_DEQUEUE;
+ wake_up_process(cpg->queue_th);
+}
+
+static void mv_setup_timer(void)
+{
+ setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
+ mod_timer(&cpg->completion_timer,
+ jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
+}
+
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
struct crypto_aes_ctx gen_aes_key;
@@ -273,12 +298,8 @@ static void mv_process_current_q(int first_block)
sizeof(struct sec_accel_config));
/* GO */
+ mv_setup_timer();
writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-
- /*
- * XXX: add timer if the interrupt does not occur for some mystery
- * reason
- */
}
static void mv_crypto_algo_completion(void)
@@ -357,12 +378,8 @@ static void mv_process_hash_current(int first_block)
memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
/* GO */
+ mv_setup_timer();
writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-
- /*
- * XXX: add timer if the interrupt does not occur for some mystery
- * reason
- */
}
static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
@@ -406,6 +423,15 @@ out:
return rc;
}
+static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
+{
+ ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
+ ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
+ ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
+ ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
+ ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+}
+
static void mv_hash_algo_completion(void)
{
struct ahash_request *req = ahash_request_cast(cpg->cur_req);
@@ -420,14 +446,12 @@ static void mv_hash_algo_completion(void)
memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
crypto_ahash_digestsize(crypto_ahash_reqtfm
(req)));
- } else
+ } else {
+ mv_save_digest_state(ctx);
mv_hash_final_fallback(req);
+ }
} else {
- ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
- ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
- ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
- ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
- ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
+ mv_save_digest_state(ctx);
}
}
@@ -888,6 +912,10 @@ irqreturn_t crypto_int(int irq, void *priv)
if (!(val & SEC_INT_ACCEL0_DONE))
return IRQ_NONE;
+ if (!del_timer(&cpg->completion_timer)) {
+ printk(KERN_WARNING MV_CESA
+ "got an interrupt but no pending timer?\n");
+ }
val &= ~SEC_INT_ACCEL0_DONE;
writel(val, cpg->reg + FPGA_INT_STATUS);
writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
@@ -1061,6 +1089,7 @@ static int mv_probe(struct platform_device *pdev)
if (!IS_ERR(cp->clk))
clk_prepare_enable(cp->clk);
+ writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
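
mv_setup_timer()/mv_completion_timer_callback() above arm a 500 ms watchdog before the accelerator is kicked and cancel it from the interrupt handler; if it ever fires, the engine is force-stopped and the dequeue thread woken. The loop below is only a user-space caricature of that arm/expire pattern, with no hardware and no kernel timers involved.

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define MV_CESA_EXPIRE_MS 500	/* same budget as the driver's watchdog */

static struct timespec deadline;
static bool timer_armed;

static void mv_setup_timer_model(void)
{
	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_nsec += MV_CESA_EXPIRE_MS * 1000000L;
	deadline.tv_sec += deadline.tv_nsec / 1000000000L;
	deadline.tv_nsec %= 1000000000L;
	timer_armed = true;
}

static bool timer_expired(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return timer_armed &&
	       (now.tv_sec > deadline.tv_sec ||
		(now.tv_sec == deadline.tv_sec && now.tv_nsec >= deadline.tv_nsec));
}

int main(void)
{
	bool irq_arrived = false;	/* pretend the completion IRQ never fires */

	mv_setup_timer_model();		/* arm the watchdog, then "kick the engine" */

	while (!irq_arrived) {
		if (timer_expired()) {
			printf("completion timer expired, cleaning up\n");
			break;
		}
	}
	return 0;
}
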
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 921039e56f87..efff788d2f1d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -53,117 +53,6 @@
#include "talitos.h"
-#define TALITOS_TIMEOUT 100000
-#define TALITOS_MAX_DATA_LEN 65535
-
-#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
-#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
-#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
-
-/* descriptor pointer entry */
-struct talitos_ptr {
- __be16 len; /* length */
- u8 j_extent; /* jump to sg link table and/or extent */
- u8 eptr; /* extended address */
- __be32 ptr; /* address */
-};
-
-static const struct talitos_ptr zero_entry = {
- .len = 0,
- .j_extent = 0,
- .eptr = 0,
- .ptr = 0
-};
-
-/* descriptor */
-struct talitos_desc {
- __be32 hdr; /* header high bits */
- __be32 hdr_lo; /* header low bits */
- struct talitos_ptr ptr[7]; /* ptr/len pair array */
-};
-
-/**
- * talitos_request - descriptor submission request
- * @desc: descriptor pointer (kernel virtual)
- * @dma_desc: descriptor's physical bus address
- * @callback: whom to call when descriptor processing is done
- * @context: caller context (optional)
- */
-struct talitos_request {
- struct talitos_desc *desc;
- dma_addr_t dma_desc;
- void (*callback) (struct device *dev, struct talitos_desc *desc,
- void *context, int error);
- void *context;
-};
-
-/* per-channel fifo management */
-struct talitos_channel {
- void __iomem *reg;
-
- /* request fifo */
- struct talitos_request *fifo;
-
- /* number of requests pending in channel h/w fifo */
- atomic_t submit_count ____cacheline_aligned;
-
- /* request submission (head) lock */
- spinlock_t head_lock ____cacheline_aligned;
- /* index to next free descriptor request */
- int head;
-
- /* request release (tail) lock */
- spinlock_t tail_lock ____cacheline_aligned;
- /* index to next in-progress/done descriptor request */
- int tail;
-};
-
-struct talitos_private {
- struct device *dev;
- struct platform_device *ofdev;
- void __iomem *reg;
- int irq[2];
-
- /* SEC global registers lock */
- spinlock_t reg_lock ____cacheline_aligned;
-
- /* SEC version geometry (from device tree node) */
- unsigned int num_channels;
- unsigned int chfifo_len;
- unsigned int exec_units;
- unsigned int desc_types;
-
- /* SEC Compatibility info */
- unsigned long features;
-
- /*
- * length of the request fifo
- * fifo_len is chfifo_len rounded up to next power of 2
- * so we can use bitwise ops to wrap
- */
- unsigned int fifo_len;
-
- struct talitos_channel *chan;
-
- /* next channel to be assigned next incoming descriptor */
- atomic_t last_chan ____cacheline_aligned;
-
- /* request callback tasklet */
- struct tasklet_struct done_task[2];
-
- /* list of registered algorithms */
- struct list_head alg_list;
-
- /* hwrng device */
- struct hwrng rng;
-};
-
-/* .features flag */
-#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
-#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
-#define TALITOS_FTR_SHA224_HWINIT 0x00000004
-#define TALITOS_FTR_HMAC_OK 0x00000008
-
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
@@ -303,11 +192,11 @@ static int init_device(struct device *dev)
* callback must check err and feedback in descriptor header
* for device processing status.
*/
-static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
- void (*callback)(struct device *dev,
- struct talitos_desc *desc,
- void *context, int error),
- void *context)
+int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+ void (*callback)(struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error),
+ void *context)
{
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request;
@@ -348,6 +237,7 @@ static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
return -EINPROGRESS;
}
+EXPORT_SYMBOL(talitos_submit);
/*
* process what was done, notify callback of error if not
@@ -733,7 +623,7 @@ static void talitos_unregister_rng(struct device *dev)
* crypto alg
*/
#define TALITOS_CRA_PRIORITY 3000
-#define TALITOS_MAX_KEY_SIZE 64
+#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define MD5_BLOCK_SIZE 64
@@ -2066,6 +1956,59 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
+ .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+ },
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.crypto = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
@@ -2121,6 +2064,112 @@ static struct talitos_alg_template driver_algs[] = {
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.crypto = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUB |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
+ .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUB |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUB |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
+ .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_aead_type,
+ .cra_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUB |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .alg.crypto = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
.cra_blocksize = AES_BLOCK_SIZE,
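
The new talitos AEAD entries follow a fixed naming pattern: cra_name is the generic authenc() template string and cra_driver_name tags the talitos implementation. A small stand-alone generator for the six combinations added in this series (the format strings are read off the entries above; nothing here touches the driver):

#include <stdio.h>

int main(void)
{
	static const char *hashes[] = { "sha224", "sha384", "sha512" };
	static const struct { const char *name, *drv; } ciphers[] = {
		{ "cbc(aes)", "cbc-aes" },
		{ "cbc(des3_ede)", "cbc-3des" },
	};

	for (unsigned int h = 0; h < 3; h++)
		for (unsigned int c = 0; c < 2; c++)
			printf("authenc(hmac(%s),%s) -> authenc-hmac-%s-%s-talitos\n",
			       hashes[h], ciphers[c].name,
			       hashes[h], ciphers[c].drv);
	return 0;
}
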
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 3c173954ef29..61a14054aa39 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -28,6 +28,123 @@
*
*/
+#define TALITOS_TIMEOUT 100000
+#define TALITOS_MAX_DATA_LEN 65535
+
+#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
+#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
+#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
+
+/* descriptor pointer entry */
+struct talitos_ptr {
+ __be16 len; /* length */
+ u8 j_extent; /* jump to sg link table and/or extent */
+ u8 eptr; /* extended address */
+ __be32 ptr; /* address */
+};
+
+static const struct talitos_ptr zero_entry = {
+ .len = 0,
+ .j_extent = 0,
+ .eptr = 0,
+ .ptr = 0
+};
+
+/* descriptor */
+struct talitos_desc {
+ __be32 hdr; /* header high bits */
+ __be32 hdr_lo; /* header low bits */
+ struct talitos_ptr ptr[7]; /* ptr/len pair array */
+};
+
+/**
+ * talitos_request - descriptor submission request
+ * @desc: descriptor pointer (kernel virtual)
+ * @dma_desc: descriptor's physical bus address
+ * @callback: whom to call when descriptor processing is done
+ * @context: caller context (optional)
+ */
+struct talitos_request {
+ struct talitos_desc *desc;
+ dma_addr_t dma_desc;
+ void (*callback) (struct device *dev, struct talitos_desc *desc,
+ void *context, int error);
+ void *context;
+};
+
+/* per-channel fifo management */
+struct talitos_channel {
+ void __iomem *reg;
+
+ /* request fifo */
+ struct talitos_request *fifo;
+
+ /* number of requests pending in channel h/w fifo */
+ atomic_t submit_count ____cacheline_aligned;
+
+ /* request submission (head) lock */
+ spinlock_t head_lock ____cacheline_aligned;
+ /* index to next free descriptor request */
+ int head;
+
+ /* request release (tail) lock */
+ spinlock_t tail_lock ____cacheline_aligned;
+ /* index to next in-progress/done descriptor request */
+ int tail;
+};
+
+struct talitos_private {
+ struct device *dev;
+ struct platform_device *ofdev;
+ void __iomem *reg;
+ int irq[2];
+
+ /* SEC global registers lock */
+ spinlock_t reg_lock ____cacheline_aligned;
+
+ /* SEC version geometry (from device tree node) */
+ unsigned int num_channels;
+ unsigned int chfifo_len;
+ unsigned int exec_units;
+ unsigned int desc_types;
+
+ /* SEC Compatibility info */
+ unsigned long features;
+
+ /*
+ * length of the request fifo
+ * fifo_len is chfifo_len rounded up to next power of 2
+ * so we can use bitwise ops to wrap
+ */
+ unsigned int fifo_len;
+
+ struct talitos_channel *chan;
+
+ /* next channel to be assigned next incoming descriptor */
+ atomic_t last_chan ____cacheline_aligned;
+
+ /* request callback tasklet */
+ struct tasklet_struct done_task[2];
+
+ /* list of registered algorithms */
+ struct list_head alg_list;
+
+ /* hwrng device */
+ struct hwrng rng;
+};
+
+extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+ void (*callback)(struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error),
+ void *context);
+
+/* .features flag */
+#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
+#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
+#define TALITOS_FTR_HMAC_OK 0x00000008
+
/*
* TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register
*/
@@ -209,6 +326,12 @@
DESC_HDR_MODE1_MDEU_HMAC)
#define DESC_HDR_MODE1_MDEU_SHA1_HMAC (DESC_HDR_MODE1_MDEU_SHA1 | \
DESC_HDR_MODE1_MDEU_HMAC)
+#define DESC_HDR_MODE1_MDEU_SHA224_HMAC (DESC_HDR_MODE1_MDEU_SHA224 | \
+ DESC_HDR_MODE1_MDEU_HMAC)
+#define DESC_HDR_MODE1_MDEUB_SHA384_HMAC (DESC_HDR_MODE1_MDEUB_SHA384 | \
+ DESC_HDR_MODE1_MDEU_HMAC)
+#define DESC_HDR_MODE1_MDEUB_SHA512_HMAC (DESC_HDR_MODE1_MDEUB_SHA512 | \
+ DESC_HDR_MODE1_MDEU_HMAC)
/* direction of overall data flow (DIR) */
#define DESC_HDR_DIR_INBOUND cpu_to_be32(0x00000002)