Diffstat (limited to 'crypto')
-rw-r--r--  crypto/842.c                          |   70
-rw-r--r--  crypto/Kconfig                        |   42
-rw-r--r--  crypto/Makefile                       |    9
-rw-r--r--  crypto/acompress.c                    |  274
-rw-r--r--  crypto/aead.c                         |    5
-rw-r--r--  crypto/aegis128-core.c                |    9
-rw-r--r--  crypto/ahash.c                        |  527
-rw-r--r--  crypto/algapi.c                       |    5
-rw-r--r--  crypto/api.c                          |   31
-rw-r--r--  crypto/asymmetric_keys/public_key.c   |    9
-rw-r--r--  crypto/async_tx/async_xor.c           |   26
-rw-r--r--  crypto/bpf_crypto_skcipher.c          |    1
-rw-r--r--  crypto/chacha_generic.c               |    4
-rw-r--r--  crypto/compress.c                     |   32
-rw-r--r--  crypto/compress.h                     |    2
-rw-r--r--  crypto/crc32c_generic.c               |    8
-rw-r--r--  crypto/crc64_rocksoft_generic.c       |   89
-rw-r--r--  crypto/crct10dif_generic.c            |  168
-rw-r--r--  crypto/crypto_null.c                  |   70
-rw-r--r--  crypto/crypto_user.c                  |   16
-rw-r--r--  crypto/ctr.c                          |   10
-rw-r--r--  crypto/deflate.c                      |   62
-rw-r--r--  crypto/ecc.c                          |    2
-rw-r--r--  crypto/ecdsa-p1363.c                  |    2
-rw-r--r--  crypto/ecdsa-x962.c                   |    4
-rw-r--r--  crypto/essiv.c                        |    3
-rw-r--r--  crypto/hkdf.c                         |  573
-rw-r--r--  crypto/internal.h                     |   20
-rw-r--r--  crypto/krb5/Kconfig                   |   26
-rw-r--r--  crypto/krb5/Makefile                  |   18
-rw-r--r--  crypto/krb5/internal.h                |  247
-rw-r--r--  crypto/krb5/krb5_api.c                |  452
-rw-r--r--  crypto/krb5/krb5_kdf.c                |  145
-rw-r--r--  crypto/krb5/rfc3961_simplified.c      |  792
-rw-r--r--  crypto/krb5/rfc3962_aes.c             |  115
-rw-r--r--  crypto/krb5/rfc6803_camellia.c        |  237
-rw-r--r--  crypto/krb5/rfc8009_aes2.c            |  362
-rw-r--r--  crypto/krb5/selftest.c                |  544
-rw-r--r--  crypto/krb5/selftest_data.c           |  291
-rw-r--r--  crypto/krb5enc.c                      |  504
-rw-r--r--  crypto/lrw.c                          |    2
-rw-r--r--  crypto/lz4.c                          |   65
-rw-r--r--  crypto/lz4hc.c                        |   70
-rw-r--r--  crypto/lzo-rle.c                      |   74
-rw-r--r--  crypto/lzo.c                          |   74
-rw-r--r--  crypto/pcbc.c                         |   28
-rw-r--r--  crypto/proc.c                         |    3
-rw-r--r--  crypto/rsassa-pkcs1.c                 |    2
-rw-r--r--  crypto/scatterwalk.c                  |  114
-rw-r--r--  crypto/scompress.c                    |  285
-rw-r--r--  crypto/skcipher.c                     |  151
-rw-r--r--  crypto/tcrypt.c                       |  239
-rw-r--r--  crypto/testmgr.c                      |  221
-rw-r--r--  crypto/testmgr.h                      |  654
-rw-r--r--  crypto/xctr.c                         |    2
-rw-r--r--  crypto/xts.c                          |    2
-rw-r--r--  crypto/zstd.c                         |   60
57 files changed, 6107 insertions, 1745 deletions
diff --git a/crypto/842.c b/crypto/842.c
index e59e54d76960..5fb37a925989 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -18,17 +18,16 @@
* drivers/crypto/nx/nx-842-crypto.c
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#include <linux/sw842.h>
-#include <crypto/internal/scompress.h>
struct crypto842_ctx {
void *wmem; /* working memory for compress */
};
-static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+static void *crypto842_alloc_ctx(void)
{
void *ctx;
@@ -39,38 +38,11 @@ static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int crypto842_init(struct crypto_tfm *tfm)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->wmem = crypto842_alloc_ctx(NULL);
- if (IS_ERR(ctx->wmem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void crypto842_free_ctx(void *ctx)
{
kfree(ctx);
}
-static void crypto842_exit(struct crypto_tfm *tfm)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto842_free_ctx(NULL, ctx->wmem);
-}
-
-static int crypto842_compress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return sw842_compress(src, slen, dst, dlen, ctx->wmem);
-}
-
static int crypto842_scompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
@@ -78,13 +50,6 @@ static int crypto842_scompress(struct crypto_scomp *tfm,
return sw842_compress(src, slen, dst, dlen, ctx);
}
-static int crypto842_decompress(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return sw842_decompress(src, slen, dst, dlen);
-}
-
static int crypto842_sdecompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
@@ -92,20 +57,6 @@ static int crypto842_sdecompress(struct crypto_scomp *tfm,
return sw842_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "842",
- .cra_driver_name = "842-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct crypto842_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = crypto842_init,
- .cra_exit = crypto842_exit,
- .cra_u = { .compress = {
- .coa_compress = crypto842_compress,
- .coa_decompress = crypto842_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = crypto842_alloc_ctx,
.free_ctx = crypto842_free_ctx,
@@ -121,25 +72,12 @@ static struct scomp_alg scomp = {
static int __init crypto842_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
subsys_initcall(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
module_exit(crypto842_mod_exit);
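
With the legacy crypto_comp interface removed, a compression provider now registers nothing but an scomp_alg with its four callbacks, as 842 does above. A minimal sketch of that pattern follows; the "mycomp" names, MYCOMP_WMEM_SIZE and the sw_mycomp_*() helpers are hypothetical placeholders, not kernel APIs:

#include <crypto/internal/scompress.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

#define MYCOMP_WMEM_SIZE 4096	/* hypothetical working-memory size */

static void *mycomp_alloc_ctx(void)
{
	void *ctx = kmalloc(MYCOMP_WMEM_SIZE, GFP_KERNEL);

	return ctx ?: ERR_PTR(-ENOMEM);
}

static void mycomp_free_ctx(void *ctx)
{
	kfree(ctx);
}

static int mycomp_scompress(struct crypto_scomp *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen, void *ctx)
{
	return sw_mycomp_compress(src, slen, dst, dlen, ctx);
}

static int mycomp_sdecompress(struct crypto_scomp *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen, void *ctx)
{
	/* decompression needs no working memory, as with sw842 */
	return sw_mycomp_decompress(src, slen, dst, dlen);
}

static struct scomp_alg mycomp_scomp = {
	.alloc_ctx	= mycomp_alloc_ctx,
	.free_ctx	= mycomp_free_ctx,
	.compress	= mycomp_scompress,
	.decompress	= mycomp_sdecompress,
	.base		= {
		.cra_name	 = "mycomp",
		.cra_driver_name = "mycomp-generic",
		.cra_module	 = THIS_MODULE,
	},
};

static int __init mycomp_mod_init(void)
{
	return crypto_register_scomp(&mycomp_scomp);
}
subsys_initcall(mycomp_mod_init);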
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 74ae5f52b784..dbf97c4e7c59 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -141,6 +141,12 @@ config CRYPTO_ACOMP
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
+config CRYPTO_HKDF
+ tristate
+ select CRYPTO_SHA256 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_SHA512 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_HASH2
+
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_MANAGER2
@@ -228,6 +234,18 @@ config CRYPTO_AUTHENC
This is required for IPSec ESP (XFRM_ESP).
+config CRYPTO_KRB5ENC
+ tristate "Kerberos 5 combined hash+cipher support"
+ select CRYPTO_AEAD
+ select CRYPTO_SKCIPHER
+ select CRYPTO_MANAGER
+ select CRYPTO_HASH
+ select CRYPTO_NULL
+ help
+ Combined hash and cipher support for Kerberos 5 RFC3961 simplified
+ profile. This is required for Kerberos 5-style encryption, used by
+ sunrpc/NFS and rxrpc/AFS.
+
config CRYPTO_TEST
tristate "Testing module"
depends on m || EXPERT
@@ -318,6 +336,7 @@ config CRYPTO_CURVE25519
tristate "Curve25519"
select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
+ select CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 elliptic curve (RFC7748)
@@ -616,6 +635,7 @@ config CRYPTO_ARC4
config CRYPTO_CHACHA20
tristate "ChaCha"
select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_LIB_CHACHA_INTERNAL
select CRYPTO_SKCIPHER
help
The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms
@@ -937,6 +957,7 @@ config CRYPTO_POLY1305
tristate "Poly1305"
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305_GENERIC
+ select CRYPTO_LIB_POLY1305_INTERNAL
help
Poly1305 authenticator algorithm (RFC7539)
@@ -1081,26 +1102,6 @@ config CRYPTO_CRC32
Used by RoCEv2 and f2fs.
-config CRYPTO_CRCT10DIF
- tristate "CRCT10DIF"
- select CRYPTO_HASH
- select CRC_T10DIF
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- CRC algorithm used by the SCSI Block Commands standard.
-
-config CRYPTO_CRC64_ROCKSOFT
- tristate "CRC64 based on Rocksoft Model algorithm"
- depends on CRC64
- select CRYPTO_HASH
- help
- CRC64 CRC algorithm based on the Rocksoft Model CRC Algorithm
-
- Used by the NVMe implementation of T10 DIF (BLK_DEV_INTEGRITY)
-
- See https://zlib.net/crc_v3.txt
-
endmenu
menu "Compression"
@@ -1460,5 +1461,6 @@ endif
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"
+source "crypto/krb5/Kconfig"
endif # if CRYPTO
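
CRYPTO_KRB5ENC builds the new krb5enc AEAD template (crypto/krb5enc.c in the diffstat above). A hedged sketch of how a consumer such as sunrpc might instantiate it; the krb5enc(hash, cipher) template string is an assumption modelled on the new self-test entries:

#include <crypto/aead.h>

static struct crypto_aead *krb5_camellia_aead(void)
{
	/* Template name is an assumption based on the new testmgr vectors */
	return crypto_alloc_aead("krb5enc(cmac(camellia),cts(cbc(camellia)))",
				 0, 0);
}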
diff --git a/crypto/Makefile b/crypto/Makefile
index f67e853c4690..0e6ab5ffd3f7 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o
obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
obj-$(CONFIG_CRYPTO_FIPS) += fips.o
@@ -34,6 +34,7 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
obj-$(CONFIG_CRYPTO_SIG2) += sig.o
obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
+obj-$(CONFIG_CRYPTO_HKDF) += hkdf.o
dh_generic-y := dh.o
dh_generic-y += dh_helper.o
@@ -155,10 +156,8 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
CFLAGS_crc32c_generic.o += -DARCH=$(ARCH)
CFLAGS_crc32_generic.o += -DARCH=$(ARCH)
-obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_generic.o
-CFLAGS_crct10dif_generic.o += -DARCH=$(ARCH)
-obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
+obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
@@ -212,3 +211,5 @@ obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
# Key derivation function
#
obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o
+
+obj-$(CONFIG_CRYPTO_KRB5) += krb5/
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 6fdf0ff9f3c0..f7a3fbe5447e 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/page-flags.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -23,6 +24,8 @@ struct crypto_scomp;
static const struct crypto_type crypto_acomp_type;
+static void acomp_reqchain_done(void *data, int err);
+
static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
return container_of(alg, struct acomp_alg, calg.base);
@@ -58,29 +61,56 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
- alg->exit(acomp);
+ if (alg->exit)
+ alg->exit(acomp);
+
+ if (acomp_is_async(acomp))
+ crypto_free_acomp(acomp->fb);
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
+ struct crypto_acomp *fb = NULL;
+ int err;
+
+ acomp->fb = acomp;
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
+ if (acomp_is_async(acomp)) {
+ fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ err = -EINVAL;
+ if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
+ goto out_free_fb;
+
+ acomp->fb = fb;
+ }
+
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
- acomp->dst_free = alg->dst_free;
acomp->reqsize = alg->reqsize;
- if (alg->exit)
- acomp->base.exit = crypto_acomp_exit_tfm;
+ acomp->base.exit = crypto_acomp_exit_tfm;
+
+ if (!alg->init)
+ return 0;
- if (alg->init)
- return alg->init(acomp);
+ err = alg->init(acomp);
+ if (err)
+ goto out_free_fb;
return 0;
+
+out_free_fb:
+ crypto_free_acomp(fb);
+ return err;
}
static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
@@ -123,35 +153,231 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
-struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct acomp_req *req;
+ struct acomp_req_chain *state = &req->chain;
+
+ state->compl = req->base.complete;
+ state->data = req->base.data;
+ req->base.complete = cplt;
+ req->base.data = state;
+ state->req0 = req;
+}
- req = __acomp_request_alloc(acomp);
- if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
- return crypto_acomp_scomp_alloc_ctx(req);
+static void acomp_restore_req(struct acomp_req *req)
+{
+ struct acomp_req_chain *state = req->base.data;
- return req;
+ req->base.complete = state->compl;
+ req->base.data = state->data;
}
-EXPORT_SYMBOL_GPL(acomp_request_alloc);
-void acomp_request_free(struct acomp_req *req)
+static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct acomp_req *req = state->cur;
+ unsigned int slen = req->slen;
+ unsigned int dlen = req->dlen;
+
+ req->base.err = err;
+ state = &req->chain;
+
+ if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
+ acomp_request_set_src_dma(req, state->src, slen);
+ else if (state->flags & CRYPTO_ACOMP_REQ_SRC_FOLIO)
+ acomp_request_set_src_folio(req, state->sfolio, state->soff, slen);
+ if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
+ acomp_request_set_dst_dma(req, state->dst, dlen);
+ else if (state->flags & CRYPTO_ACOMP_REQ_DST_FOLIO)
+ acomp_request_set_dst_folio(req, state->dfolio, state->doff, dlen);
+}
- if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
- crypto_acomp_scomp_free_ctx(req);
+static void acomp_virt_to_sg(struct acomp_req *req)
+{
+ struct acomp_req_chain *state = &req->chain;
+
+ state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT |
+ CRYPTO_ACOMP_REQ_SRC_FOLIO |
+ CRYPTO_ACOMP_REQ_DST_FOLIO);
+
+ if (acomp_request_src_isvirt(req)) {
+ unsigned int slen = req->slen;
+ const u8 *svirt = req->svirt;
+
+ state->src = svirt;
+ sg_init_one(&state->ssg, svirt, slen);
+ acomp_request_set_src_sg(req, &state->ssg, slen);
+ } else if (acomp_request_src_isfolio(req)) {
+ struct folio *folio = req->sfolio;
+ unsigned int slen = req->slen;
+ size_t off = req->soff;
+
+ state->sfolio = folio;
+ state->soff = off;
+ sg_init_table(&state->ssg, 1);
+ sg_set_page(&state->ssg, folio_page(folio, off / PAGE_SIZE),
+ slen, off % PAGE_SIZE);
+ acomp_request_set_src_sg(req, &state->ssg, slen);
+ }
- if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
- acomp->dst_free(req->dst);
- req->dst = NULL;
+ if (acomp_request_dst_isvirt(req)) {
+ unsigned int dlen = req->dlen;
+ u8 *dvirt = req->dvirt;
+
+ state->dst = dvirt;
+ sg_init_one(&state->dsg, dvirt, dlen);
+ acomp_request_set_dst_sg(req, &state->dsg, dlen);
+ } else if (acomp_request_dst_isfolio(req)) {
+ struct folio *folio = req->dfolio;
+ unsigned int dlen = req->dlen;
+ size_t off = req->doff;
+
+ state->dfolio = folio;
+ state->doff = off;
+ sg_init_table(&state->dsg, 1);
+ sg_set_page(&state->dsg, folio_page(folio, off / PAGE_SIZE),
+ dlen, off % PAGE_SIZE);
+ acomp_request_set_dst_sg(req, &state->dsg, dlen);
}
+}
+
+static int acomp_do_nondma(struct acomp_req_chain *state,
+ struct acomp_req *req)
+{
+ u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_SRC_NONDMA |
+ CRYPTO_ACOMP_REQ_DST_VIRT |
+ CRYPTO_ACOMP_REQ_DST_NONDMA;
+ ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
+ int err;
+
+ acomp_request_set_callback(fbreq, req->base.flags, NULL, NULL);
+ fbreq->base.flags &= ~keep;
+ fbreq->base.flags |= req->base.flags & keep;
+ fbreq->src = req->src;
+ fbreq->dst = req->dst;
+ fbreq->slen = req->slen;
+ fbreq->dlen = req->dlen;
+
+ if (state->op == crypto_acomp_reqtfm(req)->compress)
+ err = crypto_acomp_compress(fbreq);
+ else
+ err = crypto_acomp_decompress(fbreq);
+
+ req->dlen = fbreq->dlen;
+ return err;
+}
+
+static int acomp_do_one_req(struct acomp_req_chain *state,
+ struct acomp_req *req)
+{
+ state->cur = req;
+
+ if (acomp_request_isnondma(req))
+ return acomp_do_nondma(state, req);
+
+ acomp_virt_to_sg(req);
+ return state->op(req);
+}
+
+static int acomp_reqchain_finish(struct acomp_req *req0, int err, u32 mask)
+{
+ struct acomp_req_chain *state = req0->base.data;
+ struct acomp_req *req = state->cur;
+ struct acomp_req *n;
+
+ acomp_reqchain_virt(state, err);
+
+ if (req != req0)
+ list_add_tail(&req->base.list, &req0->base.list);
- __acomp_request_free(req);
+ list_for_each_entry_safe(req, n, &state->head, base.list) {
+ list_del_init(&req->base.list);
+
+ req->base.flags &= mask;
+ req->base.complete = acomp_reqchain_done;
+ req->base.data = state;
+
+ err = acomp_do_one_req(state, req);
+
+ if (err == -EINPROGRESS) {
+ if (!list_empty(&state->head))
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (err == -EBUSY)
+ goto out;
+
+ acomp_reqchain_virt(state, err);
+ list_add_tail(&req->base.list, &req0->base.list);
+ }
+
+ acomp_restore_req(req0);
+
+out:
+ return err;
+}
+
+static void acomp_reqchain_done(void *data, int err)
+{
+ struct acomp_req_chain *state = data;
+ crypto_completion_t compl = state->compl;
+
+ data = state->data;
+
+ if (err == -EINPROGRESS) {
+ if (!list_empty(&state->head))
+ return;
+ goto notify;
+ }
+
+ err = acomp_reqchain_finish(state->req0, err,
+ CRYPTO_TFM_REQ_MAY_BACKLOG);
+ if (err == -EBUSY)
+ return;
+
+notify:
+ compl(data, err);
+}
+
+static int acomp_do_req_chain(struct acomp_req *req,
+ int (*op)(struct acomp_req *req))
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct acomp_req_chain *state;
+ int err;
+
+ if (crypto_acomp_req_chain(tfm) ||
+ (!acomp_request_chained(req) && acomp_request_issg(req)))
+ return op(req);
+
+ acomp_save_req(req, acomp_reqchain_done);
+ state = req->base.data;
+
+ state->op = op;
+ state->src = NULL;
+ INIT_LIST_HEAD(&state->head);
+ list_splice_init(&req->base.list, &state->head);
+
+ err = acomp_do_one_req(state, req);
+ if (err == -EBUSY || err == -EINPROGRESS)
+ return -EBUSY;
+
+ return acomp_reqchain_finish(req, err, ~0);
+}
+
+int crypto_acomp_compress(struct acomp_req *req)
+{
+ return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_compress);
+
+int crypto_acomp_decompress(struct acomp_req *req)
+{
+ return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
}
-EXPORT_SYMBOL_GPL(acomp_request_free);
+EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
void comp_prepare_alg(struct comp_alg_common *alg)
{
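
crypto_acomp_compress() and crypto_acomp_decompress() are now out-of-line entry points that funnel through acomp_do_req_chain(): callers can pass kernel virtual addresses or folios, and for drivers that only understand scatterlists the request is transparently rewrapped via the on-stack SG entries kept in struct acomp_req_chain. A caller-side sketch, assuming a synchronous algorithm so that completion handling can be omitted:

#include <crypto/acompress.h>

static int compress_virt_buf(struct crypto_acomp *tfm,
			     const u8 *src, unsigned int slen,
			     u8 *dst, unsigned int dlen)
{
	ACOMP_REQUEST_ON_STACK(req, tfm);

	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				   NULL, NULL);
	/* virtual addresses instead of scatterlists, as added above */
	acomp_request_set_src_dma(req, src, slen);
	acomp_request_set_dst_dma(req, dst, dlen);

	return crypto_acomp_compress(req);
}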
diff --git a/crypto/aead.c b/crypto/aead.c
index cade532413bf..12f5b42171af 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
#include "internal.h"
@@ -156,8 +157,8 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
seq_printf(m, "type : aead\n");
- seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no");
+ seq_printf(m, "async : %s\n",
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "ivsize : %u\n", aead->ivsize);
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 6cbff298722b..72f6ee1345ef 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -284,10 +284,9 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
- unsigned int size = scatterwalk_clamp(&walk, assoclen);
+ unsigned int size = scatterwalk_next(&walk, assoclen);
+ const u8 *src = walk.addr;
unsigned int left = size;
- void *mapped = scatterwalk_map(&walk);
- const u8 *src = (const u8 *)mapped;
if (pos + size >= AEGIS_BLOCK_SIZE) {
if (pos > 0) {
@@ -308,9 +307,7 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
pos += left;
assoclen -= size;
- scatterwalk_unmap(mapped);
- scatterwalk_advance(&walk, size);
- scatterwalk_done(&walk, 0, assoclen);
+ scatterwalk_done_src(&walk, size);
}
if (pos > 0) {
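
The aegis128 conversion shows the new scatterwalk idiom: scatterwalk_next() clamps and maps in one step and exposes the mapping as walk.addr, while scatterwalk_done_src() unmaps and advances. The old clamp/map/unmap/advance/done sequence collapses to a loop like this (illustrative sketch for a read-only walk):

#include <crypto/scatterwalk.h>

static void consume_sg(struct scatterlist *sg, unsigned int len)
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);
	while (len) {
		unsigned int n = scatterwalk_next(&walk, len);
		const u8 *p = walk.addr;

		/* ... consume n bytes at p ... */

		len -= n;
		scatterwalk_done_src(&walk, n);
	}
}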
diff --git a/crypto/ahash.c b/crypto/ahash.c
index b08b89ec26ec..2d9eec2b2b1c 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -16,11 +16,13 @@
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
#include "hash.h"
@@ -28,7 +30,7 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
struct crypto_hash_walk {
- char *data;
+ const char *data;
unsigned int offset;
unsigned int flags;
@@ -40,6 +42,27 @@ struct crypto_hash_walk {
struct scatterlist *sg;
};
+struct ahash_save_req_state {
+ struct list_head head;
+ struct ahash_request *req0;
+ struct ahash_request *cur;
+ int (*op)(struct ahash_request *req);
+ crypto_completion_t compl;
+ void *data;
+ struct scatterlist sg;
+ const u8 *src;
+ u8 *page;
+ unsigned int offset;
+ unsigned int nbytes;
+};
+
+static void ahash_reqchain_done(void *data, int err);
+static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
+static void ahash_restore_req(struct ahash_request *req);
+static void ahash_def_finup_done1(void *data, int err);
+static int ahash_def_finup_finish1(struct ahash_request *req, int err);
+static int ahash_def_finup(struct ahash_request *req);
+
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int offset = walk->offset;
@@ -58,7 +81,7 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
sg = walk->sg;
walk->offset = sg->offset;
- walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT));
walk->offset = offset_in_page(walk->offset);
walk->entrylen = sg->length;
@@ -73,20 +96,29 @@ static int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
+ walk->entrylen = 0;
- if (!walk->total) {
- walk->entrylen = 0;
+ if (!walk->total)
return 0;
+
+ walk->flags = req->base.flags;
+
+ if (ahash_request_isvirt(req)) {
+ walk->data = req->svirt;
+ walk->total = 0;
+ return req->nbytes;
}
walk->sg = req->src;
- walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
+ if ((walk->flags & CRYPTO_AHASH_REQ_VIRT))
+ return err;
+
walk->data -= walk->offset;
kunmap_local(walk->data);
@@ -171,21 +203,36 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
unsigned int nbytes = req->nbytes;
struct scatterlist *sg;
unsigned int offset;
+ struct page *page;
+ const u8 *data;
int err;
- if (nbytes &&
- (sg = req->src, offset = sg->offset,
- nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
- void *data;
+ data = req->svirt;
+ if (!nbytes || ahash_request_isvirt(req))
+ return crypto_shash_digest(desc, data, nbytes, req->result);
+
+ sg = req->src;
+ if (nbytes > sg->length)
+ return crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
- data = kmap_local_page(sg_page(sg));
- err = crypto_shash_digest(desc, data + offset, nbytes,
- req->result);
- kunmap_local(data);
- } else
- err = crypto_shash_init(desc) ?:
- shash_ahash_finup(req, desc);
+ page = sg_page(sg);
+ offset = sg->offset;
+ data = lowmem_page_address(page) + offset;
+ if (!IS_ENABLED(CONFIG_HIGHMEM))
+ return crypto_shash_digest(desc, data, nbytes, req->result);
+ page = nth_page(page, offset >> PAGE_SHIFT);
+ offset = offset_in_page(offset);
+
+ if (nbytes > (unsigned int)PAGE_SIZE - offset)
+ return crypto_shash_init(desc) ?:
+ shash_ahash_finup(req, desc);
+
+ data = kmap_local_page(page);
+ err = crypto_shash_digest(desc, data + offset, nbytes,
+ req->result);
+ kunmap_local(data);
return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);
@@ -266,89 +313,298 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
+static bool ahash_request_hasvirt(struct ahash_request *req)
+{
+ return ahash_request_isvirt(req);
+}
+
+static int ahash_reqchain_virt(struct ahash_save_req_state *state,
+ int err, u32 mask)
+{
+ struct ahash_request *req = state->cur;
+
+ for (;;) {
+ unsigned len = state->nbytes;
+
+ req->base.err = err;
+
+ if (!state->offset)
+ break;
+
+ if (state->offset == len || err) {
+ u8 *result = req->result;
+
+ ahash_request_set_virt(req, state->src, result, len);
+ state->offset = 0;
+ break;
+ }
+
+ len -= state->offset;
+
+ len = min(PAGE_SIZE, len);
+ memcpy(state->page, state->src + state->offset, len);
+ state->offset += len;
+ req->nbytes = len;
+
+ err = state->op(req);
+ if (err == -EINPROGRESS) {
+ if (!list_empty(&state->head) ||
+ state->offset < state->nbytes)
+ err = -EBUSY;
+ break;
+ }
+
+ if (err == -EBUSY)
+ break;
+ }
+
+ return err;
+}
+
+static int ahash_reqchain_finish(struct ahash_request *req0,
+ struct ahash_save_req_state *state,
+ int err, u32 mask)
+{
+ struct ahash_request *req = state->cur;
+ struct crypto_ahash *tfm;
+ struct ahash_request *n;
+ bool update;
+ u8 *page;
+
+ err = ahash_reqchain_virt(state, err, mask);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ goto out;
+
+ if (req != req0)
+ list_add_tail(&req->base.list, &req0->base.list);
+
+ tfm = crypto_ahash_reqtfm(req);
+ update = state->op == crypto_ahash_alg(tfm)->update;
+
+ list_for_each_entry_safe(req, n, &state->head, base.list) {
+ list_del_init(&req->base.list);
+
+ req->base.flags &= mask;
+ req->base.complete = ahash_reqchain_done;
+ req->base.data = state;
+ state->cur = req;
+
+ if (update && ahash_request_isvirt(req) && req->nbytes) {
+ unsigned len = req->nbytes;
+ u8 *result = req->result;
+
+ state->src = req->svirt;
+ state->nbytes = len;
+
+ len = min(PAGE_SIZE, len);
+
+ memcpy(state->page, req->svirt, len);
+ state->offset = len;
+
+ ahash_request_set_crypt(req, &state->sg, result, len);
+ }
+
+ err = state->op(req);
+
+ if (err == -EINPROGRESS) {
+ if (!list_empty(&state->head) ||
+ state->offset < state->nbytes)
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (err == -EBUSY)
+ goto out;
+
+ err = ahash_reqchain_virt(state, err, mask);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ goto out;
+
+ list_add_tail(&req->base.list, &req0->base.list);
+ }
+
+ page = state->page;
+ if (page) {
+ memset(page, 0, PAGE_SIZE);
+ free_page((unsigned long)page);
+ }
+ ahash_restore_req(req0);
+
+out:
+ return err;
+}
+
+static void ahash_reqchain_done(void *data, int err)
+{
+ struct ahash_save_req_state *state = data;
+ crypto_completion_t compl = state->compl;
+
+ data = state->data;
+
+ if (err == -EINPROGRESS) {
+ if (!list_empty(&state->head) || state->offset < state->nbytes)
+ return;
+ goto notify;
+ }
+
+ err = ahash_reqchain_finish(state->req0, state, err,
+ CRYPTO_TFM_REQ_MAY_BACKLOG);
+ if (err == -EBUSY)
+ return;
+
+notify:
+ compl(data, err);
+}
+
+static int ahash_do_req_chain(struct ahash_request *req,
+ int (*op)(struct ahash_request *req))
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ bool update = op == crypto_ahash_alg(tfm)->update;
+ struct ahash_save_req_state *state;
+ struct ahash_save_req_state state0;
+ u8 *page = NULL;
+ int err;
+
+ if (crypto_ahash_req_chain(tfm) ||
+ (!ahash_request_chained(req) &&
+ (!update || !ahash_request_isvirt(req))))
+ return op(req);
+
+ if (update && ahash_request_hasvirt(req)) {
+ gfp_t gfp;
+ u32 flags;
+
+ flags = ahash_request_flags(req);
+ gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ page = (void *)__get_free_page(gfp);
+ err = -ENOMEM;
+ if (!page)
+ goto out_set_chain;
+ }
+
+ state = &state0;
+ if (ahash_is_async(tfm)) {
+ err = ahash_save_req(req, ahash_reqchain_done);
+ if (err)
+ goto out_free_page;
+
+ state = req->base.data;
+ }
+
+ state->op = op;
+ state->cur = req;
+ state->page = page;
+ state->offset = 0;
+ state->nbytes = 0;
+ INIT_LIST_HEAD(&state->head);
+
+ if (page)
+ sg_init_one(&state->sg, page, PAGE_SIZE);
+
+ if (update && ahash_request_isvirt(req) && req->nbytes) {
+ unsigned len = req->nbytes;
+ u8 *result = req->result;
+
+ state->src = req->svirt;
+ state->nbytes = len;
+
+ len = min(PAGE_SIZE, len);
+
+ memcpy(page, req->svirt, len);
+ state->offset = len;
+
+ ahash_request_set_crypt(req, &state->sg, result, len);
+ }
+
+ err = op(req);
+ if (err == -EBUSY || err == -EINPROGRESS)
+ return -EBUSY;
+
+ return ahash_reqchain_finish(req, state, err, ~0);
+
+out_free_page:
+ free_page((unsigned long)page);
+
+out_set_chain:
+ req->base.err = err;
+ return err;
+}
+
int crypto_ahash_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash))
- return crypto_shash_init(prepare_shash_desc(req, tfm));
+ if (likely(tfm->using_shash)) {
+ int err;
+
+ err = crypto_shash_init(prepare_shash_desc(req, tfm));
+ req->base.err = err;
+ return err;
+ }
+
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- return crypto_ahash_alg(tfm)->init(req);
+
+ return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
- bool has_state)
+static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request *subreq;
- unsigned int subreq_size;
- unsigned int reqsize;
- u8 *result;
+ struct ahash_save_req_state *state;
gfp_t gfp;
u32 flags;
- subreq_size = sizeof(*subreq);
- reqsize = crypto_ahash_reqsize(tfm);
- reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
- subreq_size += reqsize;
- subreq_size += ds;
+ if (!ahash_is_async(tfm))
+ return 0;
flags = ahash_request_flags(req);
gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
- subreq = kmalloc(subreq_size, gfp);
- if (!subreq)
+ state = kmalloc(sizeof(*state), gfp);
+ if (!state)
return -ENOMEM;
- ahash_request_set_tfm(subreq, tfm);
- ahash_request_set_callback(subreq, flags, cplt, req);
-
- result = (u8 *)(subreq + 1) + reqsize;
-
- ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
-
- if (has_state) {
- void *state;
-
- state = kmalloc(crypto_ahash_statesize(tfm), gfp);
- if (!state) {
- kfree(subreq);
- return -ENOMEM;
- }
-
- crypto_ahash_export(req, state);
- crypto_ahash_import(subreq, state);
- kfree_sensitive(state);
- }
-
- req->priv = subreq;
+ state->compl = req->base.complete;
+ state->data = req->base.data;
+ req->base.complete = cplt;
+ req->base.data = state;
+ state->req0 = req;
return 0;
}
-static void ahash_restore_req(struct ahash_request *req, int err)
+static void ahash_restore_req(struct ahash_request *req)
{
- struct ahash_request *subreq = req->priv;
+ struct ahash_save_req_state *state;
+ struct crypto_ahash *tfm;
- if (!err)
- memcpy(req->result, subreq->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+ tfm = crypto_ahash_reqtfm(req);
+ if (!ahash_is_async(tfm))
+ return;
- req->priv = NULL;
+ state = req->base.data;
- kfree_sensitive(subreq);
+ req->base.complete = state->compl;
+ req->base.data = state->data;
+ kfree(state);
}
int crypto_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash))
- return shash_ahash_update(req, ahash_request_ctx(req));
+ if (likely(tfm->using_shash)) {
+ int err;
+
+ err = shash_ahash_update(req, ahash_request_ctx(req));
+ req->base.err = err;
+ return err;
+ }
- return crypto_ahash_alg(tfm)->update(req);
+ return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);
@@ -356,10 +612,15 @@ int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash))
- return crypto_shash_final(ahash_request_ctx(req), req->result);
+ if (likely(tfm->using_shash)) {
+ int err;
- return crypto_ahash_alg(tfm)->final(req);
+ err = crypto_shash_final(ahash_request_ctx(req), req->result);
+ req->base.err = err;
+ return err;
+ }
+
+ return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
@@ -367,86 +628,164 @@ int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash))
- return shash_ahash_finup(req, ahash_request_ctx(req));
+ if (likely(tfm->using_shash)) {
+ int err;
+
+ err = shash_ahash_finup(req, ahash_request_ctx(req));
+ req->base.err = err;
+ return err;
+ }
+
+ if (!crypto_ahash_alg(tfm)->finup ||
+ (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)))
+ return ahash_def_finup(req);
- return crypto_ahash_alg(tfm)->finup(req);
+ return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
+static int ahash_def_digest_finish(struct ahash_request *req, int err)
+{
+ struct crypto_ahash *tfm;
+
+ if (err)
+ goto out;
+
+ tfm = crypto_ahash_reqtfm(req);
+ if (ahash_is_async(tfm))
+ req->base.complete = ahash_def_finup_done1;
+
+ err = crypto_ahash_update(req);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return err;
+
+ return ahash_def_finup_finish1(req, err);
+
+out:
+ ahash_restore_req(req);
+ return err;
+}
+
+static void ahash_def_digest_done(void *data, int err)
+{
+ struct ahash_save_req_state *state0 = data;
+ struct ahash_save_req_state state;
+ struct ahash_request *areq;
+
+ state = *state0;
+ areq = state.req0;
+ if (err == -EINPROGRESS)
+ goto out;
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = ahash_def_digest_finish(areq, err);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return;
+
+out:
+ state.compl(state.data, err);
+}
+
+static int ahash_def_digest(struct ahash_request *req)
+{
+ int err;
+
+ err = ahash_save_req(req, ahash_def_digest_done);
+ if (err)
+ return err;
+
+ err = crypto_ahash_init(req);
+ if (err == -EINPROGRESS || err == -EBUSY)
+ return err;
+
+ return ahash_def_digest_finish(req, err);
+}
+
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- if (likely(tfm->using_shash))
- return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+ if (likely(tfm->using_shash)) {
+ int err;
+
+ err = shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+ req->base.err = err;
+ return err;
+ }
+
+ if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))
+ return ahash_def_digest(req);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
- return crypto_ahash_alg(tfm)->digest(req);
+ return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
static void ahash_def_finup_done2(void *data, int err)
{
- struct ahash_request *areq = data;
+ struct ahash_save_req_state *state = data;
+ struct ahash_request *areq = state->req0;
if (err == -EINPROGRESS)
return;
- ahash_restore_req(areq, err);
-
+ ahash_restore_req(areq);
ahash_request_complete(areq, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
- struct ahash_request *subreq = req->priv;
+ struct crypto_ahash *tfm;
if (err)
goto out;
- subreq->base.complete = ahash_def_finup_done2;
+ tfm = crypto_ahash_reqtfm(req);
+ if (ahash_is_async(tfm))
+ req->base.complete = ahash_def_finup_done2;
- err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
+ err = crypto_ahash_final(req);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
out:
- ahash_restore_req(req, err);
+ ahash_restore_req(req);
return err;
}
static void ahash_def_finup_done1(void *data, int err)
{
- struct ahash_request *areq = data;
- struct ahash_request *subreq;
+ struct ahash_save_req_state *state0 = data;
+ struct ahash_save_req_state state;
+ struct ahash_request *areq;
+ state = *state0;
+ areq = state.req0;
if (err == -EINPROGRESS)
goto out;
- subreq = areq->priv;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = ahash_def_finup_finish1(areq, err);
if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
- ahash_request_complete(areq, err);
+ state.compl(state.data, err);
}
static int ahash_def_finup(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
int err;
- err = ahash_save_req(req, ahash_def_finup_done1, true);
+ err = ahash_save_req(req, ahash_def_finup_done1);
if (err)
return err;
- err = crypto_ahash_alg(tfm)->update(req->priv);
+ err = crypto_ahash_update(req);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
@@ -489,6 +828,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct ahash_alg *alg = crypto_ahash_alg(hash);
crypto_ahash_set_statesize(hash, alg->halg.statesize);
+ crypto_ahash_set_reqsize(hash, alg->reqsize);
if (tfm->__crt_alg->cra_type == &crypto_shash_type)
return crypto_init_ahash_using_shash(tfm);
@@ -536,8 +876,8 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
- seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no");
+ seq_printf(m, "async : %s\n",
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n",
__crypto_hash_alg_common(alg)->digestsize);
@@ -654,6 +994,9 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
if (alg->halg.statesize == 0)
return -EINVAL;
+ if (alg->reqsize && alg->reqsize < alg->halg.statesize)
+ return -EINVAL;
+
err = hash_prepare_alg(&alg->halg);
if (err)
return err;
@@ -661,8 +1004,6 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
base->cra_type = &crypto_ahash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
- if (!alg->finup)
- alg->finup = ahash_def_finup;
if (!alg->setkey)
alg->setkey = ahash_nosetkey;
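
Hash requests can now carry a kernel virtual address as well (ahash_request_set_virt()); when a driver cannot handle that directly, ahash_do_req_chain() bounces the data through state->page one page at a time. A caller-side sketch, assuming a synchronous tfm so the on-stack request macro is safe to use:

#include <crypto/hash.h>

static int digest_virt_buf(struct crypto_ahash *tfm, const u8 *buf,
			   unsigned int len, u8 *out)
{
	AHASH_REQUEST_ON_STACK(req, tfm);
	int err;

	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_virt(req, buf, out, len);	/* no scatterlist */
	err = crypto_ahash_digest(req);
	ahash_request_zero(req);
	return err;
}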
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 5318c214debb..ea9ed9580aa8 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -464,8 +464,7 @@ void crypto_unregister_alg(struct crypto_alg *alg)
if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
return;
- if (alg->cra_destroy)
- alg->cra_destroy(alg);
+ crypto_alg_put(alg);
crypto_remove_final(&list);
}
@@ -955,7 +954,7 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
queue->backlog = queue->backlog->next;
request = queue->list.next;
- list_del(request);
+ list_del_init(request);
return list_entry(request, struct crypto_async_request, list);
}
diff --git a/crypto/api.c b/crypto/api.c
index bfd177a4313a..3416e98128a0 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -36,7 +36,8 @@ EXPORT_SYMBOL_GPL(crypto_chain);
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif
-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
+ u32 type, u32 mask);
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
u32 mask);
@@ -145,7 +146,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
if (alg != &larval->alg) {
kfree(larval);
if (crypto_is_larval(alg))
- alg = crypto_larval_wait(alg);
+ alg = crypto_larval_wait(alg, type, mask);
}
return alg;
@@ -197,7 +198,8 @@ static void crypto_start_test(struct crypto_larval *larval)
crypto_schedule_test(larval);
}
-static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg,
+ u32 type, u32 mask)
{
struct crypto_larval *larval;
long time_left;
@@ -219,12 +221,7 @@ again:
crypto_larval_kill(larval);
alg = ERR_PTR(-ETIMEDOUT);
} else if (!alg) {
- u32 type;
- u32 mask;
-
alg = &larval->alg;
- type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
- mask = larval->mask;
alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
ERR_PTR(-EAGAIN);
} else if (IS_ERR(alg))
@@ -304,7 +301,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
}
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
- alg = crypto_larval_wait(alg);
+ alg = crypto_larval_wait(alg, type, mask);
else if (alg)
;
else if (!(mask & CRYPTO_ALG_TESTED))
@@ -352,7 +349,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_STOP)
- alg = crypto_larval_wait(larval);
+ alg = crypto_larval_wait(larval, type, mask);
else {
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
@@ -386,10 +383,6 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
case CRYPTO_ALG_TYPE_CIPHER:
len += crypto_cipher_ctxsize(alg);
break;
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- len += crypto_compress_ctxsize(alg);
- break;
}
return len;
@@ -710,5 +703,15 @@ void crypto_req_done(void *data, int err)
}
EXPORT_SYMBOL_GPL(crypto_req_done);
+void crypto_destroy_alg(struct crypto_alg *alg)
+{
+ if (alg->cra_type && alg->cra_type->destroy)
+ alg->cra_type->destroy(alg);
+
+ if (alg->cra_destroy)
+ alg->cra_destroy(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_destroy_alg);
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index bbd07a9022e6..bf165d321440 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -267,7 +267,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
struct crypto_sig *sig;
char *key, *ptr;
bool issig;
- int ksz;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -302,8 +301,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
if (ret)
goto error_free_tfm;
-
- ksz = crypto_sig_keysize(sig);
} else {
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm)) {
@@ -317,8 +314,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret)
goto error_free_tfm;
-
- ksz = crypto_akcipher_maxsize(tfm);
}
ret = -EINVAL;
@@ -347,8 +342,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
BUG();
}
- if (ret == 0)
- ret = ksz;
+ if (!issig && ret == 0)
+ ret = crypto_akcipher_maxsize(tfm);
error_free_tfm:
if (issig)
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 1a3855284091..2c499654a36c 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -389,32 +389,6 @@ async_xor_val_offs(struct page *dest, unsigned int offset,
}
EXPORT_SYMBOL_GPL(async_xor_val_offs);
-/**
- * async_xor_val - attempt a xor parity check with a dma engine.
- * @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages
- * @offset: offset in pages to start transaction
- * @src_cnt: number of source pages
- * @len: length in bytes
- * @result: 0 if sum == 0 else non-zero
- * @submit: submission / completion modifiers
- *
- * honored flags: ASYNC_TX_ACK
- *
- * src_list note: if the dest is also a source it must be at index zero.
- * The contents of this array will be overwritten if a scribble region
- * is not specified.
- */
-struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum sum_check_flags *result,
- struct async_submit_ctl *submit)
-{
- return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt,
- len, result, submit);
-}
-EXPORT_SYMBOL_GPL(async_xor_val);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");
diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c
index b5e657415770..a88798d3e8c8 100644
--- a/crypto/bpf_crypto_skcipher.c
+++ b/crypto/bpf_crypto_skcipher.c
@@ -80,3 +80,4 @@ static void __exit bpf_crypto_skcipher_exit(void)
module_init(bpf_crypto_skcipher_init);
module_exit(bpf_crypto_skcipher_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Symmetric key cipher support for BPF");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
index ba7fcb47f9aa..1fb9fbd302c6 100644
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha_generic.c
@@ -21,7 +21,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
err = skcipher_walk_virt(&walk, req, false);
- chacha_init_generic(state, ctx->key, iv);
+ chacha_init(state, ctx->key, iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -54,7 +54,7 @@ static int crypto_xchacha_crypt(struct skcipher_request *req)
u8 real_iv[16];
/* Compute the subkey given the original key and first 128 nonce bits */
- chacha_init_generic(state, ctx->key, req->iv);
+ chacha_init(state, ctx->key, req->iv);
hchacha_block_generic(state, subctx.key, ctx->nrounds);
subctx.nrounds = ctx->nrounds;
diff --git a/crypto/compress.c b/crypto/compress.c
deleted file mode 100644
index 9048fe390c46..000000000000
--- a/crypto/compress.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cryptographic API.
- *
- * Compression operations.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- */
-#include <linux/crypto.h>
-#include "internal.h"
-
-int crypto_comp_compress(struct crypto_comp *comp,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_tfm *tfm = crypto_comp_tfm(comp);
-
- return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst,
- dlen);
-}
-EXPORT_SYMBOL_GPL(crypto_comp_compress);
-
-int crypto_comp_decompress(struct crypto_comp *comp,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_tfm *tfm = crypto_comp_tfm(comp);
-
- return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst,
- dlen);
-}
-EXPORT_SYMBOL_GPL(crypto_comp_decompress);
diff --git a/crypto/compress.h b/crypto/compress.h
index c3cedfb5e606..f7737a1fcbbd 100644
--- a/crypto/compress.h
+++ b/crypto/compress.h
@@ -15,8 +15,6 @@ struct acomp_req;
struct comp_alg_common;
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
void comp_prepare_alg(struct comp_alg_common *alg);
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 985da981d6e2..b1a36d32dc50 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -85,7 +85,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = crc32c_le_base(ctx->crc, data, length);
+ ctx->crc = crc32c_base(ctx->crc, data, length);
return 0;
}
@@ -94,7 +94,7 @@ static int chksum_update_arch(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = __crc32c_le(ctx->crc, data, length);
+ ctx->crc = crc32c(ctx->crc, data, length);
return 0;
}
@@ -108,14 +108,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
- put_unaligned_le32(~crc32c_le_base(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c_base(*crcp, data, len), out);
return 0;
}
static int __chksum_finup_arch(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- put_unaligned_le32(~__crc32c_le(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c(*crcp, data, len), out);
return 0;
}
diff --git a/crypto/crc64_rocksoft_generic.c b/crypto/crc64_rocksoft_generic.c
deleted file mode 100644
index ce0f3059b912..000000000000
--- a/crypto/crc64_rocksoft_generic.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/crc64.h>
-#include <linux/module.h>
-#include <crypto/internal/hash.h>
-#include <linux/unaligned.h>
-
-static int chksum_init(struct shash_desc *desc)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- *crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- *crc = crc64_rocksoft_generic(*crc, data, length);
-
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le64(*crc, out);
- return 0;
-}
-
-static int __chksum_finup(u64 crc, const u8 *data, unsigned int len, u8 *out)
-{
- crc = crc64_rocksoft_generic(crc, data, len);
- put_unaligned_le64(crc, out);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- return __chksum_finup(*crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static struct shash_alg alg = {
- .digestsize = sizeof(u64),
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(u64),
- .base = {
- .cra_name = CRC64_ROCKSOFT_STRING,
- .cra_driver_name = "crc64-rocksoft-generic",
- .cra_priority = 200,
- .cra_blocksize = 1,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init crc64_rocksoft_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit crc64_rocksoft_exit(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crc64_rocksoft_init);
-module_exit(crc64_rocksoft_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Rocksoft model CRC64 calculation.");
-MODULE_ALIAS_CRYPTO("crc64-rocksoft");
-MODULE_ALIAS_CRYPTO("crc64-rocksoft-generic");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
deleted file mode 100644
index 259cb01932cb..000000000000
--- a/crypto/crct10dif_generic.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform
- *
- * Copyright (c) 2007 Oracle Corporation. All rights reserved.
- * Written by Martin K. Petersen <martin.petersen@oracle.com>
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/crc-t10dif.h>
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-struct chksum_desc_ctx {
- __u16 crc;
-};
-
-/*
- * Steps through buffer one byte at a time, calculates reflected
- * crc using table.
- */
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_update_arch(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc_t10dif_update(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- *(__u16 *)out = ctx->crc;
- return 0;
-}
-
-static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
-{
- *(__u16 *)out = crc_t10dif_generic(crc, data, len);
- return 0;
-}
-
-static int __chksum_finup_arch(__u16 crc, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(__u16 *)out = crc_t10dif_update(crc, data, len);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_finup_arch(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup_arch(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static int chksum_digest_arch(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup_arch(0, data, length, out);
-}
-
-static struct shash_alg algs[] = {{
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update_arch,
- .final = chksum_final,
- .finup = chksum_finup_arch,
- .digest = chksum_digest_arch,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-" __stringify(ARCH),
- .base.cra_priority = 150,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}};
-
-static int num_algs;
-
-static int __init crct10dif_mod_init(void)
-{
- /* register the arch flavor only if it differs from the generic one */
- num_algs = 1 + crc_t10dif_is_optimized();
-
- return crypto_register_shashes(algs, num_algs);
-}
-
-static void __exit crct10dif_mod_fini(void)
-{
- crypto_unregister_shashes(algs, num_algs);
-}
-
-subsys_initcall(crct10dif_mod_init);
-module_exit(crct10dif_mod_fini);
-
-MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
-MODULE_DESCRIPTION("T10 DIF CRC calculation.");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("crct10dif");
-MODULE_ALIAS_CRYPTO("crct10dif-generic");
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 5b84b0f7cc17..ced90f88ee07 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -17,23 +17,13 @@
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/mm.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
-static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
+static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock);
static struct crypto_sync_skcipher *crypto_default_null_skcipher;
static int crypto_default_null_skcipher_refcnt;
-static int null_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- if (slen > *dlen)
- return -EINVAL;
- memcpy(dst, src, slen);
- *dlen = slen;
- return 0;
-}
-
static int null_init(struct shash_desc *desc)
{
return 0;
@@ -121,7 +111,7 @@ static struct skcipher_alg skcipher_null = {
.decrypt = null_skcipher_crypt,
};
-static struct crypto_alg null_algs[] = { {
+static struct crypto_alg cipher_null = {
.cra_name = "cipher_null",
.cra_driver_name = "cipher_null-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
@@ -134,41 +124,39 @@ static struct crypto_alg null_algs[] = { {
.cia_setkey = null_setkey,
.cia_encrypt = null_crypt,
.cia_decrypt = null_crypt } }
-}, {
- .cra_name = "compress_null",
- .cra_driver_name = "compress_null-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_blocksize = NULL_BLOCK_SIZE,
- .cra_ctxsize = 0,
- .cra_module = THIS_MODULE,
- .cra_u = { .compress = {
- .coa_compress = null_compress,
- .coa_decompress = null_compress } }
-} };
+};
-MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
{
+ struct crypto_sync_skcipher *ntfm = NULL;
struct crypto_sync_skcipher *tfm;
- mutex_lock(&crypto_default_null_skcipher_lock);
+ spin_lock_bh(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
- tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
- if (IS_ERR(tfm))
- goto unlock;
-
- crypto_default_null_skcipher = tfm;
+ spin_unlock_bh(&crypto_default_null_skcipher_lock);
+
+ ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
+ if (IS_ERR(ntfm))
+ return ntfm;
+
+ spin_lock_bh(&crypto_default_null_skcipher_lock);
+ tfm = crypto_default_null_skcipher;
+ if (!tfm) {
+ tfm = ntfm;
+ ntfm = NULL;
+ crypto_default_null_skcipher = tfm;
+ }
}
crypto_default_null_skcipher_refcnt++;
+ spin_unlock_bh(&crypto_default_null_skcipher_lock);
-unlock:
- mutex_unlock(&crypto_default_null_skcipher_lock);
+ crypto_free_sync_skcipher(ntfm);
return tfm;
}
@@ -176,12 +164,16 @@ EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
void crypto_put_default_null_skcipher(void)
{
- mutex_lock(&crypto_default_null_skcipher_lock);
+ struct crypto_sync_skcipher *tfm = NULL;
+
+ spin_lock_bh(&crypto_default_null_skcipher_lock);
if (!--crypto_default_null_skcipher_refcnt) {
- crypto_free_sync_skcipher(crypto_default_null_skcipher);
+ tfm = crypto_default_null_skcipher;
crypto_default_null_skcipher = NULL;
}
- mutex_unlock(&crypto_default_null_skcipher_lock);
+ spin_unlock_bh(&crypto_default_null_skcipher_lock);
+
+ crypto_free_sync_skcipher(tfm);
}
EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
@@ -189,7 +181,7 @@ static int __init crypto_null_mod_init(void)
{
int ret = 0;
- ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs));
+ ret = crypto_register_alg(&cipher_null);
if (ret < 0)
goto out;
@@ -206,14 +198,14 @@ static int __init crypto_null_mod_init(void)
out_unregister_shash:
crypto_unregister_shash(&digest_null);
out_unregister_algs:
- crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
+ crypto_unregister_alg(&cipher_null);
out:
return ret;
}
static void __exit crypto_null_mod_fini(void)
{
- crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
+ crypto_unregister_alg(&cipher_null);
crypto_unregister_shash(&digest_null);
crypto_unregister_skcipher(&skcipher_null);
}
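
The rewrite above is the classic optimistic-allocation pattern: crypto_alloc_sync_skcipher() can sleep, so it cannot be called under a spinlock; the lock is dropped for the allocation and the singleton is re-checked once the lock is retaken, with the losing allocation freed outside the lock. A minimal sketch of the same pattern for a generic resource (struct res, alloc_res() and free_res() are illustrative stand-ins, not kernel APIs; like crypto_free_sync_skcipher(), the free routine must tolerate NULL):

static DEFINE_SPINLOCK(res_lock);
static struct res *res_singleton;
static int res_refcnt;

struct res *get_res_singleton(void)
{
        struct res *nres = NULL, *r;

        spin_lock_bh(&res_lock);
        r = res_singleton;
        if (!r) {
                spin_unlock_bh(&res_lock);      /* cannot sleep under the lock */
                nres = alloc_res();             /* may sleep */
                if (IS_ERR(nres))
                        return nres;
                spin_lock_bh(&res_lock);
                r = res_singleton;
                if (!r) {                       /* we won the installation race */
                        r = nres;
                        nres = NULL;
                        res_singleton = r;
                }
        }
        res_refcnt++;
        spin_unlock_bh(&res_lock);
        free_res(nres);                         /* drop the losing allocation */
        return r;
}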
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 6c571834e86a..aad429bef03e 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -84,17 +84,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
sizeof(rcipher), &rcipher);
}
-static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_comp rcomp;
-
- memset(&rcomp, 0, sizeof(rcomp));
-
- strscpy(rcomp.type, "compression", sizeof(rcomp.type));
-
- return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
-}
-
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
@@ -136,11 +125,6 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- if (crypto_report_comp(skb, alg))
- goto nla_put_failure;
-
- break;
}
out:
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 73c0d6e53b2f..97a947b0a876 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -33,7 +33,7 @@ static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
u8 *ctrblk = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
@@ -50,7 +50,7 @@ static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned int bsize = crypto_cipher_blocksize(tfm);
u8 *ctrblk = walk->iv;
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
@@ -77,20 +77,20 @@ static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
unsigned int bsize = crypto_cipher_blocksize(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
+ u8 *dst = walk->dst.virt.addr;
u8 *ctrblk = walk->iv;
- u8 *src = walk->src.virt.addr;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
do {
/* create keystream */
fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
- crypto_xor(src, keystream, bsize);
+ crypto_xor(dst, keystream, bsize);
/* increment counter in counterblock */
crypto_inc(ctrblk, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
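
With walk->src now const-qualified, the in-place path must XOR the keystream through the destination pointer (which aliases the source for in-place walks) rather than writing through src. A freestanding sketch of the whole-block CTR loop, assuming a caller-supplied single-block encrypt callback and a big-endian counter (illustrative only; the kernel uses crypto_xor() and crypto_inc() for these steps):

#include <stddef.h>
#include <stdint.h>

typedef void (*block_fn)(const void *key, uint8_t *out, const uint8_t *in);

/* Big-endian increment of the counter block, carrying from the last byte. */
static void ctr_inc(uint8_t *ctr, size_t bsize)
{
        while (bsize--)
                if (++ctr[bsize])
                        break;
}

/* Returns the number of leftover bytes in the final partial block. */
static size_t ctr_crypt(const void *key, block_fn block, uint8_t *ctr,
                        uint8_t *dst, const uint8_t *src,
                        size_t nbytes, size_t bsize)
{
        uint8_t ks[16];                 /* assumes bsize <= 16 */

        while (nbytes >= bsize) {
                block(key, ks, ctr);    /* generate one keystream block */
                for (size_t i = 0; i < bsize; i++)
                        dst[i] = src[i] ^ ks[i];        /* write to dst, never src */
                ctr_inc(ctr, bsize);
                src += bsize;
                dst += bsize;
                nbytes -= bsize;
        }
        return nbytes;
}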
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 98e8bcb81a6a..5c346c544093 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -112,7 +112,7 @@ out:
return ret;
}
-static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
+static void *deflate_alloc_ctx(void)
{
struct deflate_ctx *ctx;
int ret;
@@ -130,32 +130,18 @@ static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int deflate_init(struct crypto_tfm *tfm)
-{
- struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __deflate_init(ctx);
-}
-
static void __deflate_exit(void *ctx)
{
deflate_comp_exit(ctx);
deflate_decomp_exit(ctx);
}
-static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void deflate_free_ctx(void *ctx)
{
__deflate_exit(ctx);
kfree_sensitive(ctx);
}
-static void deflate_exit(struct crypto_tfm *tfm)
-{
- struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
-
- __deflate_exit(ctx);
-}
-
static int __deflate_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -185,14 +171,6 @@ out:
return ret;
}
-static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
-
- return __deflate_compress(src, slen, dst, dlen, dctx);
-}
-
static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -241,14 +219,6 @@ out:
return ret;
}
-static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
-
- return __deflate_decompress(src, slen, dst, dlen, dctx);
-}
-
static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -256,19 +226,6 @@ static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __deflate_decompress(src, slen, dst, dlen, ctx);
}
-static struct crypto_alg alg = {
- .cra_name = "deflate",
- .cra_driver_name = "deflate-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct deflate_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = deflate_init,
- .cra_exit = deflate_exit,
- .cra_u = { .compress = {
- .coa_compress = deflate_compress,
- .coa_decompress = deflate_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = deflate_alloc_ctx,
.free_ctx = deflate_free_ctx,
@@ -283,24 +240,11 @@ static struct scomp_alg scomp = {
static int __init deflate_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit deflate_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 50ad2d4ed672..6cf9a945fc6c 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(ecc_get_curve);
void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
u64 *out, unsigned int ndigits)
{
- int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64));
+ int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64));
unsigned int o = nbytes & 7;
__be64 msd = 0;
diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c
index eaae7214d69b..4454f1f8f33f 100644
--- a/crypto/ecdsa-p1363.c
+++ b/crypto/ecdsa-p1363.c
@@ -22,7 +22,7 @@ static int ecdsa_p1363_verify(struct crypto_sig *tfm,
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int keylen = crypto_sig_keysize(ctx->child);
- unsigned int ndigits = DIV_ROUND_UP(keylen, sizeof(u64));
+ unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64));
struct ecdsa_raw_sig sig;
if (slen != 2 * keylen)
diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c
index 6a77c13e192b..90a04f4b9a2f 100644
--- a/crypto/ecdsa-x962.c
+++ b/crypto/ecdsa-x962.c
@@ -81,8 +81,8 @@ static int ecdsa_x962_verify(struct crypto_sig *tfm,
struct ecdsa_x962_signature_ctx sig_ctx;
int err;
- sig_ctx.ndigits = DIV_ROUND_UP(crypto_sig_keysize(ctx->child),
- sizeof(u64));
+ sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child),
+ sizeof(u64));
err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen);
if (err < 0)
diff --git a/crypto/essiv.c b/crypto/essiv.c
index 1c00c3324058..ec0ec8992c2d 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -405,8 +405,7 @@ static bool parse_cipher_name(char *essiv_cipher_name, const char *cra_name)
if (len >= CRYPTO_MAX_ALG_NAME)
return false;
- memcpy(essiv_cipher_name, p, len);
- essiv_cipher_name[len] = '\0';
+ strscpy(essiv_cipher_name, p, len + 1);
return true;
}
diff --git a/crypto/hkdf.c b/crypto/hkdf.c
new file mode 100644
index 000000000000..2434c5c42545
--- /dev/null
+++ b/crypto/hkdf.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation
+ * Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
+ * "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <crypto/hkdf.h>
+#include <linux/module.h>
+
+/*
+ * HKDF consists of two steps:
+ *
+ * 1. HKDF-Extract: extract a pseudorandom key from the input keying material
+ * and optional salt.
+ * 2. HKDF-Expand: expand the pseudorandom key into output keying material of
+ * any length, parameterized by an application-specific info string.
+ *
+ */
+
+/**
+ * hkdf_extract - HKDF-Extract (RFC 5869 section 2.2)
+ * @hmac_tfm: an HMAC transform using the hash function desired for HKDF. The
+ * caller is responsible for keying @hmac_tfm with the resulting @prk
+ * afterwards, as hkdf_expand() expects.
+ * @ikm: input keying material
+ * @ikmlen: length of @ikm
+ * @salt: input salt value
+ * @saltlen: length of @salt
+ * @prk: resulting pseudorandom key
+ *
+ * Extracts a pseudorandom key @prk from the input keying material
+ * @ikm with length @ikmlen and salt @salt with length @saltlen.
+ * The length of @prk is given by the digest size of @hmac_tfm.
+ * For an 'unsalted' version of HKDF-Extract @salt must be set
+ * to all zeroes and @saltlen must be set to the length of @prk.
+ *
+ * Returns 0 on success with the pseudorandom key stored in @prk,
+ * or a negative errno value otherwise.
+ */
+int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
+ unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
+ u8 *prk)
+{
+ int err;
+
+ err = crypto_shash_setkey(hmac_tfm, salt, saltlen);
+ if (!err)
+ err = crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hkdf_extract);
+
+/**
+ * hkdf_expand - HKDF-Expand (RFC 5869 section 2.3)
+ * @hmac_tfm: hash context keyed with pseudorandom key
+ * @info: application-specific information
+ * @infolen: length of @info
+ * @okm: output keying material
+ * @okmlen: length of @okm
+ *
+ * This expands the pseudorandom key, which was already keyed into @hmac_tfm,
+ * into @okmlen bytes of output keying material parameterized by the
+ * application-specific @info of length @infolen bytes.
+ * This is thread-safe and may be called by multiple threads in parallel.
+ *
+ * Returns 0 on success with output keying material stored in @okm,
+ * or a negative errno value otherwise.
+ */
+int hkdf_expand(struct crypto_shash *hmac_tfm,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen)
+{
+ SHASH_DESC_ON_STACK(desc, hmac_tfm);
+ unsigned int i, hashlen = crypto_shash_digestsize(hmac_tfm);
+ int err;
+ const u8 *prev = NULL;
+ u8 counter = 1;
+ u8 tmp[HASH_MAX_DIGESTSIZE] = {};
+
+ if (WARN_ON(okmlen > 255 * hashlen))
+ return -EINVAL;
+
+ desc->tfm = hmac_tfm;
+
+ for (i = 0; i < okmlen; i += hashlen) {
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+
+ if (prev) {
+ err = crypto_shash_update(desc, prev, hashlen);
+ if (err)
+ goto out;
+ }
+
+ if (infolen) {
+ err = crypto_shash_update(desc, info, infolen);
+ if (err)
+ goto out;
+ }
+
+ BUILD_BUG_ON(sizeof(counter) != 1);
+ if (okmlen - i < hashlen) {
+ err = crypto_shash_finup(desc, &counter, 1, tmp);
+ if (err)
+ goto out;
+ memcpy(&okm[i], tmp, okmlen - i);
+ memzero_explicit(tmp, sizeof(tmp));
+ } else {
+ err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
+ if (err)
+ goto out;
+ }
+ counter++;
+ prev = &okm[i];
+ }
+ err = 0;
+out:
+ if (unlikely(err))
+ memzero_explicit(okm, okmlen); /* so caller doesn't need to */
+ shash_desc_zero(desc);
+ memzero_explicit(tmp, HASH_MAX_DIGESTSIZE);
+ return err;
+}
+EXPORT_SYMBOL_GPL(hkdf_expand);
+
+struct hkdf_testvec {
+ const char *test;
+ const u8 *ikm;
+ const u8 *salt;
+ const u8 *info;
+ const u8 *prk;
+ const u8 *okm;
+ u16 ikm_size;
+ u16 salt_size;
+ u16 info_size;
+ u16 prk_size;
+ u16 okm_size;
+};
+
+/*
+ * HKDF test vectors from RFC5869
+ *
+ * Additional HKDF test vectors from
+ * https://github.com/brycx/Test-Vector-Generation/blob/master/HKDF/hkdf-hmac-sha2-test-vectors.md
+ */
+static const struct hkdf_testvec hkdf_sha256_tv[] = {
+ {
+ .test = "basic hdkf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x07\x77\x09\x36\x2c\x2e\x32\xdf\x0d\xdc\x3f\x0d\xc4\x7b\xba\x63"
+ "\x90\xb6\xc7\x3b\xb5\x0f\x9c\x31\x22\xec\x84\x4a\xd7\xc2\xb3\xe5",
+ .prk_size = 32,
+ .okm = "\x3c\xb2\x5f\x25\xfa\xac\xd5\x7a\x90\x43\x4f\x64\xd0\x36\x2f\x2a"
+ "\x2d\x2d\x0a\x90\xcf\x1a\x5a\x4c\x5d\xb0\x2d\x56\xec\xc4\xc5\xbf"
+ "\x34\x00\x72\x08\xd5\xb8\x87\x18\x58\x65",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\x06\xa6\xb8\x8c\x58\x53\x36\x1a\x06\x10\x4c\x9c\xeb\x35\xb4\x5c"
+ "\xef\x76\x00\x14\x90\x46\x71\x01\x4a\x19\x3f\x40\xc1\x5f\xc2\x44",
+ .prk_size = 32,
+ .okm = "\xb1\x1e\x39\x8d\xc8\x03\x27\xa1\xc8\xe7\xf7\x8c\x59\x6a\x49\x34"
+ "\x4f\x01\x2e\xda\x2d\x4e\xfa\xd8\xa0\x50\xcc\x4c\x19\xaf\xa9\x7c"
+ "\x59\x04\x5a\x99\xca\xc7\x82\x72\x71\xcb\x41\xc6\x5e\x59\x0e\x09"
+ "\xda\x32\x75\x60\x0c\x2f\x09\xb8\x36\x77\x93\xa9\xac\xa3\xdb\x71"
+ "\xcc\x30\xc5\x81\x79\xec\x3e\x87\xc1\x4c\x01\xd5\xc1\xf3\x43\x4f"
+ "\x1d\x87",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x19\xef\x24\xa3\x2c\x71\x7b\x16\x7f\x33\xa9\x1d\x6f\x64\x8b\xdf"
+ "\x96\x59\x67\x76\xaf\xdb\x63\x77\xac\x43\x4c\x1c\x29\x3c\xcb\x04",
+ .prk_size = 32,
+ .okm = "\x8d\xa4\xe7\x75\xa5\x63\xc1\x8f\x71\x5f\x80\x2a\x06\x3c\x5a\x31"
+ "\xb8\xa1\x1f\x5c\x5e\xe1\x87\x9e\xc3\x45\x4e\x5f\x3c\x73\x8d\x2d"
+ "\x9d\x20\x13\x95\xfa\xa4\xb6\x1a\x96\xc8",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x82\x65\xf6\x9d\x7f\xf7\xe5\x01\x37\x93\x01\x5c\xa0\xef\x92\x0c"
+ "\xb1\x68\x21\x99\xc8\xbc\x3a\x00\xda\x0c\xab\x47\xb7\xb0\x0f\xdf",
+ .prk_size = 32,
+ .okm = "\x58\xdc\xe1\x0d\x58\x01\xcd\xfd\xa8\x31\x72\x6b\xfe\xbc\xb7\x43"
+ "\xd1\x4a\x7e\xe8\x3a\xa0\x57\xa9\x3d\x59\xb0\xa1\x31\x7f\xf0\x9d"
+ "\x10\x5c\xce\xcf\x53\x56\x92\xb1\x4d\xd5",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 32,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\xaa\x84\x1e\x1f\x35\x74\xf3\x2d\x13\xfb\xa8\x00\x5f\xcd\x9b\x8d"
+ "\x77\x67\x82\xa5\xdf\xa1\x92\x38\x92\xfd\x8b\x63\x5d\x3a\x89\xdf",
+ .prk_size = 32,
+ .okm = "\x59\x68\x99\x17\x9a\xb1\xbc\x00\xa7\xc0\x37\x86\xff\x43\xee\x53"
+ "\x50\x04\xbe\x2b\xb9\xbe\x68\xbc\x14\x06\x63\x6f\x54\xbd\x33\x8a"
+ "\x66\xa2\x37\xba\x2a\xcb\xce\xe3\xc9\xa7",
+ .okm_size = 42,
+ }
+};
+
+static const struct hkdf_testvec hkdf_sha384_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x70\x4b\x39\x99\x07\x79\xce\x1d\xc5\x48\x05\x2c\x7d\xc3\x9f\x30"
+ "\x35\x70\xdd\x13\xfb\x39\xf7\xac\xc5\x64\x68\x0b\xef\x80\xe8\xde"
+ "\xc7\x0e\xe9\xa7\xe1\xf3\xe2\x93\xef\x68\xec\xeb\x07\x2a\x5a\xde",
+ .prk_size = 48,
+ .okm = "\x9b\x50\x97\xa8\x60\x38\xb8\x05\x30\x90\x76\xa4\x4b\x3a\x9f\x38"
+ "\x06\x3e\x25\xb5\x16\xdc\xbf\x36\x9f\x39\x4c\xfa\xb4\x36\x85\xf7"
+ "\x48\xb6\x45\x77\x63\xe4\xf0\x20\x4f\xc5",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\xb3\x19\xf6\x83\x1d\xff\x93\x14\xef\xb6\x43\xba\xa2\x92\x63\xb3"
+ "\x0e\x4a\x8d\x77\x9f\xe3\x1e\x9c\x90\x1e\xfd\x7d\xe7\x37\xc8\x5b"
+ "\x62\xe6\x76\xd4\xdc\x87\xb0\x89\x5c\x6a\x7d\xc9\x7b\x52\xce\xbb",
+ .prk_size = 48,
+ .okm = "\x48\x4c\xa0\x52\xb8\xcc\x72\x4f\xd1\xc4\xec\x64\xd5\x7b\x4e\x81"
+ "\x8c\x7e\x25\xa8\xe0\xf4\x56\x9e\xd7\x2a\x6a\x05\xfe\x06\x49\xee"
+ "\xbf\x69\xf8\xd5\xc8\x32\x85\x6b\xf4\xe4\xfb\xc1\x79\x67\xd5\x49"
+ "\x75\x32\x4a\x94\x98\x7f\x7f\x41\x83\x58\x17\xd8\x99\x4f\xdb\xd6"
+ "\xf4\xc0\x9c\x55\x00\xdc\xa2\x4a\x56\x22\x2f\xea\x53\xd8\x96\x7a"
+ "\x8b\x2e",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x10\xe4\x0c\xf0\x72\xa4\xc5\x62\x6e\x43\xdd\x22\xc1\xcf\x72\x7d"
+ "\x4b\xb1\x40\x97\x5c\x9a\xd0\xcb\xc8\xe4\x5b\x40\x06\x8f\x8f\x0b"
+ "\xa5\x7c\xdb\x59\x8a\xf9\xdf\xa6\x96\x3a\x96\x89\x9a\xf0\x47\xe5",
+ .prk_size = 48,
+ .okm = "\xc8\xc9\x6e\x71\x0f\x89\xb0\xd7\x99\x0b\xca\x68\xbc\xde\xc8\xcf"
+ "\x85\x40\x62\xe5\x4c\x73\xa7\xab\xc7\x43\xfa\xde\x9b\x24\x2d\xaa"
+ "\xcc\x1c\xea\x56\x70\x41\x5b\x52\x84\x9c",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x6d\x31\x69\x98\x28\x79\x80\x88\xb3\x59\xda\xd5\x0b\x8f\x01\xb0"
+ "\x15\xf1\x7a\xa3\xbd\x4e\x27\xa6\xe9\xf8\x73\xb7\x15\x85\xca\x6a"
+ "\x00\xd1\xf0\x82\x12\x8a\xdb\x3c\xf0\x53\x0b\x57\xc0\xf9\xac\x72",
+ .prk_size = 48,
+ .okm = "\xfb\x7e\x67\x43\xeb\x42\xcd\xe9\x6f\x1b\x70\x77\x89\x52\xab\x75"
+ "\x48\xca\xfe\x53\x24\x9f\x7f\xfe\x14\x97\xa1\x63\x5b\x20\x1f\xf1"
+ "\x85\xb9\x3e\x95\x19\x92\xd8\x58\xf1\x1a",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 48,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x9d\x2d\xa5\x06\x6f\x05\xd1\x6c\x59\xfe\xdf\x6c\x5f\x32\xc7\x5e"
+ "\xda\x9a\x47\xa7\x9c\x93\x6a\xa4\x4c\xb7\x63\xa8\xe2\x2f\xfb\xfc"
+ "\xd8\xfe\x55\x43\x58\x53\x47\x21\x90\x39\xd1\x68\x28\x36\x33\xf5",
+ .prk_size = 48,
+ .okm = "\x6a\xd7\xc7\x26\xc8\x40\x09\x54\x6a\x76\xe0\x54\x5d\xf2\x66\x78"
+ "\x7e\x2b\x2c\xd6\xca\x43\x73\xa1\xf3\x14\x50\xa7\xbd\xf9\x48\x2b"
+ "\xfa\xb8\x11\xf5\x54\x20\x0e\xad\x8f\x53",
+ .okm_size = 42,
+ }
+};
+
+static const struct hkdf_testvec hkdf_sha512_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x66\x57\x99\x82\x37\x37\xde\xd0\x4a\x88\xe4\x7e\x54\xa5\x89\x0b"
+ "\xb2\xc3\xd2\x47\xc7\xa4\x25\x4a\x8e\x61\x35\x07\x23\x59\x0a\x26"
+ "\xc3\x62\x38\x12\x7d\x86\x61\xb8\x8c\xf8\x0e\xf8\x02\xd5\x7e\x2f"
+ "\x7c\xeb\xcf\x1e\x00\xe0\x83\x84\x8b\xe1\x99\x29\xc6\x1b\x42\x37",
+ .prk_size = 64,
+ .okm = "\x83\x23\x90\x08\x6c\xda\x71\xfb\x47\x62\x5b\xb5\xce\xb1\x68\xe4"
+ "\xc8\xe2\x6a\x1a\x16\xed\x34\xd9\xfc\x7f\xe9\x2c\x14\x81\x57\x93"
+ "\x38\xda\x36\x2c\xb8\xd9\xf9\x25\xd7\xcb",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\x35\x67\x25\x42\x90\x7d\x4e\x14\x2c\x00\xe8\x44\x99\xe7\x4e\x1d"
+ "\xe0\x8b\xe8\x65\x35\xf9\x24\xe0\x22\x80\x4a\xd7\x75\xdd\xe2\x7e"
+ "\xc8\x6c\xd1\xe5\xb7\xd1\x78\xc7\x44\x89\xbd\xbe\xb3\x07\x12\xbe"
+ "\xb8\x2d\x4f\x97\x41\x6c\x5a\x94\xea\x81\xeb\xdf\x3e\x62\x9e\x4a",
+ .prk_size = 64,
+ .okm = "\xce\x6c\x97\x19\x28\x05\xb3\x46\xe6\x16\x1e\x82\x1e\xd1\x65\x67"
+ "\x3b\x84\xf4\x00\xa2\xb5\x14\xb2\xfe\x23\xd8\x4c\xd1\x89\xdd\xf1"
+ "\xb6\x95\xb4\x8c\xbd\x1c\x83\x88\x44\x11\x37\xb3\xce\x28\xf1\x6a"
+ "\xa6\x4b\xa3\x3b\xa4\x66\xb2\x4d\xf6\xcf\xcb\x02\x1e\xcf\xf2\x35"
+ "\xf6\xa2\x05\x6c\xe3\xaf\x1d\xe4\x4d\x57\x20\x97\xa8\x50\x5d\x9e"
+ "\x7a\x93",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\xfd\x20\x0c\x49\x87\xac\x49\x13\x13\xbd\x4a\x2a\x13\x28\x71\x21"
+ "\x24\x72\x39\xe1\x1c\x9e\xf8\x28\x02\x04\x4b\x66\xef\x35\x7e\x5b"
+ "\x19\x44\x98\xd0\x68\x26\x11\x38\x23\x48\x57\x2a\x7b\x16\x11\xde"
+ "\x54\x76\x40\x94\x28\x63\x20\x57\x8a\x86\x3f\x36\x56\x2b\x0d\xf6",
+ .prk_size = 64,
+ .okm = "\xf5\xfa\x02\xb1\x82\x98\xa7\x2a\x8c\x23\x89\x8a\x87\x03\x47\x2c"
+ "\x6e\xb1\x79\xdc\x20\x4c\x03\x42\x5c\x97\x0e\x3b\x16\x4b\xf9\x0f"
+ "\xff\x22\xd0\x48\x36\xd0\xe2\x34\x3b\xac",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x67\x40\x9c\x9c\xac\x28\xb5\x2e\xe9\xfa\xd9\x1c\x2f\xda\x99\x9f"
+ "\x7c\xa2\x2e\x34\x34\xf0\xae\x77\x28\x63\x83\x65\x68\xad\x6a\x7f"
+ "\x10\xcf\x11\x3b\xfd\xdd\x56\x01\x29\xa5\x94\xa8\xf5\x23\x85\xc2"
+ "\xd6\x61\xd7\x85\xd2\x9c\xe9\x3a\x11\x40\x0c\x92\x06\x83\x18\x1d",
+ .prk_size = 64,
+ .okm = "\x74\x13\xe8\x99\x7e\x02\x06\x10\xfb\xf6\x82\x3f\x2c\xe1\x4b\xff"
+ "\x01\x87\x5d\xb1\xca\x55\xf6\x8c\xfc\xf3\x95\x4d\xc8\xaf\xf5\x35"
+ "\x59\xbd\x5e\x30\x28\xb0\x80\xf7\xc0\x68",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 64,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x53\x46\xb3\x76\xbf\x3a\xa9\xf8\x4f\x8f\x6e\xd5\xb1\xc4\xf4\x89"
+ "\x17\x2e\x24\x4d\xac\x30\x3d\x12\xf6\x8e\xcc\x76\x6e\xa6\x00\xaa"
+ "\x88\x49\x5e\x7f\xb6\x05\x80\x31\x22\xfa\x13\x69\x24\xa8\x40\xb1"
+ "\xf0\x71\x9d\x2d\x5f\x68\xe2\x9b\x24\x22\x99\xd7\x58\xed\x68\x0c",
+ .prk_size = 64,
+ .okm = "\x14\x07\xd4\x60\x13\xd9\x8b\xc6\xde\xce\xfc\xfe\xe5\x5f\x0f\x90"
+ "\xb0\xc7\xf6\x3d\x68\xeb\x1a\x80\xea\xf0\x7e\x95\x3c\xfc\x0a\x3a"
+ "\x52\x40\xa1\x55\xd6\xe4\xda\xa9\x65\xbb",
+ .okm_size = 42,
+ }
+};
+
+static int hkdf_test(const char *shash, const struct hkdf_testvec *tv)
+{
+ struct crypto_shash *tfm = NULL;
+ u8 *prk = NULL, *okm = NULL;
+ unsigned int prk_size;
+ const char *driver;
+ int err;
+
+ tfm = crypto_alloc_shash(shash, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("%s(%s): failed to allocate transform: %ld\n",
+ tv->test, shash, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ driver = crypto_shash_driver_name(tfm);
+
+ prk_size = crypto_shash_digestsize(tfm);
+ prk = kzalloc(prk_size, GFP_KERNEL);
+ if (!prk) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (tv->prk_size != prk_size) {
+ pr_err("%s(%s): prk size mismatch (vec %u, digest %u\n",
+ tv->test, driver, tv->prk_size, prk_size);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ err = hkdf_extract(tfm, tv->ikm, tv->ikm_size,
+ tv->salt, tv->salt_size, prk);
+ if (err) {
+ pr_err("%s(%s): hkdf_extract failed with %d\n",
+ tv->test, driver, err);
+ goto out_free;
+ }
+
+ if (memcmp(prk, tv->prk, tv->prk_size)) {
+ pr_err("%s(%s): hkdf_extract prk mismatch\n",
+ tv->test, driver);
+ print_hex_dump(KERN_ERR, "prk: ", DUMP_PREFIX_NONE,
+ 16, 1, prk, tv->prk_size, false);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ okm = kzalloc(tv->okm_size, GFP_KERNEL);
+ if (!okm) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = crypto_shash_setkey(tfm, tv->prk, tv->prk_size);
+ if (err) {
+ pr_err("%s(%s): failed to set prk, error %d\n",
+ tv->test, driver, err);
+ goto out_free;
+ }
+
+ err = hkdf_expand(tfm, tv->info, tv->info_size,
+ okm, tv->okm_size);
+ if (err) {
+ pr_err("%s(%s): hkdf_expand() failed with %d\n",
+ tv->test, driver, err);
+ } else if (memcmp(okm, tv->okm, tv->okm_size)) {
+ pr_err("%s(%s): hkdf_expand() okm mismatch\n",
+ tv->test, driver);
+ print_hex_dump(KERN_ERR, "okm: ", DUMP_PREFIX_NONE,
+ 16, 1, okm, tv->okm_size, false);
+ err = -EINVAL;
+ }
+out_free:
+ kfree(okm);
+ kfree(prk);
+ crypto_free_shash(tfm);
+ return err;
+}
+
+static int __init crypto_hkdf_module_init(void)
+{
+ int ret = 0, i;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) {
+ ret = hkdf_test("hmac(sha256)", &hkdf_sha256_tv[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha384_tv); i++) {
+ ret = hkdf_test("hmac(sha384)", &hkdf_sha384_tv[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha512_tv); i++) {
+ ret = hkdf_test("hmac(sha512)", &hkdf_sha512_tv[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit crypto_hkdf_module_exit(void) {}
+
+module_init(crypto_hkdf_module_init);
+module_exit(crypto_hkdf_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HMAC-based Key Derivation Function (HKDF)");
diff --git a/crypto/internal.h b/crypto/internal.h
index 46b661be0f90..11567ea24fc3 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -33,6 +33,21 @@ struct crypto_larval {
bool test_started;
};
+struct crypto_type {
+ unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
+ unsigned int (*extsize)(struct crypto_alg *alg);
+ int (*init_tfm)(struct crypto_tfm *tfm);
+ void (*show)(struct seq_file *m, struct crypto_alg *alg);
+ int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
+ void (*free)(struct crypto_instance *inst);
+ void (*destroy)(struct crypto_alg *alg);
+
+ unsigned int type;
+ unsigned int maskclear;
+ unsigned int maskset;
+ unsigned int tfmsize;
+};
+
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -113,6 +128,7 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend, int node);
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm);
+void crypto_destroy_alg(struct crypto_alg *alg);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
@@ -149,8 +165,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
static inline void crypto_alg_put(struct crypto_alg *alg)
{
- if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
- alg->cra_destroy(alg);
+ if (refcount_dec_and_test(&alg->cra_refcnt))
+ crypto_destroy_alg(alg);
}
static inline int crypto_tmpl_get(struct crypto_template *tmpl)
diff --git a/crypto/krb5/Kconfig b/crypto/krb5/Kconfig
new file mode 100644
index 000000000000..4d0476e13f3c
--- /dev/null
+++ b/crypto/krb5/Kconfig
@@ -0,0 +1,26 @@
+config CRYPTO_KRB5
+ tristate "Kerberos 5 crypto"
+ select CRYPTO_MANAGER
+ select CRYPTO_KRB5ENC
+ select CRYPTO_AUTHENC
+ select CRYPTO_SKCIPHER
+ select CRYPTO_HASH_INFO
+ select CRYPTO_HMAC
+ select CRYPTO_CMAC
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_CBC
+ select CRYPTO_CTS
+ select CRYPTO_AES
+ select CRYPTO_CAMELLIA
+ help
+ Provide a library of Kerberos-5-based crypto routines. This is
+ intended for network filesystems to use.
+
+config CRYPTO_KRB5_SELFTESTS
+ bool "Kerberos 5 crypto selftests"
+ depends on CRYPTO_KRB5
+ help
+ Turn on self-tests for the Kerberos 5 crypto functions. These are
+ run on module load, or at boot time if the code is built in.
diff --git a/crypto/krb5/Makefile b/crypto/krb5/Makefile
new file mode 100644
index 000000000000..d38890c0b247
--- /dev/null
+++ b/crypto/krb5/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Kerberos 5 crypto
+#
+
+krb5-y += \
+ krb5_kdf.o \
+ krb5_api.o \
+ rfc3961_simplified.o \
+ rfc3962_aes.o \
+ rfc6803_camellia.o \
+ rfc8009_aes2.o
+
+krb5-$(CONFIG_CRYPTO_KRB5_SELFTESTS) += \
+ selftest.o \
+ selftest_data.o
+
+obj-$(CONFIG_CRYPTO_KRB5) += krb5.o
diff --git a/crypto/krb5/internal.h b/crypto/krb5/internal.h
new file mode 100644
index 000000000000..a59084ffafe8
--- /dev/null
+++ b/crypto/krb5/internal.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Kerberos5 crypto internals
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/scatterlist.h>
+#include <crypto/krb5.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+
+/*
+ * Profile used for key derivation and encryption.
+ */
+struct krb5_crypto_profile {
+ /* Pseudo-random function */
+ int (*calc_PRF)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp);
+
+ /* Checksum key derivation */
+ int (*calc_Kc)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Kc,
+ gfp_t gfp);
+
+ /* Encryption key derivation */
+ int (*calc_Ke)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Ke,
+ gfp_t gfp);
+
+ /* Integrity key derivation */
+ int (*calc_Ki)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *Ki,
+ gfp_t gfp);
+
+ /* Derive the keys needed for an encryption AEAD object. */
+ int (*derive_encrypt_keys)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Directly load the keys needed for an encryption AEAD object. */
+ int (*load_encrypt_keys)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Derive the key needed for a checksum hash object. */
+ int (*derive_checksum_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Directly load the keys needed for a checksum hash object. */
+ int (*load_checksum_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+
+ /* Encrypt data in-place, inserting confounder and checksum. */
+ ssize_t (*encrypt)(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+
+ /* Decrypt data in-place, removing confounder and checksum */
+ int (*decrypt)(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+ /* Generate a MIC on part of a packet, inserting the checksum */
+ ssize_t (*get_mic)(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len);
+
+ /* Verify the MIC on a piece of data, removing the checksum */
+ int (*verify_mic)(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+};
+
+/*
+ * Crypto size/alignment rounding convenience macros.
+ */
+#define crypto_roundup(X) ((unsigned int)round_up((X), CRYPTO_MINALIGN))
+
+#define krb5_aead_size(TFM) \
+ crypto_roundup(sizeof(struct aead_request) + crypto_aead_reqsize(TFM))
+#define krb5_aead_ivsize(TFM) \
+ crypto_roundup(crypto_aead_ivsize(TFM))
+#define krb5_shash_size(TFM) \
+ crypto_roundup(sizeof(struct shash_desc) + crypto_shash_descsize(TFM))
+#define krb5_digest_size(TFM) \
+ crypto_roundup(crypto_shash_digestsize(TFM))
+#define round16(x) (((x) + 15) & ~15)
+
+/*
+ * Self-testing data.
+ */
+struct krb5_prf_test {
+ u32 etype;
+ const char *name, *key, *octet, *prf;
+};
+
+struct krb5_key_test_one {
+ u32 use;
+ const char *key;
+};
+
+struct krb5_key_test {
+ u32 etype;
+ const char *name, *key;
+ struct krb5_key_test_one Kc, Ke, Ki;
+};
+
+struct krb5_enc_test {
+ u32 etype;
+ u32 usage;
+ const char *name, *plain, *conf, *K0, *Ke, *Ki, *ct;
+};
+
+struct krb5_mic_test {
+ u32 etype;
+ u32 usage;
+ const char *name, *plain, *K0, *Kc, *mic;
+};
+
+/*
+ * krb5_api.c
+ */
+struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *keys,
+ gfp_t gfp);
+struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ gfp_t gfp);
+
+/*
+ * krb5_kdf.c
+ */
+int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp);
+
+/*
+ * rfc3961_simplified.c
+ */
+extern const struct krb5_crypto_profile rfc3961_simplified_profile;
+
+int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg,
+ size_t offset, size_t len);
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+int rfc3961_load_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp);
+ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+int krb5_aead_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len);
+int rfc3961_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+/*
+ * rfc3962_aes.c
+ */
+extern const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96;
+extern const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96;
+
+/*
+ * rfc6803_camellia.c
+ */
+extern const struct krb5_enctype krb5_camellia128_cts_cmac;
+extern const struct krb5_enctype krb5_camellia256_cts_cmac;
+
+/*
+ * rfc8009_aes2.c
+ */
+extern const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128;
+extern const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192;
+
+/*
+ * selftest.c
+ */
+#ifdef CONFIG_CRYPTO_KRB5_SELFTESTS
+int krb5_selftest(void);
+#else
+static inline int krb5_selftest(void) { return 0; }
+#endif
+
+/*
+ * selftest_data.c
+ */
+extern const struct krb5_prf_test krb5_prf_tests[];
+extern const struct krb5_key_test krb5_key_tests[];
+extern const struct krb5_enc_test krb5_enc_tests[];
+extern const struct krb5_mic_test krb5_mic_tests[];
diff --git a/crypto/krb5/krb5_api.c b/crypto/krb5/krb5_api.c
new file mode 100644
index 000000000000..23026d4206c8
--- /dev/null
+++ b/crypto/krb5/krb5_api.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos 5 crypto library.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include "internal.h"
+
+MODULE_DESCRIPTION("Kerberos 5 crypto");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+static const struct krb5_enctype *const krb5_supported_enctypes[] = {
+ &krb5_aes128_cts_hmac_sha1_96,
+ &krb5_aes256_cts_hmac_sha1_96,
+ &krb5_aes128_cts_hmac_sha256_128,
+ &krb5_aes256_cts_hmac_sha384_192,
+ &krb5_camellia128_cts_cmac,
+ &krb5_camellia256_cts_cmac,
+};
+
+/**
+ * crypto_krb5_find_enctype - Find the handler for a Kerberos5 encryption type
+ * @enctype: The standard Kerberos encryption type number
+ *
+ * Look up a Kerberos encryption type by number. If successful, returns a
+ * pointer to the type tables; returns NULL otherwise.
+ */
+const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype)
+{
+ const struct krb5_enctype *krb5;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(krb5_supported_enctypes); i++) {
+ krb5 = krb5_supported_enctypes[i];
+ if (krb5->etype == enctype)
+ return krb5;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(crypto_krb5_find_enctype);
+
+/**
+ * crypto_krb5_how_much_buffer - Work out how much buffer is required for an amount of data
+ * @krb5: The encoding to use.
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @data_size: How much data we want to allow for
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much buffer space is required to wrap a given amount of data.
+ * This allows for a confounder, padding and checksum as appropriate. The
+ * amount of buffer required is returned and the offset into the buffer at
+ * which the data will start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t data_size, size_t *_offset)
+{
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ *_offset = krb5->cksum_len;
+ return krb5->cksum_len + data_size;
+
+ case KRB5_ENCRYPT_MODE:
+ *_offset = krb5->conf_len;
+ return krb5->conf_len + data_size + krb5->cksum_len;
+
+ default:
+ WARN_ON(1);
+ *_offset = 0;
+ return 0;
+ }
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_buffer);
+
+/**
+ * crypto_krb5_how_much_data - Work out how much data can fit in an amount of buffer
+ * @krb5: The encoding to use.
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @_buffer_size: How much buffer we want to allow for (may be reduced)
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much data can fit into a given amount of buffer. This
+ * allows for a confounder, padding and checksum as appropriate. The amount of
+ * data that will fit is returned, the amount of buffer required is shrunk to
+ * allow for alignment and the offset into the buffer at which the data will
+ * start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_buffer_size, size_t *_offset)
+{
+ size_t buffer_size = *_buffer_size, data_size;
+
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ if (WARN_ON(buffer_size < krb5->cksum_len + 1))
+ goto bad;
+ *_offset = krb5->cksum_len;
+ return buffer_size - krb5->cksum_len;
+
+ case KRB5_ENCRYPT_MODE:
+ if (WARN_ON(buffer_size < krb5->conf_len + 1 + krb5->cksum_len))
+ goto bad;
+ data_size = buffer_size - krb5->cksum_len;
+ *_offset = krb5->conf_len;
+ return data_size - krb5->conf_len;
+
+ default:
+ WARN_ON(1);
+ goto bad;
+ }
+
+bad:
+ *_offset = 0;
+ return 0;
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_data);
+
+/**
+ * crypto_krb5_where_is_the_data - Find the data in a decrypted message
+ * @krb5: The encoding to use.
+ * @mode: Mode of operation
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Find the offset and size of the data in a secure message so that this
+ * information can be used in the metadata buffer which will get added to the
+ * digest by crypto_krb5_verify_mic().
+ */
+void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_offset, size_t *_len)
+{
+ switch (mode) {
+ case KRB5_CHECKSUM_MODE:
+ *_offset += krb5->cksum_len;
+ *_len -= krb5->cksum_len;
+ return;
+ case KRB5_ENCRYPT_MODE:
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ return;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+EXPORT_SYMBOL(crypto_krb5_where_is_the_data);
+
+/*
+ * Prepare the encryption with derived key data.
+ */
+struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *keys,
+ gfp_t gfp)
+{
+ struct crypto_aead *ci = NULL;
+ int ret = -ENOMEM;
+
+ ci = crypto_alloc_aead(krb5->encrypt_name, 0, 0);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ if (ret == -ENOENT)
+ ret = -ENOPKG;
+ goto err;
+ }
+
+ ret = crypto_aead_setkey(ci, keys->data, keys->len);
+ if (ret < 0) {
+ pr_err("Couldn't set AEAD key %s: %d\n", krb5->encrypt_name, ret);
+ goto err_ci;
+ }
+
+ ret = crypto_aead_setauthsize(ci, krb5->cksum_len);
+ if (ret < 0) {
+ pr_err("Couldn't set AEAD authsize %s: %d\n", krb5->encrypt_name, ret);
+ goto err_ci;
+ }
+
+ return ci;
+err_ci:
+ crypto_free_aead(ci);
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * crypto_krb5_prepare_encryption - Prepare AEAD crypto object for encryption-mode
+ * @krb5: The encoding to use.
+ * @TK: The transport key to use.
+ * @usage: The usage constant for key derivation.
+ * @gfp: Allocation flags.
+ *
+ * Allocate a crypto object that does all the necessary crypto, key it and set
+ * its parameters and return the crypto handle to it. This can then be used to
+ * dispatch encrypt and decrypt operations.
+ */
+struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp)
+{
+ struct crypto_aead *ci = NULL;
+ struct krb5_buffer keys = {};
+ int ret;
+
+ ret = krb5->profile->derive_encrypt_keys(krb5, TK, usage, &keys, gfp);
+ if (ret < 0)
+ goto err;
+
+ ci = krb5_prepare_encryption(krb5, &keys, gfp);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ goto err;
+ }
+
+ kfree(keys.data);
+ return ci;
+err:
+ kfree(keys.data);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(crypto_krb5_prepare_encryption);
+
+/*
+ * Prepare the checksum with derived key data.
+ */
+struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ gfp_t gfp)
+{
+ struct crypto_shash *ci = NULL;
+ int ret = -ENOMEM;
+
+ ci = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ if (ret == -ENOENT)
+ ret = -ENOPKG;
+ goto err;
+ }
+
+ ret = crypto_shash_setkey(ci, Kc->data, Kc->len);
+ if (ret < 0) {
+ pr_err("Couldn't set shash key %s: %d\n", krb5->cksum_name, ret);
+ goto err_ci;
+ }
+
+ return ci;
+err_ci:
+ crypto_free_shash(ci);
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * crypto_krb5_prepare_checksum - Prepare hash crypto object for checksum-mode
+ * @krb5: The encoding to use.
+ * @TK: The transport key to use.
+ * @usage: The usage constant for key derivation.
+ * @gfp: Allocation flags.
+ *
+ * Allocate a crypto object that does all the necessary crypto, key it and set
+ * its parameters and return the crypto handle to it. This can then be used to
+ * dispatch get_mic and verify_mic operations.
+ */
+struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp)
+{
+ struct crypto_shash *ci = NULL;
+ struct krb5_buffer keys = {};
+ int ret;
+
+ ret = krb5->profile->derive_checksum_key(krb5, TK, usage, &keys, gfp);
+ if (ret < 0) {
+ pr_err("get_Kc failed %d\n", ret);
+ goto err;
+ }
+
+ ci = krb5_prepare_checksum(krb5, &keys, gfp);
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ goto err;
+ }
+
+ kfree(keys.data);
+ return ci;
+err:
+ kfree(keys.data);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(crypto_krb5_prepare_checksum);
+
+/**
+ * crypto_krb5_encrypt - Apply Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ * @preconfounded: True if the confounder is already inserted.
+ *
+ * Using the specified Kerberos encoding, insert a confounder and padding as
+ * needed, encrypt this and the data in place and insert an integrity checksum
+ * into the buffer.
+ *
+ * The buffer must include space for the confounder, the checksum and any
+ * padding required. The caller can preinsert the confounder into the buffer
+ * (for testing, for example).
+ *
+ * The resulting secured blob may be less than the size of the buffer.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the confounder
+ * is too short or the data is misaligned. Other errors may also be returned
+ * from the crypto layer.
+ */
+ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ if (WARN_ON(data_offset > sg_len ||
+ data_len > sg_len ||
+ data_offset > sg_len - data_len))
+ return -EMSGSIZE;
+ return krb5->profile->encrypt(krb5, aead, sg, nr_sg, sg_len,
+ data_offset, data_len, preconfounded);
+}
+EXPORT_SYMBOL(crypto_krb5_encrypt);
+
+/**
+ * crypto_krb5_decrypt - Validate and remove Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum and decrypt the secure region, stripping off the confounder.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data plus the trailing padding are stored. The caller is responsible
+ * for working out how much padding there is and removing it.
+ *
+ * Returns 0 if successful or -ENOMEM on an allocation failure; returns
+ * -EPROTO if the data cannot be parsed or -EBADMSG if the integrity checksum
+ * doesn't match. Other errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ return krb5->profile->decrypt(krb5, aead, sg, nr_sg, _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_decrypt);
+
+/**
+ * crypto_krb5_get_mic - Apply Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ *
+ * Using the specified Kerberos encoding, calculate and insert an integrity
+ * checksum into the buffer.
+ *
+ * The buffer must include space for the checksum at the front.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the gap for
+ * the checksum is too short. Other errors may also be returned from the
+ * crypto layer.
+ */
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len)
+{
+ if (WARN_ON(data_offset > sg_len ||
+ data_len > sg_len ||
+ data_offset > sg_len - data_len))
+ return -EMSGSIZE;
+ return krb5->profile->get_mic(krb5, shash, metadata, sg, nr_sg, sg_len,
+ data_offset, data_len);
+}
+EXPORT_SYMBOL(crypto_krb5_get_mic);
+
+/**
+ * crypto_krb5_verify_mic - Validate and remove Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data is stored.
+ *
+ * Returns 0 if successful or -ENOMEM on an allocation failure; returns
+ * -EPROTO if the data cannot be parsed or -EBADMSG if the checksum doesn't
+ * match. Other errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ return krb5->profile->verify_mic(krb5, shash, metadata, sg, nr_sg,
+ _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_verify_mic);
+
+static int __init crypto_krb5_init(void)
+{
+ return krb5_selftest();
+}
+module_init(crypto_krb5_init);
+
+static void __exit crypto_krb5_exit(void)
+{
+}
+module_exit(crypto_krb5_exit);
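
Putting the API together: a sealer sizes its buffer with crypto_krb5_how_much_buffer(), copies the plaintext to the returned offset, keys an AEAD with crypto_krb5_prepare_encryption() and encrypts in place. A hedged sketch (the KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 constant is assumed to come from <crypto/krb5.h>, and a single-element scatterlist stands in for a real buffer arrangement):

#include <crypto/krb5.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t krb5_seal_example(const struct krb5_buffer *TK, u32 usage,
                                 const void *data, size_t data_len)
{
        const struct krb5_enctype *krb5;
        struct crypto_aead *aead;
        struct scatterlist sg;
        size_t offset, buf_len;
        ssize_t secured;
        void *buf;

        krb5 = crypto_krb5_find_enctype(KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
        if (!krb5)
                return -ENOPKG;

        /* Room for confounder + data + checksum; the data goes at *offset. */
        buf_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
                                              data_len, &offset);
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        memcpy(buf + offset, data, data_len);

        aead = crypto_krb5_prepare_encryption(krb5, TK, usage, GFP_KERNEL);
        if (IS_ERR(aead)) {
                kfree(buf);
                return PTR_ERR(aead);
        }

        sg_init_one(&sg, buf, buf_len);
        secured = crypto_krb5_encrypt(krb5, aead, &sg, 1, buf_len,
                                      offset, data_len, false);

        crypto_free_aead(aead);
        kfree(buf);     /* real code would transmit buf[0..secured) first */
        return secured;
}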
diff --git a/crypto/krb5/krb5_kdf.c b/crypto/krb5/krb5_kdf.c
new file mode 100644
index 000000000000..6699e5469d1b
--- /dev/null
+++ b/crypto/krb5/krb5_kdf.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos key derivation.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+/**
+ * crypto_krb5_calc_PRFplus - Calculate PRF+ [RFC4402]
+ * @krb5: The encryption type to use
+ * @K: The protocol key for the pseudo-random function
+ * @L: The length of the output
+ * @S: The input octet string
+ * @result: Result buffer, with space for at least @L bytes
+ * @gfp: Allocation restrictions
+ *
+ * Calculate the Kerberos pseudo-random function PRF+() by the following
+ * method:
+ *
+ * PRF+(K, L, S) = truncate(L, T1 || T2 || .. || Tn)
+ * Tn = PRF(K, n || S)
+ * [rfc4402 sec 2]
+ */
+int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *K,
+ unsigned int L,
+ const struct krb5_buffer *S,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct krb5_buffer T_series, Tn, n_S;
+ void *buffer;
+ int ret, n = 1;
+
+ Tn.len = krb5->prf_len;
+ T_series.len = 0;
+ n_S.len = 4 + S->len;
+
+ buffer = kzalloc(round16(L + Tn.len) + round16(n_S.len), gfp);
+ if (!buffer)
+ return -ENOMEM;
+
+ T_series.data = buffer;
+ n_S.data = buffer + round16(L + Tn.len);
+ memcpy(n_S.data + 4, S->data, S->len);
+
+ while (T_series.len < L) {
+ *(__be32 *)(n_S.data) = htonl(n);
+ Tn.data = T_series.data + Tn.len * (n - 1);
+ ret = krb5->profile->calc_PRF(krb5, K, &n_S, &Tn, gfp);
+ if (ret < 0)
+ goto err;
+ T_series.len += Tn.len;
+ n++;
+ }
+
+ /* Truncate to L */
+ memcpy(result->data, T_series.data, L);
+ ret = 0;
+
+err:
+ kfree_sensitive(buffer);
+ return ret;
+}
+EXPORT_SYMBOL(crypto_krb5_calc_PRFplus);
+
+/**
+ * krb5_derive_Kc - Derive key Kc and install into a hash
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Kc checksumming key. The key is stored into the
+ * prepared buffer.
+ */
+int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_CHECKSUM;
+
+ key->len = krb5->Kc_len;
+ return krb5->profile->calc_Kc(krb5, TK, &usage_constant, key, gfp);
+}
+
+/**
+ * krb5_derive_Ke - Derive the Ke encryption key
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Ke encryption key. The key is stored into the prepared
+ * buffer.
+ */
+int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_ENCRYPTION;
+
+ key->len = krb5->Ke_len;
+ return krb5->profile->calc_Ke(krb5, TK, &usage_constant, key, gfp);
+}
+
+/**
+ * krb5_derive_Ki - Derive the Ki integrity checksum key
+ * @krb5: The encryption type to use
+ * @TK: The base key
+ * @usage: The key usage number
+ * @key: Prepped buffer to store the key into
+ * @gfp: Allocation restrictions
+ *
+ * Derive the Kerberos Ki integrity checksum key. The key is stored into the
+ * prepared buffer.
+ */
+int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK,
+ u32 usage, struct krb5_buffer *key, gfp_t gfp)
+{
+ u8 buf[5] __aligned(CRYPTO_MINALIGN);
+ struct krb5_buffer usage_constant = { .len = 5, .data = buf };
+
+ *(__be32 *)buf = cpu_to_be32(usage);
+ buf[4] = KEY_USAGE_SEED_INTEGRITY;
+
+ key->len = krb5->Ki_len;
+ return krb5->profile->calc_Ki(krb5, TK, &usage_constant, key, gfp);
+}
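+
+/*
+ * For illustration: each helper above builds the same 5-octet usage constant,
+ * a 32-bit big-endian usage number followed by one seed octet (0x99 for Kc,
+ * 0xAA for Ke, 0x55 for Ki).  For usage 2, the Kc constant looks like this:
+ */
+static const u8 example_Kc_usage_constant[5] __maybe_unused = {
+	0x00, 0x00, 0x00, 0x02,		/* usage number, big-endian */
+	KEY_USAGE_SEED_CHECKSUM,	/* 0x99 */
+};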
diff --git a/crypto/krb5/rfc3961_simplified.c b/crypto/krb5/rfc3961_simplified.c
new file mode 100644
index 000000000000..79180d28baa9
--- /dev/null
+++ b/crypto/krb5/rfc3961_simplified.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* rfc3961 Kerberos 5 simplified crypto profile.
+ *
+ * Parts borrowed from net/sunrpc/auth_gss/.
+ */
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government. It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. FundsXpress makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/lcm.h>
+#include <linux/rtnetlink.h>
+#include <crypto/authenc.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+/* Maximum blocksize for the supported crypto algorithms */
+#define KRB5_MAX_BLOCKSIZE (16)
+
+int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg,
+ size_t offset, size_t len)
+{
+ struct sg_mapping_iter miter;
+ size_t i, n;
+ int ret = 0;
+
+	sg_miter_start(&miter, sg, sg_nents(sg),
+		       SG_MITER_FROM_SG | SG_MITER_LOCAL);
+	sg_miter_skip(&miter, offset);
+	for (i = 0; i < len; i += n) {
+ sg_miter_next(&miter);
+ n = min(miter.length, len - i);
+ ret = crypto_shash_update(desc, miter.addr, n);
+ if (ret < 0)
+ break;
+ }
+ sg_miter_stop(&miter);
+ return ret;
+}
+
+static int rfc3961_do_encrypt(struct crypto_sync_skcipher *tfm, void *iv,
+ const struct krb5_buffer *in, struct krb5_buffer *out)
+{
+ struct scatterlist sg[1];
+ u8 local_iv[KRB5_MAX_BLOCKSIZE] __aligned(KRB5_MAX_BLOCKSIZE) = {0};
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ int ret;
+
+ if (WARN_ON(in->len != out->len))
+ return -EINVAL;
+ if (out->len % crypto_sync_skcipher_blocksize(tfm) != 0)
+ return -EINVAL;
+
+ if (crypto_sync_skcipher_ivsize(tfm) > KRB5_MAX_BLOCKSIZE)
+ return -EINVAL;
+
+ if (iv)
+ memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
+
+ memcpy(out->data, in->data, out->len);
+ sg_init_one(sg, out->data, out->len);
+
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg, sg, out->len, local_iv);
+
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ return ret;
+}
+
+/*
+ * Calculate an unkeyed basic hash.
+ */
+static int rfc3961_calc_H(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *data,
+ struct krb5_buffer *digest,
+ gfp_t gfp)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ size_t desc_size;
+ int ret = -ENOMEM;
+
+ tfm = crypto_alloc_shash(krb5->hash_name, 0, 0);
+ if (IS_ERR(tfm))
+ return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
+
+ desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+
+ desc = kzalloc(desc_size, gfp);
+ if (!desc)
+ goto error_tfm;
+
+ digest->len = crypto_shash_digestsize(tfm);
+ digest->data = kzalloc(digest->len, gfp);
+ if (!digest->data)
+ goto error_desc;
+
+ desc->tfm = tfm;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error_digest;
+
+ ret = crypto_shash_finup(desc, data->data, data->len, digest->data);
+ if (ret < 0)
+ goto error_digest;
+
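+	/* Success: free the desc below but keep the digest for the caller. */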
+ goto error_desc;
+
+error_digest:
+ kfree_sensitive(digest->data);
+error_desc:
+ kfree_sensitive(desc);
+error_tfm:
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+/*
+ * This is the n-fold function as described in rfc3961, sec 5.1
+ * Taken from MIT Kerberos and modified.
+ */
+static void rfc3961_nfold(const struct krb5_buffer *source, struct krb5_buffer *result)
+{
+ const u8 *in = source->data;
+ u8 *out = result->data;
+ unsigned long ulcm;
+ unsigned int inbits, outbits;
+ int byte, i, msbit;
+
+ /* the code below is more readable if I make these bytes instead of bits */
+ inbits = source->len;
+ outbits = result->len;
+
+ /* first compute lcm(n,k) */
+ ulcm = lcm(inbits, outbits);
+
+ /* now do the real work */
+ memset(out, 0, outbits);
+ byte = 0;
+
+ /* this will end up cycling through k lcm(k,n)/k times, which
+ * is correct.
+ */
+ for (i = ulcm-1; i >= 0; i--) {
+ /* compute the msbit in k which gets added into this byte */
+ msbit = (
+ /* first, start with the msbit in the first,
+ * unrotated byte
+ */
+ ((inbits << 3) - 1) +
+ /* then, for each byte, shift to the right
+ * for each repetition
+ */
+ (((inbits << 3) + 13) * (i/inbits)) +
+ /* last, pick out the correct byte within
+ * that shifted repetition
+ */
+ ((inbits - (i % inbits)) << 3)
+ ) % (inbits << 3);
+
+ /* pull out the byte value itself */
+ byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) |
+ (in[((inbits) - (msbit >> 3)) % inbits]))
+ >> ((msbit & 7) + 1)) & 0xff;
+
+ /* do the addition */
+ byte += out[i % outbits];
+ out[i % outbits] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+ }
+
+ /* if there's a carry bit left over, add it back in */
+ if (byte) {
+ for (i = outbits - 1; i >= 0; i--) {
+ /* do the addition */
+ byte += out[i];
+ out[i] = byte & 0xff;
+
+ /* keep around the carry bit, if any */
+ byte >>= 8;
+ }
+ }
+}
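+
+/*
+ * A sketch of an n-fold check against the vector in rfc3961 appendix A.1:
+ * 64-folding the 64-bit string "kerberos" returns the input unchanged, as
+ * the input and output lengths already match.
+ */
+static int __maybe_unused rfc3961_nfold_check(void)
+{
+	static const u8 in[8] = { 'k', 'e', 'r', 'b', 'e', 'r', 'o', 's' };
+	u8 out[8];
+	struct krb5_buffer source = { .len = 8, .data = (void *)in };
+	struct krb5_buffer result = { .len = 8, .data = out };
+
+	rfc3961_nfold(&source, &result);
+	return memcmp(out, in, 8) == 0 ? 0 : -EBADMSG;
+}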
+
+/*
+ * Calculate a derived key, DK(Base Key, Well-Known Constant)
+ *
+ * DK(Key, Constant) = random-to-key(DR(Key, Constant))
+ * DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state))
+ * K1 = E(Key, n-fold(Constant), initial-cipher-state)
+ * K2 = E(Key, K1, initial-cipher-state)
+ * K3 = E(Key, K2, initial-cipher-state)
+ * K4 = ...
+ * DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...)
+ * [rfc3961 sec 5.1]
+ */
+static int rfc3961_calc_DK(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *inkey,
+ const struct krb5_buffer *in_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ unsigned int blocksize, keybytes, keylength, n;
+ struct krb5_buffer inblock, outblock, rawkey;
+ struct crypto_sync_skcipher *cipher;
+ int ret = -EINVAL;
+
+ blocksize = krb5->block_len;
+ keybytes = krb5->key_bytes;
+ keylength = krb5->key_len;
+
+ if (inkey->len != keylength || result->len != keylength)
+ return -EINVAL;
+ if (!krb5->random_to_key && result->len != keybytes)
+ return -EINVAL;
+
+ cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher);
+ goto err_return;
+ }
+ ret = crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len);
+ if (ret < 0)
+ goto err_free_cipher;
+
+ ret = -ENOMEM;
+ inblock.data = kzalloc(blocksize * 2 + keybytes, gfp);
+ if (!inblock.data)
+ goto err_free_cipher;
+
+ inblock.len = blocksize;
+ outblock.data = inblock.data + blocksize;
+ outblock.len = blocksize;
+ rawkey.data = outblock.data + blocksize;
+ rawkey.len = keybytes;
+
+ /* initialize the input block */
+
+ if (in_constant->len == inblock.len)
+ memcpy(inblock.data, in_constant->data, inblock.len);
+ else
+ rfc3961_nfold(in_constant, &inblock);
+
+	/* loop encrypting the blocks until enough key bytes are generated */
+	n = 0;
+	while (n < rawkey.len) {
+		ret = rfc3961_do_encrypt(cipher, NULL, &inblock, &outblock);
+		if (ret < 0)
+			goto err_free_inblock;
+
+		if (keybytes - n <= outblock.len) {
+			memcpy(rawkey.data + n, outblock.data, keybytes - n);
+			break;
+		}
+
+		memcpy(rawkey.data + n, outblock.data, outblock.len);
+		memcpy(inblock.data, outblock.data, outblock.len);
+		n += outblock.len;
+	}
+
+	/* postprocess the key */
+	if (!krb5->random_to_key) {
+		/* Identity random-to-key function. */
+		memcpy(result->data, rawkey.data, rawkey.len);
+		ret = 0;
+	} else {
+		ret = krb5->random_to_key(krb5, &rawkey, result);
+	}
+
+err_free_inblock:
+	kfree_sensitive(inblock.data);
+err_free_cipher:
+ crypto_free_sync_skcipher(cipher);
+err_return:
+ return ret;
+}
+
+/*
+ * Calculate single encryption, E()
+ *
+ * E(Key, octets)
+ */
+static int rfc3961_calc_E(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *in_data,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_sync_skcipher *cipher;
+ int ret;
+
+ cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0);
+ if (IS_ERR(cipher)) {
+ ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher);
+ goto err;
+ }
+
+ ret = crypto_sync_skcipher_setkey(cipher, key->data, key->len);
+ if (ret < 0)
+ goto err_free;
+
+ ret = rfc3961_do_encrypt(cipher, NULL, in_data, result);
+
+err_free:
+ crypto_free_sync_skcipher(cipher);
+err:
+ return ret;
+}
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * tmp1 = H(octet-string)
+ * tmp2 = truncate tmp1 to multiple of m
+ * PRF = E(DK(protocol-key, prfconstant), tmp2, initial-cipher-state)
+ *
+ * The "prfconstant" used in the PRF operation is the three-octet string
+ * "prf".
+ * [rfc3961 sec 5.3]
+ */
+static int rfc3961_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+ struct krb5_buffer derived_key;
+ struct krb5_buffer tmp1, tmp2;
+ unsigned int m = krb5->block_len;
+ void *buffer;
+ int ret;
+
+ if (result->len != krb5->prf_len)
+ return -EINVAL;
+
+ tmp1.len = krb5->hash_len;
+ derived_key.len = krb5->key_bytes;
+ buffer = kzalloc(round16(tmp1.len) + round16(derived_key.len), gfp);
+ if (!buffer)
+ return -ENOMEM;
+
+ tmp1.data = buffer;
+ derived_key.data = buffer + round16(tmp1.len);
+
+ ret = rfc3961_calc_H(krb5, octet_string, &tmp1, gfp);
+ if (ret < 0)
+ goto err;
+
+ tmp2.len = tmp1.len & ~(m - 1);
+ tmp2.data = tmp1.data;
+
+ ret = rfc3961_calc_DK(krb5, protocol_key, &prfconstant, &derived_key, gfp);
+ if (ret < 0)
+ goto err;
+
+ ret = rfc3961_calc_E(krb5, &derived_key, &tmp2, result, gfp);
+
+err:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Derive the Ke and Ki keys and package them into a key parameter that can be
+ * given to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ struct crypto_authenc_key_param *param;
+ struct krb5_buffer Ke, Ki;
+ struct rtattr *rta;
+ int ret;
+
+ Ke.len = krb5->Ke_len;
+ Ki.len = krb5->Ki_len;
+ setkey->len = RTA_LENGTH(sizeof(*param)) + Ke.len + Ki.len;
+	setkey->data = kzalloc(setkey->len, gfp);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ rta = setkey->data;
+ rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ rta->rta_len = RTA_LENGTH(sizeof(*param));
+ param = RTA_DATA(rta);
+ param->enckeylen = htonl(Ke.len);
+
+ Ki.data = (void *)(param + 1);
+ Ke.data = Ki.data + Ki.len;
+
+ ret = krb5_derive_Ke(krb5, TK, usage, &Ke, gfp);
+ if (ret < 0) {
+ pr_err("get_Ke failed %d\n", ret);
+ return ret;
+ }
+ ret = krb5_derive_Ki(krb5, TK, usage, &Ki, gfp);
+ if (ret < 0)
+ pr_err("get_Ki failed %d\n", ret);
+ return ret;
+}
+
+/*
+ * Package predefined Ke and Ki keys into a key parameter that can be given
+ * to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Ke,
+ const struct krb5_buffer *Ki,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ struct crypto_authenc_key_param *param;
+ struct rtattr *rta;
+
+ setkey->len = RTA_LENGTH(sizeof(*param)) + Ke->len + Ki->len;
+	setkey->data = kzalloc(setkey->len, gfp);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ rta = setkey->data;
+ rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+ rta->rta_len = RTA_LENGTH(sizeof(*param));
+ param = RTA_DATA(rta);
+ param->enckeylen = htonl(Ke->len);
+ memcpy((void *)(param + 1), Ki->data, Ki->len);
+ memcpy((void *)(param + 1) + Ki->len, Ke->data, Ke->len);
+ return 0;
+}
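+
+/*
+ * A minimal sketch of consuming the setkey blob built above: it is already
+ * in the format the authenc/krb5enc AEAD templates expect (an rtattr-framed
+ * enckeylen parameter, then the MAC key, then the cipher key), so it can be
+ * handed straight to crypto_aead_setkey().
+ */
+static int __maybe_unused example_set_aead_key(struct crypto_aead *aead,
+					       const struct krb5_buffer *setkey)
+{
+	return crypto_aead_setkey(aead, setkey->data, setkey->len);
+}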
+
+/*
+ * Derive the Kc key for checksum-only mode and package it into a key parameter
+ * that can be given to the setkey of a hash crypto object.
+ */
+int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ unsigned int usage,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ int ret;
+
+ setkey->len = krb5->Kc_len;
+	setkey->data = kzalloc(setkey->len, gfp);
+ if (!setkey->data)
+ return -ENOMEM;
+
+ ret = krb5_derive_Kc(krb5, TK, usage, setkey, gfp);
+ if (ret < 0)
+ pr_err("get_Kc failed %d\n", ret);
+ return ret;
+}
+
+/*
+ * Package a predefined Kc key for checksum-only mode into a key parameter that
+ * can be given to the setkey of a hash crypto object.
+ */
+int rfc3961_load_checksum_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *Kc,
+ struct krb5_buffer *setkey,
+ gfp_t gfp)
+{
+ setkey->len = krb5->Kc_len;
+	setkey->data = kmemdup(Kc->data, Kc->len, gfp);
+ if (!setkey->data)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * Apply encryption and checksumming functions to part of a scatterlist.
+ */
+ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ struct aead_request *req;
+ ssize_t ret, done;
+ size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset;
+ void *buffer;
+ u8 *iv;
+
+ if (WARN_ON(data_offset != krb5->conf_len))
+ return -EINVAL; /* Data is in wrong place */
+
+ secure_offset = 0;
+ base_len = krb5->conf_len + data_len;
+ pad_len = 0;
+ secure_len = base_len + pad_len;
+ cksum_offset = secure_len;
+ if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len))
+ return -EFAULT;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+	/* Generate a confounder and insert it into the message */
+ ret = -EFAULT;
+ if (!preconfounded) {
+ get_random_bytes(buffer, krb5->conf_len);
+ done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len,
+ secure_offset);
+ if (done != krb5->conf_len)
+ goto error;
+ }
+
+ /* We may need to pad out to the crypto blocksize. */
+ if (pad_len) {
+ done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len);
+ if (done != pad_len)
+ goto error;
+ }
+
+ /* Hash and encrypt the message. */
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_crypt(req, sg, sg, secure_len, iv);
+ ret = crypto_aead_encrypt(req);
+ if (ret < 0)
+ goto error;
+
+ ret = secure_len + krb5->cksum_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
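+
+/*
+ * A minimal caller sketch, assuming @aead was prepared for this enctype and
+ * @buf was laid out with the plaintext at offset krb5->conf_len.  For
+ * aes128-cts-hmac-sha1-96 (conf_len 16, cksum_len 12), a 100-byte payload
+ * occupies bytes 16-115 of a 16 + 100 + 12 = 128 byte message.
+ */
+static ssize_t __maybe_unused example_encrypt(const struct krb5_enctype *krb5,
+					      struct crypto_aead *aead,
+					      void *buf, size_t data_len)
+{
+	struct scatterlist sg[1];
+	size_t total = krb5->conf_len + data_len + krb5->cksum_len;
+
+	sg_init_one(sg, buf, total);
+	return krb5_aead_encrypt(krb5, aead, sg, 1, total,
+				 krb5->conf_len, data_len, false);
+}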
+
+/*
+ * Apply decryption and checksumming functions to a message. The offset and
+ * length are updated to reflect the actual content of the encrypted region.
+ */
+int krb5_aead_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct aead_request *req;
+ size_t bsize;
+ void *buffer;
+ int ret;
+ u8 *iv;
+
+ if (WARN_ON(*_offset != 0))
+ return -EINVAL; /* Can't set offset on aead */
+
+ if (*_len < krb5->conf_len + krb5->cksum_len)
+ return -EPROTO;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Decrypt the message and verify its checksum. */
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_crypt(req, sg, sg, *_len, iv);
+ ret = crypto_aead_decrypt(req);
+ if (ret < 0)
+ goto error;
+
+ /* Adjust the boundaries of the data. */
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Generate a checksum over some metadata and part of a scatterlist and insert
+ * the MIC into the message immediately before the data.
+ */
+ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len)
+{
+ struct shash_desc *desc;
+ ssize_t ret, done;
+ size_t bsize;
+ void *buffer, *digest;
+
+ if (WARN_ON(data_offset != krb5->cksum_len))
+ return -EMSGSIZE;
+
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ /* Calculate the MIC with key Kc and store it into the skb */
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ if (metadata) {
+ ret = crypto_shash_update(desc, metadata->data, metadata->len);
+ if (ret < 0)
+ goto error;
+ }
+
+ ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+ if (ret < 0)
+ goto error;
+
+ digest = buffer + krb5_shash_size(shash);
+ ret = crypto_shash_final(desc, digest);
+ if (ret < 0)
+ goto error;
+
+ ret = -EFAULT;
+ done = sg_pcopy_from_buffer(sg, nr_sg, digest, krb5->cksum_len,
+ data_offset - krb5->cksum_len);
+ if (done != krb5->cksum_len)
+ goto error;
+
+ ret = krb5->cksum_len + data_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
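+
+/*
+ * A minimal caller sketch for the above, assuming a buffer laid out as
+ * [ MIC | data ]: the checksum occupies the first krb5->cksum_len bytes
+ * and the data follows immediately after it.
+ */
+static ssize_t __maybe_unused example_get_mic(const struct krb5_enctype *krb5,
+					      struct crypto_shash *shash,
+					      void *buf, size_t data_len)
+{
+	struct scatterlist sg[1];
+	size_t total = krb5->cksum_len + data_len;
+
+	sg_init_one(sg, buf, total);
+	return rfc3961_get_mic(krb5, shash, NULL, sg, 1, total,
+			       krb5->cksum_len, data_len);
+}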
+
+/*
+ * Check the MIC on a region of a scatterlist.  The offset and length are
+ * updated to reflect the actual content of the secure region.
+ */
+int rfc3961_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct shash_desc *desc;
+ ssize_t done;
+ size_t bsize, data_offset, data_len, offset = *_offset, len = *_len;
+ void *buffer = NULL;
+ int ret;
+ u8 *cksum, *cksum2;
+
+ if (len < krb5->cksum_len)
+ return -EPROTO;
+ data_offset = offset + krb5->cksum_len;
+ data_len = len - krb5->cksum_len;
+
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ cksum = buffer +
+ krb5_shash_size(shash);
+ cksum2 = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+
+ /* Calculate the MIC */
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ if (metadata) {
+ ret = crypto_shash_update(desc, metadata->data, metadata->len);
+ if (ret < 0)
+ goto error;
+ }
+
+	ret = crypto_shash_update_sg(desc, sg, data_offset, data_len);
+	if (ret < 0)
+		goto error;
+
+	ret = crypto_shash_final(desc, cksum);
+	if (ret < 0)
+		goto error;
+
+ ret = -EFAULT;
+ done = sg_pcopy_to_buffer(sg, nr_sg, cksum2, krb5->cksum_len, offset);
+ if (done != krb5->cksum_len)
+ goto error;
+
+ if (memcmp(cksum, cksum2, krb5->cksum_len) != 0) {
+ ret = -EBADMSG;
+ goto error;
+ }
+
+ *_offset += krb5->cksum_len;
+ *_len -= krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+const struct krb5_crypto_profile rfc3961_simplified_profile = {
+ .calc_PRF = rfc3961_calc_PRF,
+ .calc_Kc = rfc3961_calc_DK,
+ .calc_Ke = rfc3961_calc_DK,
+ .calc_Ki = rfc3961_calc_DK,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = krb5_aead_encrypt,
+ .decrypt = krb5_aead_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
diff --git a/crypto/krb5/rfc3962_aes.c b/crypto/krb5/rfc3962_aes.c
new file mode 100644
index 000000000000..5cbf8f4638b9
--- /dev/null
+++ b/crypto/krb5/rfc3962_aes.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* rfc3962 Advanced Encryption Standard (AES) Encryption for Kerberos 5
+ *
+ * Parts borrowed from net/sunrpc/auth_gss/.
+ */
+/*
+ * COPYRIGHT (c) 2008
+ * The Regents of the University of Michigan
+ * ALL RIGHTS RESERVED
+ *
+ * Permission is granted to use, copy, create derivative works
+ * and redistribute this software and such derivative works
+ * for any purpose, so long as the name of The University of
+ * Michigan is not used in any advertising or publicity
+ * pertaining to the use of distribution of this software
+ * without specific, written prior authorization. If the
+ * above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any
+ * portion of this software, then the disclaimer below must
+ * also be included.
+ *
+ * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
+ * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
+ * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
+ * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
+ * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
+ * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
+ * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
+ * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
+ * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGES.
+ */
+
+/*
+ * Copyright (C) 1998 by the FundsXpress, INC.
+ *
+ * All rights reserved.
+ *
+ * Export of this software from the United States of America may require
+ * a specific license from the United States Government. It is the
+ * responsibility of any person or organization contemplating export to
+ * obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of FundsXpress. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. FundsXpress makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/*
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "internal.h"
+
+const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96 = {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128,
+ .name = "aes128-cts-hmac-sha1-96",
+ .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha1)",
+ .hash_name = "sha1",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 12,
+ .hash_len = 20,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc3961_simplified_profile,
+};
+
+const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96 = {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256,
+ .name = "aes256-cts-hmac-sha1-96",
+ .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha1)",
+ .hash_name = "sha1",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 32,
+ .Ke_len = 32,
+ .Ki_len = 32,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 12,
+ .hash_len = 20,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc3961_simplified_profile,
+};
diff --git a/crypto/krb5/rfc6803_camellia.c b/crypto/krb5/rfc6803_camellia.c
new file mode 100644
index 000000000000..77cd4ce023f1
--- /dev/null
+++ b/crypto/krb5/rfc6803_camellia.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* rfc6803 Camellia Encryption for Kerberos 5
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Calculate the key derivation function KDF-FEEDBACK_CMAC(key, constant)
+ *
+ * n = ceiling(k / 128)
+ * K(0) = zeros
+ * K(i) = CMAC(key, K(i-1) | i | constant | 0x00 | k)
+ * DR(key, constant) = k-truncate(K(1) | K(2) | ... | K(n))
+ * KDF-FEEDBACK-CMAC(key, constant) = random-to-key(DR(key, constant))
+ *
+ * [rfc6803 sec 3]
+ */
+static int rfc6803_calc_KDF_FEEDBACK_CMAC(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_shash *shash;
+ struct krb5_buffer K, data;
+ struct shash_desc *desc;
+ __be32 tmp;
+ size_t bsize, offset, seg;
+ void *buffer;
+ u32 i = 0, k = result->len * 8;
+ u8 *p;
+ int ret = -ENOMEM;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+ ret = crypto_shash_setkey(shash, key->data, key->len);
+ if (ret < 0)
+ goto error_shash;
+
+ ret = -ENOMEM;
+ K.len = crypto_shash_digestsize(shash);
+ data.len = K.len + 4 + constant->len + 1 + 4;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(K.len) +
+ crypto_roundup(data.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto error_shash;
+
+ desc = buffer;
+ desc->tfm = shash;
+
+ K.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ data.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(K.len);
+
+ p = data.data + K.len + 4;
+ memcpy(p, constant->data, constant->len);
+ p += constant->len;
+ *p++ = 0x00;
+ tmp = htonl(k);
+ memcpy(p, &tmp, 4);
+ p += 4;
+
+ ret = -EINVAL;
+ if (WARN_ON(p - (u8 *)data.data != data.len))
+ goto error;
+
+ offset = 0;
+ do {
+ i++;
+ p = data.data;
+ memcpy(p, K.data, K.len);
+ p += K.len;
+ *(__be32 *)p = htonl(i);
+
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+ ret = crypto_shash_finup(desc, data.data, data.len, K.data);
+ if (ret < 0)
+ goto error;
+
+ seg = min_t(size_t, result->len - offset, K.len);
+ memcpy(result->data + offset, K.data, seg);
+ offset += seg;
+ } while (offset < result->len);
+
+error:
+ kfree_sensitive(buffer);
+error_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
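+
+/*
+ * A sketch checking the derivation against the camellia128 Kc vector from
+ * rfc6803 sec 10 (also exercised by the selftests): base key
+ * 57d0297298ffd9d35de5a47fb4bde24b with usage constant 0x0000000299 should
+ * yield d155775a209d05f02b38d42a389e5a56.
+ */
+static int __maybe_unused rfc6803_kdf_check(const struct krb5_enctype *krb5)
+{
+	static const u8 base[16] = {
+		0x57, 0xd0, 0x29, 0x72, 0x98, 0xff, 0xd9, 0xd3,
+		0x5d, 0xe5, 0xa4, 0x7f, 0xb4, 0xbd, 0xe2, 0x4b,
+	};
+	static const u8 expect[16] = {
+		0xd1, 0x55, 0x77, 0x5a, 0x20, 0x9d, 0x05, 0xf0,
+		0x2b, 0x38, 0xd4, 0x2a, 0x38, 0x9e, 0x5a, 0x56,
+	};
+	static const u8 uc[5] = { 0x00, 0x00, 0x00, 0x02, 0x99 };
+	struct krb5_buffer key = { .len = 16, .data = (void *)base };
+	struct krb5_buffer constant = { .len = 5, .data = (void *)uc };
+	u8 out[16];
+	struct krb5_buffer result = { .len = 16, .data = out };
+	int ret;
+
+	ret = rfc6803_calc_KDF_FEEDBACK_CMAC(krb5, &key, &constant, &result,
+					     GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+	return memcmp(out, expect, 16) == 0 ? 0 : -EKEYREJECTED;
+}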
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * Kp = KDF-FEEDBACK-CMAC(protocol-key, "prf")
+ * PRF = CMAC(Kp, octet-string)
+ * [rfc6803 sec 6]
+ */
+static int rfc6803_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *protocol_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+ struct crypto_shash *shash;
+ struct krb5_buffer Kp;
+ struct shash_desc *desc;
+ size_t bsize;
+ void *buffer;
+ int ret;
+
+ Kp.len = krb5->prf_len;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+
+ ret = -EINVAL;
+ if (result->len != crypto_shash_digestsize(shash))
+ goto out_shash;
+
+ ret = -ENOMEM;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(Kp.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto out_shash;
+
+ Kp.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+
+ ret = rfc6803_calc_KDF_FEEDBACK_CMAC(krb5, protocol_key, &prfconstant,
+ &Kp, gfp);
+ if (ret < 0)
+ goto out;
+
+ ret = crypto_shash_setkey(shash, Kp.data, Kp.len);
+ if (ret < 0)
+ goto out;
+
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto out;
+
+ ret = crypto_shash_finup(desc, octet_string->data, octet_string->len, result->data);
+ if (ret < 0)
+ goto out;
+
+out:
+ kfree_sensitive(buffer);
+out_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
+
+static const struct krb5_crypto_profile rfc6803_crypto_profile = {
+ .calc_PRF = rfc6803_calc_PRF,
+ .calc_Kc = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .calc_Ke = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .calc_Ki = rfc6803_calc_KDF_FEEDBACK_CMAC,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = krb5_aead_encrypt,
+ .decrypt = krb5_aead_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
+
+const struct krb5_enctype krb5_camellia128_cts_cmac = {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA128,
+ .name = "camellia128-cts-cmac",
+ .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .cksum_name = "cmac(camellia)",
+ .hash_name = NULL,
+ .derivation_enc = "cts(cbc(camellia))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 16,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc6803_crypto_profile,
+};
+
+const struct krb5_enctype krb5_camellia256_cts_cmac = {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA256,
+ .name = "camellia256-cts-cmac",
+ .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .cksum_name = "cmac(camellia)",
+ .hash_name = NULL,
+ .derivation_enc = "cts(cbc(camellia))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 32,
+ .Ke_len = 32,
+ .Ki_len = 32,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 16,
+ .prf_len = 16,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc6803_crypto_profile,
+};
diff --git a/crypto/krb5/rfc8009_aes2.c b/crypto/krb5/rfc8009_aes2.c
new file mode 100644
index 000000000000..d39851fc3a4e
--- /dev/null
+++ b/crypto/krb5/rfc8009_aes2.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* rfc8009 AES Encryption with HMAC-SHA2 for Kerberos 5
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <crypto/authenc.h>
+#include "internal.h"
+
+static const struct krb5_buffer rfc8009_no_context = { .len = 0, .data = "" };
+
+/*
+ * Calculate the key derivation function KDF-HMAC-SHA2(key, label, [context,] k)
+ *
+ * KDF-HMAC-SHA2(key, label, [context,] k) = k-truncate(K1)
+ *
+ * Using the appropriate one of:
+ * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | k)
+ * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | k)
+ * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | context | k)
+ * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | context | k)
+ * [rfc8009 sec 3]
+ */
+static int rfc8009_calc_KDF_HMAC_SHA2(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *key,
+ const struct krb5_buffer *label,
+ const struct krb5_buffer *context,
+ unsigned int k,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ struct crypto_shash *shash;
+ struct krb5_buffer K1, data;
+ struct shash_desc *desc;
+ __be32 tmp;
+ size_t bsize;
+ void *buffer;
+ u8 *p;
+ int ret = -ENOMEM;
+
+ if (WARN_ON(result->len != k / 8))
+ return -EINVAL;
+
+ shash = crypto_alloc_shash(krb5->cksum_name, 0, 0);
+ if (IS_ERR(shash))
+ return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash);
+ ret = crypto_shash_setkey(shash, key->data, key->len);
+ if (ret < 0)
+ goto error_shash;
+
+ ret = -EINVAL;
+ if (WARN_ON(crypto_shash_digestsize(shash) * 8 < k))
+ goto error_shash;
+
+ ret = -ENOMEM;
+ data.len = 4 + label->len + 1 + context->len + 4;
+ bsize = krb5_shash_size(shash) +
+ krb5_digest_size(shash) +
+ crypto_roundup(data.len);
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ goto error_shash;
+
+ desc = buffer;
+ desc->tfm = shash;
+ ret = crypto_shash_init(desc);
+ if (ret < 0)
+ goto error;
+
+ p = data.data = buffer +
+ krb5_shash_size(shash) +
+ krb5_digest_size(shash);
+ *(__be32 *)p = htonl(0x00000001);
+ p += 4;
+ memcpy(p, label->data, label->len);
+ p += label->len;
+ *p++ = 0;
+ memcpy(p, context->data, context->len);
+ p += context->len;
+ tmp = htonl(k);
+ memcpy(p, &tmp, 4);
+ p += 4;
+
+ ret = -EINVAL;
+ if (WARN_ON(p - (u8 *)data.data != data.len))
+ goto error;
+
+ K1.len = crypto_shash_digestsize(shash);
+ K1.data = buffer +
+ krb5_shash_size(shash);
+
+ ret = crypto_shash_finup(desc, data.data, data.len, K1.data);
+ if (ret < 0)
+ goto error;
+
+ memcpy(result->data, K1.data, result->len);
+
+error:
+ kfree_sensitive(buffer);
+error_shash:
+ crypto_free_shash(shash);
+ return ret;
+}
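+
+/*
+ * For illustration, the KDF input assembled above for the Kc derivation of
+ * aes128-cts-hmac-sha256-128 at usage 2 (k = 128, no context) is this
+ * 14-octet string:
+ */
+static const u8 rfc8009_kdf_input_example[14] __maybe_unused = {
+	0x00, 0x00, 0x00, 0x01,		/* iteration counter, fixed at 1 */
+	0x00, 0x00, 0x00, 0x02, 0x99,	/* label: usage | 0x99 */
+	0x00,				/* separator */
+	0x00, 0x00, 0x00, 0x80,		/* k = 128, in bits */
+};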
+
+/*
+ * Calculate the pseudo-random function, PRF().
+ *
+ * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 256)
+ * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 384)
+ *
+ * The "prfconstant" used in the PRF operation is the three-octet string
+ * "prf".
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_PRF(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *input_key,
+ const struct krb5_buffer *octet_string,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ static const struct krb5_buffer prfconstant = { 3, "prf" };
+
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, input_key, &prfconstant,
+ octet_string, krb5->prf_len * 8,
+ result, gfp);
+}
+
+/*
+ * Derive Ke.
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128)
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256)
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_Ke(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant,
+ &rfc8009_no_context, krb5->key_bytes * 8,
+ result, gfp);
+}
+
+/*
+ * Derive Kc/Ki
+ * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128)
+ * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128)
+ * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192)
+ * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192)
+ * [rfc8009 sec 5]
+ */
+static int rfc8009_calc_Ki(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_buffer *usage_constant,
+ struct krb5_buffer *result,
+ gfp_t gfp)
+{
+ return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant,
+ &rfc8009_no_context, krb5->cksum_len * 8,
+ result, gfp);
+}
+
+/*
+ * Apply encryption and checksumming functions to a message.  Unlike for
+ * RFC3961, for RFC8009 we have to feed the starting IV into the hash first.
+ */
+static ssize_t rfc8009_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg, size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded)
+{
+ struct aead_request *req;
+ struct scatterlist bsg[2];
+ ssize_t ret, done;
+ size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset;
+ void *buffer;
+ u8 *iv, *ad;
+
+ if (WARN_ON(data_offset != krb5->conf_len))
+ return -EINVAL; /* Data is in wrong place */
+
+ secure_offset = 0;
+ base_len = krb5->conf_len + data_len;
+ pad_len = 0;
+ secure_len = base_len + pad_len;
+ cksum_offset = secure_len;
+ if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len))
+ return -EFAULT;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+ ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead);
+
+	/* Generate a confounder and insert it into the message */
+ ret = -EFAULT;
+ if (!preconfounded) {
+ get_random_bytes(buffer, krb5->conf_len);
+ done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len,
+ secure_offset);
+ if (done != krb5->conf_len)
+ goto error;
+ }
+
+ /* We may need to pad out to the crypto blocksize. */
+ if (pad_len) {
+ done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len);
+ if (done != pad_len)
+ goto error;
+ }
+
+ /* We need to include the starting IV in the hash. */
+ sg_init_table(bsg, 2);
+ sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead));
+ sg_chain(bsg, 2, sg);
+
+ /* Hash and encrypt the message. */
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, krb5_aead_ivsize(aead));
+ aead_request_set_crypt(req, bsg, bsg, secure_len, iv);
+ ret = crypto_aead_encrypt(req);
+ if (ret < 0)
+ goto error;
+
+ ret = secure_len + krb5->cksum_len;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+/*
+ * Apply decryption and checksumming functions to a message.  Unlike for
+ * RFC3961, for RFC8009 we have to feed the starting IV into the hash first.
+ *
+ * The offset and length are updated to reflect the actual content of the
+ * encrypted region.
+ */
+static int rfc8009_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len)
+{
+ struct aead_request *req;
+ struct scatterlist bsg[2];
+ size_t bsize;
+ void *buffer;
+ int ret;
+ u8 *iv, *ad;
+
+ if (WARN_ON(*_offset != 0))
+ return -EINVAL; /* Can't set offset on aead */
+
+ if (*_len < krb5->conf_len + krb5->cksum_len)
+ return -EPROTO;
+
+ bsize = krb5_aead_size(aead) +
+ krb5_aead_ivsize(aead) * 2;
+ buffer = kzalloc(bsize, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ req = buffer;
+ iv = buffer + krb5_aead_size(aead);
+ ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead);
+
+ /* We need to include the starting IV in the hash. */
+ sg_init_table(bsg, 2);
+ sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead));
+ sg_chain(bsg, 2, sg);
+
+ /* Decrypt the message and verify its checksum. */
+ aead_request_set_tfm(req, aead);
+ aead_request_set_callback(req, 0, NULL, NULL);
+ aead_request_set_ad(req, krb5_aead_ivsize(aead));
+ aead_request_set_crypt(req, bsg, bsg, *_len, iv);
+ ret = crypto_aead_decrypt(req);
+ if (ret < 0)
+ goto error;
+
+ /* Adjust the boundaries of the data. */
+ *_offset += krb5->conf_len;
+ *_len -= krb5->conf_len + krb5->cksum_len;
+ ret = 0;
+
+error:
+ kfree_sensitive(buffer);
+ return ret;
+}
+
+static const struct krb5_crypto_profile rfc8009_crypto_profile = {
+ .calc_PRF = rfc8009_calc_PRF,
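+	/* Kc and Ki are the same length for these enctypes, so Kc reuses calc_Ki. */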
+ .calc_Kc = rfc8009_calc_Ki,
+ .calc_Ke = rfc8009_calc_Ke,
+ .calc_Ki = rfc8009_calc_Ki,
+ .derive_encrypt_keys = authenc_derive_encrypt_keys,
+ .load_encrypt_keys = authenc_load_encrypt_keys,
+ .derive_checksum_key = rfc3961_derive_checksum_key,
+ .load_checksum_key = rfc3961_load_checksum_key,
+ .encrypt = rfc8009_encrypt,
+ .decrypt = rfc8009_decrypt,
+ .get_mic = rfc3961_get_mic,
+ .verify_mic = rfc3961_verify_mic,
+};
+
+const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128 = {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128,
+ .name = "aes128-cts-hmac-sha256-128",
+ .encrypt_name = "authenc(hmac(sha256),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha256)",
+ .hash_name = "sha256",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 16,
+ .key_len = 16,
+ .Kc_len = 16,
+ .Ke_len = 16,
+ .Ki_len = 16,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 16,
+ .hash_len = 20,
+ .prf_len = 32,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc8009_crypto_profile,
+};
+
+const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192 = {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .ctype = KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256,
+ .name = "aes256-cts-hmac-sha384-192",
+ .encrypt_name = "authenc(hmac(sha384),cts(cbc(aes)))",
+ .cksum_name = "hmac(sha384)",
+ .hash_name = "sha384",
+ .derivation_enc = "cts(cbc(aes))",
+ .key_bytes = 32,
+ .key_len = 32,
+ .Kc_len = 24,
+ .Ke_len = 32,
+ .Ki_len = 24,
+ .block_len = 16,
+ .conf_len = 16,
+ .cksum_len = 24,
+ .hash_len = 20,
+ .prf_len = 48,
+ .keyed_cksum = true,
+ .random_to_key = NULL, /* Identity */
+ .profile = &rfc8009_crypto_profile,
+};
diff --git a/crypto/krb5/selftest.c b/crypto/krb5/selftest.c
new file mode 100644
index 000000000000..2a81a6315a0d
--- /dev/null
+++ b/crypto/krb5/selftest.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos library self-testing
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+#define VALID(X) \
+ ({ \
+ bool __x = (X); \
+ if (__x) { \
+ pr_warn("!!! TESTINVAL %s:%u\n", __FILE__, __LINE__); \
+ ret = -EBADMSG; \
+ } \
+ __x; \
+ })
+
+#define CHECK(X) \
+ ({ \
+ bool __x = (X); \
+ if (__x) { \
+ pr_warn("!!! TESTFAIL %s:%u\n", __FILE__, __LINE__); \
+ ret = -EBADMSG; \
+ } \
+ __x; \
+ })
+
+enum which_key {
+ TEST_KC, TEST_KE, TEST_KI,
+};
+
+#if 0
+static void dump_sg(struct scatterlist *sg, unsigned int limit)
+{
+ unsigned int index = 0, n = 0;
+
+ for (; sg && limit > 0; sg = sg_next(sg)) {
+ unsigned int off = sg->offset, len = umin(sg->length, limit);
+ const void *p = kmap_local_page(sg_page(sg));
+
+ limit -= len;
+ while (len > 0) {
+ unsigned int part = umin(len, 32);
+
+ pr_notice("[%x] %04x: %*phN\n", n, index, part, p + off);
+ index += part;
+ off += part;
+ len -= part;
+ }
+
+ kunmap_local(p);
+ n++;
+ }
+}
+#endif
+
+static int prep_buf(struct krb5_buffer *buf)
+{
+ buf->data = kmalloc(buf->len, GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+ return 0;
+}
+
+#define PREP_BUF(BUF, LEN) \
+ do { \
+ (BUF)->len = (LEN); \
+ ret = prep_buf((BUF)); \
+ if (ret < 0) \
+ goto out; \
+ } while (0)
+
+static int load_buf(struct krb5_buffer *buf, const char *from)
+{
+ size_t len = strlen(from);
+ int ret;
+
+ if (len > 1 && from[0] == '\'') {
+ PREP_BUF(buf, len - 1);
+ memcpy(buf->data, from + 1, len - 1);
+ ret = 0;
+ goto out;
+ }
+
+ if (VALID(len & 1))
+ return -EINVAL;
+
+ PREP_BUF(buf, len / 2);
+ ret = hex2bin(buf->data, from, buf->len);
+ if (ret < 0) {
+ VALID(1);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+#define LOAD_BUF(BUF, FROM) do { ret = load_buf(BUF, FROM); if (ret < 0) goto out; } while (0)
+
+static void clear_buf(struct krb5_buffer *buf)
+{
+ kfree(buf->data);
+ buf->len = 0;
+ buf->data = NULL;
+}
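+
+/*
+ * An illustrative sketch of the two encodings load_buf() accepts: a hex
+ * string decoded with hex2bin(), or a literal string introduced by an
+ * apostrophe.  Both calls below should yield the same four bytes.
+ */
+static int __maybe_unused load_buf_example(void)
+{
+	struct krb5_buffer hex = {}, lit = {};
+	int ret;
+
+	ret = load_buf(&hex, "74657374");	/* hex for "test" */
+	if (ret == 0)
+		ret = load_buf(&lit, "'test");	/* literal "test" */
+	if (ret == 0 &&
+	    (hex.len != lit.len || memcmp(hex.data, lit.data, hex.len) != 0))
+		ret = -EBADMSG;
+	clear_buf(&lit);
+	clear_buf(&hex);
+	return ret;
+}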
+
+/*
+ * Perform a pseudo-random function check.
+ */
+static int krb5_test_one_prf(const struct krb5_prf_test *test)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct krb5_buffer key = {}, octet = {}, result = {}, prf = {};
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ LOAD_BUF(&key, test->key);
+ LOAD_BUF(&octet, test->octet);
+ LOAD_BUF(&prf, test->prf);
+ PREP_BUF(&result, krb5->prf_len);
+
+ if (VALID(result.len != prf.len)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = krb5->profile->calc_PRF(krb5, &key, &octet, &result, GFP_KERNEL);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("PRF calculation failed %d\n", ret);
+ goto out;
+ }
+
+ if (memcmp(result.data, prf.data, result.len) != 0) {
+ CHECK(1);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&result);
+ clear_buf(&octet);
+ clear_buf(&key);
+ return ret;
+}
+
+/*
+ * Perform a key derivation check.
+ */
+static int krb5_test_key(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *base_key,
+ const struct krb5_key_test_one *test,
+ enum which_key which)
+{
+ struct krb5_buffer key = {}, result = {};
+ int ret;
+
+ LOAD_BUF(&key, test->key);
+ PREP_BUF(&result, key.len);
+
+ switch (which) {
+ case TEST_KC:
+ ret = krb5_derive_Kc(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ case TEST_KE:
+ ret = krb5_derive_Ke(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ case TEST_KI:
+ ret = krb5_derive_Ki(krb5, base_key, test->use, &result, GFP_KERNEL);
+ break;
+ default:
+ VALID(1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Key derivation failed %d\n", ret);
+ goto out;
+ }
+
+ if (memcmp(result.data, key.data, result.len) != 0) {
+ CHECK(1);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+out:
+ clear_buf(&key);
+ clear_buf(&result);
+ return ret;
+}
+
+static int krb5_test_one_key(const struct krb5_key_test *test)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct krb5_buffer base_key = {};
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ LOAD_BUF(&base_key, test->key);
+
+ ret = krb5_test_key(krb5, &base_key, &test->Kc, TEST_KC);
+ if (ret < 0)
+ goto out;
+ ret = krb5_test_key(krb5, &base_key, &test->Ke, TEST_KE);
+ if (ret < 0)
+ goto out;
+ ret = krb5_test_key(krb5, &base_key, &test->Ki, TEST_KI);
+ if (ret < 0)
+ goto out;
+
+out:
+ clear_buf(&base_key);
+ return ret;
+}
+
+/*
+ * Perform an encryption test.
+ */
+static int krb5_test_one_enc(const struct krb5_enc_test *test, void *buf)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct crypto_aead *ci = NULL;
+ struct krb5_buffer K0 = {}, Ke = {}, Ki = {}, keys = {};
+ struct krb5_buffer conf = {}, plain = {}, ct = {};
+ struct scatterlist sg[1];
+ size_t data_len, data_offset, message_len;
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ /* Load the test data into binary buffers. */
+ LOAD_BUF(&conf, test->conf);
+ LOAD_BUF(&plain, test->plain);
+ LOAD_BUF(&ct, test->ct);
+
+ if (test->K0) {
+ LOAD_BUF(&K0, test->K0);
+ } else {
+ LOAD_BUF(&Ke, test->Ke);
+ LOAD_BUF(&Ki, test->Ki);
+
+ ret = krb5->profile->load_encrypt_keys(krb5, &Ke, &Ki, &keys, GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (VALID(conf.len != krb5->conf_len) ||
+ VALID(ct.len != krb5->conf_len + plain.len + krb5->cksum_len))
+ goto out;
+
+ data_len = plain.len;
+ message_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
+ data_len, &data_offset);
+
+ if (CHECK(message_len != ct.len)) {
+ pr_warn("Encrypted length mismatch %zu != %u\n", message_len, ct.len);
+ goto out;
+ }
+ if (CHECK(data_offset != conf.len)) {
+ pr_warn("Data offset mismatch %zu != %u\n", data_offset, conf.len);
+ goto out;
+ }
+
+ memcpy(buf, conf.data, conf.len);
+ memcpy(buf + data_offset, plain.data, plain.len);
+
+ /* Allocate a crypto object and set its key. */
+ if (test->K0)
+ ci = crypto_krb5_prepare_encryption(krb5, &K0, test->usage, GFP_KERNEL);
+ else
+ ci = krb5_prepare_encryption(krb5, &keys, GFP_KERNEL);
+
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ ci = NULL;
+ pr_err("Couldn't alloc AEAD %s: %d\n", krb5->encrypt_name, ret);
+ goto out;
+ }
+
+ /* Encrypt the message. */
+ sg_init_one(sg, buf, message_len);
+ ret = crypto_krb5_encrypt(krb5, ci, sg, 1, message_len,
+ data_offset, data_len, true);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Encryption failed %d\n", ret);
+ goto out;
+ }
+ if (ret != message_len) {
+ CHECK(1);
+ pr_warn("Encrypted message wrong size %x != %zx\n", ret, message_len);
+ goto out;
+ }
+
+ if (memcmp(buf, ct.data, ct.len) != 0) {
+ CHECK(1);
+ pr_warn("Ciphertext mismatch\n");
+ pr_warn("BUF %*phN\n", ct.len, buf);
+ pr_warn("CT %*phN\n", ct.len, ct.data);
+ pr_warn("PT %*phN%*phN\n", conf.len, conf.data, plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ /* Decrypt the encrypted message. */
+ data_offset = 0;
+ data_len = message_len;
+ ret = crypto_krb5_decrypt(krb5, ci, sg, 1, &data_offset, &data_len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Decryption failed %d\n", ret);
+ goto out;
+ }
+
+ if (CHECK(data_offset != conf.len) ||
+ CHECK(data_len != plain.len))
+ goto out;
+
+ if (memcmp(buf, conf.data, conf.len) != 0) {
+ CHECK(1);
+ pr_warn("Confounder mismatch\n");
+ pr_warn("ENC %*phN\n", conf.len, buf);
+ pr_warn("DEC %*phN\n", conf.len, conf.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ if (memcmp(buf + conf.len, plain.data, plain.len) != 0) {
+ CHECK(1);
+ pr_warn("Plaintext mismatch\n");
+ pr_warn("BUF %*phN\n", plain.len, buf + conf.len);
+ pr_warn("PT %*phN\n", plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&ct);
+ clear_buf(&plain);
+ clear_buf(&conf);
+ clear_buf(&keys);
+ clear_buf(&Ki);
+ clear_buf(&Ke);
+ clear_buf(&K0);
+ if (ci)
+ crypto_free_aead(ci);
+ return ret;
+}
+
+/*
+ * Perform a checksum test.
+ */
+static int krb5_test_one_mic(const struct krb5_mic_test *test, void *buf)
+{
+ const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype);
+ struct crypto_shash *ci = NULL;
+ struct scatterlist sg[1];
+ struct krb5_buffer K0 = {}, Kc = {}, keys = {}, plain = {}, mic = {};
+ size_t offset, len, message_len;
+ int ret;
+
+ if (!krb5)
+ return -EOPNOTSUPP;
+
+ pr_notice("Running %s %s\n", krb5->name, test->name);
+
+ /* Allocate a crypto object and set its key. */
+ if (test->K0) {
+ LOAD_BUF(&K0, test->K0);
+ ci = crypto_krb5_prepare_checksum(krb5, &K0, test->usage, GFP_KERNEL);
+ } else {
+ LOAD_BUF(&Kc, test->Kc);
+
+ ret = krb5->profile->load_checksum_key(krb5, &Kc, &keys, GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+
+ ci = krb5_prepare_checksum(krb5, &Kc, GFP_KERNEL);
+ }
+ if (IS_ERR(ci)) {
+ ret = PTR_ERR(ci);
+ ci = NULL;
+ pr_err("Couldn't alloc shash %s: %d\n", krb5->cksum_name, ret);
+ goto out;
+ }
+
+ /* Load the test data into binary buffers. */
+ LOAD_BUF(&plain, test->plain);
+ LOAD_BUF(&mic, test->mic);
+
+ len = plain.len;
+ message_len = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE,
+ len, &offset);
+
+ if (CHECK(message_len != mic.len + plain.len)) {
+ pr_warn("MIC length mismatch %zu != %u\n",
+ message_len, mic.len + plain.len);
+ goto out;
+ }
+
+ memcpy(buf + offset, plain.data, plain.len);
+
+	/* Generate the MIC over the message. */
+ sg_init_one(sg, buf, 1024);
+
+ ret = crypto_krb5_get_mic(krb5, ci, NULL, sg, 1, 1024,
+ krb5->cksum_len, plain.len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Get MIC failed %d\n", ret);
+ goto out;
+ }
+ len = ret;
+
+ if (CHECK(len != plain.len + mic.len)) {
+ pr_warn("MIC length mismatch %zu != %u\n", len, plain.len + mic.len);
+ goto out;
+ }
+
+ if (memcmp(buf, mic.data, mic.len) != 0) {
+ CHECK(1);
+ pr_warn("MIC mismatch\n");
+ pr_warn("BUF %*phN\n", mic.len, buf);
+ pr_warn("MIC %*phN\n", mic.len, mic.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+	/* Verify the MIC over the message. */
+ offset = 0;
+ ret = crypto_krb5_verify_mic(krb5, ci, NULL, sg, 1, &offset, &len);
+ if (ret < 0) {
+ CHECK(1);
+ pr_warn("Verify MIC failed %d\n", ret);
+ goto out;
+ }
+
+ if (CHECK(offset != mic.len) ||
+ CHECK(len != plain.len))
+ goto out;
+
+ if (memcmp(buf + offset, plain.data, plain.len) != 0) {
+ CHECK(1);
+ pr_warn("Plaintext mismatch\n");
+ pr_warn("BUF %*phN\n", plain.len, buf + offset);
+ pr_warn("PT %*phN\n", plain.len, plain.data);
+ ret = -EKEYREJECTED;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ clear_buf(&mic);
+ clear_buf(&plain);
+ clear_buf(&keys);
+ clear_buf(&K0);
+ clear_buf(&Kc);
+ if (ci)
+ crypto_free_shash(ci);
+ return ret;
+}
+
+int krb5_selftest(void)
+{
+ void *buf;
+ int ret = 0, i;
+
+ buf = kmalloc(4096, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pr_notice("\n");
+ pr_notice("Running selftests\n");
+
+ for (i = 0; krb5_prf_tests[i].name; i++) {
+ ret = krb5_test_one_prf(&krb5_prf_tests[i]);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_prf_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_key_tests[i].name; i++) {
+ ret = krb5_test_one_key(&krb5_key_tests[i]);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_key_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_enc_tests[i].name; i++) {
+ memset(buf, 0x5a, 4096);
+ ret = krb5_test_one_enc(&krb5_enc_tests[i], buf);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_enc_tests[i].name);
+ }
+ }
+
+ for (i = 0; krb5_mic_tests[i].name; i++) {
+ memset(buf, 0x5a, 4096);
+ ret = krb5_test_one_mic(&krb5_mic_tests[i], buf);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ goto out;
+ pr_notice("Skipping %s\n", krb5_mic_tests[i].name);
+ }
+ }
+
+ ret = 0;
+out:
+ pr_notice("Selftests %s\n", ret == 0 ? "succeeded" : "failed");
+ kfree(buf);
+ return ret;
+}
diff --git a/crypto/krb5/selftest_data.c b/crypto/krb5/selftest_data.c
new file mode 100644
index 000000000000..24447ee8bf07
--- /dev/null
+++ b/crypto/krb5/selftest_data.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Data for Kerberos library self-testing
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "internal.h"
+
+/*
+ * Pseudo-random function tests.
+ */
+const struct krb5_prf_test krb5_prf_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "prf",
+ .key = "3705D96080C17728A0E800EAB6E0D23C",
+ .octet = "74657374",
+ .prf = "9D188616F63852FE86915BB840B4A886FF3E6BB0F819B49B893393D393854295",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "prf",
+ .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52",
+ .octet = "74657374",
+ .prf =
+ "9801F69A368C2BF675E59521E177D9A07F67EFE1CFDE8D3C8D6F6A0256E3B17D"
+ "B3C1B62AD1B8553360D17367EB1514D2",
+ },
+ {/* END */}
+};
+
+/*
+ * Key derivation tests.
+ */
+const struct krb5_key_test krb5_key_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "key",
+ .key = "3705D96080C17728A0E800EAB6E0D23C",
+ .Kc.use = 0x00000002,
+ .Kc.key = "B31A018A48F54776F403E9A396325DC3",
+ .Ke.use = 0x00000002,
+ .Ke.key = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki.use = 0x00000002,
+ .Ki.key = "9FDA0E56AB2D85E1569A688696C26A6C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "key",
+ .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52",
+ .Kc.use = 0x00000002,
+ .Kc.key = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D",
+ .Ke.use = 0x00000002,
+ .Ke.key = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki.use = 0x00000002,
+ .Ki.key = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "key",
+ .key = "57D0297298FFD9D35DE5A47FB4BDE24B",
+ .Kc.use = 0x00000002,
+ .Kc.key = "D155775A209D05F02B38D42A389E5A56",
+ .Ke.use = 0x00000002,
+ .Ke.key = "64DF83F85A532F17577D8C37035796AB",
+ .Ki.use = 0x00000002,
+ .Ki.key = "3E4FBDF30FB8259C425CB6C96F1F4635",
+ },
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "key",
+ .key = "B9D6828B2056B7BE656D88A123B1FAC68214AC2B727ECF5F69AFE0C4DF2A6D2C",
+ .Kc.use = 0x00000002,
+ .Kc.key = "E467F9A9552BC7D3155A6220AF9C19220EEED4FF78B0D1E6A1544991461A9E50",
+ .Ke.use = 0x00000002,
+ .Ke.key = "412AEFC362A7285FC3966C6A5181E7605AE675235B6D549FBFC9AB6630A4C604",
+ .Ki.use = 0x00000002,
+ .Ki.key = "FA624FA0E523993FA388AEFDC67E67EBCD8C08E8A0246B1D73B0D1DD9FC582B0",
+ },
+ {/* END */}
+};
+
+/*
+ * Encryption tests.  A leading apostrophe marks a literal ASCII string;
+ * all other values are hex.
+ */
+const struct krb5_enc_test krb5_enc_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "7E5895EAF2672435BAD817F545A37148",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "EF85FB890BB8472F4DAB20394DCA781DAD877EDA39D50C870C0D5A0A8E48C718",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain<block",
+ .plain = "000102030405",
+ .conf = "7BCA285E2FD4130FB55B1A5C83BC5B24",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "84D7F30754ED987BAB0BF3506BEB09CFB55402CEF7E6877CE99E247E52D16ED4421DFDF8976C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain==block",
+ .plain = "000102030405060708090A0B0C0D0E0F",
+ .conf = "56AB21713FF62C0A1457200F6FA9948F",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "3517D640F50DDC8AD3628722B3569D2AE07493FA8263254080EA65C1008E8FC295FB4852E7D83E1E7C48C37EEBE6B0D3",
+ }, {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "enc plain>block",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .conf = "A7A4E29A4728CE10664FB64E49AD3FAC",
+ .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E",
+ .Ki = "9FDA0E56AB2D85E1569A688696C26A6C",
+ .ct = "720F73B18D9859CD6CCB4346115CD336C70F58EDC0C4437C5573544C31C813BCE1E6D072C186B39A413C2F92CA9B8334A287FFCBFC",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "F764E9FA15C276478B2C7D0C4E5F58E4",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "41F53FA5BFE7026D91FAF9BE959195A058707273A96A40F0A01960621AC612748B9BBFBE7EB4CE3C",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain<block",
+ .plain = "000102030405",
+ .conf = "B80D3251C1F6471494256FFE712D0B9A",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "4ED7B37C2BCAC8F74F23C1CF07E62BC7B75FB3F637B9F559C7F664F69EAB7B6092237526EA0D1F61CB20D69D10F2",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain==block",
+ .plain = "000102030405060708090A0B0C0D0E0F",
+ .conf = "53BF8A0D105265D4E276428624CE5E63",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "BC47FFEC7998EB91E8115CF8D19DAC4BBBE2E163E87DD37F49BECA92027764F68CF51F14D798C2273F35DF574D1F932E40C4FF255B36A266",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "enc plain>block",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .conf = "763E65367E864F02F55153C7E3B58AF1",
+ .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49",
+ .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F",
+ .ct = "40013E2DF58E8751957D2878BCD2D6FE101CCFD556CB1EAE79DB3C3EE86429F2B2A602AC86FEF6ECB647D6295FAE077A1FEB517508D2C16B4192E01F62",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "B69822A19A6B09C0EBC8557D1F1B6C0A",
+ .K0 = "1DC46A8D763F4F93742BCBA3387576C3",
+ .usage = 0,
+ .ct = "C466F1871069921EDB7C6FDE244A52DB0BA10EDC197BDB8006658CA3CCCE6EB8",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 1 plain",
+ .plain = "'1",
+ .conf = "6F2FC3C2A166FD8898967A83DE9596D9",
+ .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C",
+ .usage = 1,
+ .ct = "842D21FD950311C0DD464A3F4BE8D6DA88A56D559C9B47D3F9A85067AF661559B8",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 9 plain",
+ .plain = "'9 bytesss",
+ .conf = "A5B4A71E077AEEF93C8763C18FDB1F10",
+ .K0 = "A1BB61E805F9BA6DDE8FDBDDC05CDEA0",
+ .usage = 2,
+ .ct = "619FF072E36286FF0A28DEB3A352EC0D0EDF5C5160D663C901758CCF9D1ED33D71DB8F23AABF8348A0",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 13 plain",
+ .plain = "'13 bytes byte",
+ .conf = "19FEE40D810C524B5B22F01874C693DA",
+ .K0 = "2CA27A5FAF5532244506434E1CEF6676",
+ .usage = 3,
+ .ct = "B8ECA3167AE6315512E59F98A7C500205E5F63FF3BB389AF1C41A21D640D8615C9ED3FBEB05AB6ACB67689B5EA",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "enc 30 plain",
+ .plain = "'30 bytes bytes bytes bytes byt",
+ .conf = "CA7A7AB4BE192DABD603506DB19C39E2",
+ .K0 = "7824F8C16F83FF354C6BF7515B973F43",
+ .usage = 4,
+ .ct = "A26A3905A4FFD5816B7B1E27380D08090C8EC1F304496E1ABDCD2BDCD1DFFC660989E117A713DDBB57A4146C1587CBA4356665591D2240282F5842B105A5",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc no plain",
+ .plain = "",
+ .conf = "3CBBD2B45917941067F96599BB98926C",
+ .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B",
+ .usage = 0,
+ .ct = "03886D03310B47A6D8F06D7B94D1DD837ECCE315EF652AFF620859D94A259266",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 1 plain",
+ .plain = "'1",
+ .conf = "DEF487FCEBE6DE6346D4DA4521BBA2D2",
+ .K0 = "1B97FE0A190E2021EB30753E1B6E1E77B0754B1D684610355864104963463833",
+ .usage = 1,
+ .ct = "2C9C1570133C99BF6A34BC1B0212002FD194338749DB4135497A347CFCD9D18A12",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 9 plain",
+ .plain = "'9 bytesss",
+ .conf = "AD4FF904D34E555384B14100FC465F88",
+ .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05",
+ .usage = 2,
+ .ct = "9C6DE75F812DE7ED0D28B2963557A115640998275B0AF5152709913FF52A2A9C8E63B872F92E64C839",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 13 plain",
+ .plain = "'13 bytes byte",
+ .conf = "CF9BCA6DF1144E0C0AF9B8F34C90D514",
+ .K0 = "B038B132CD8E06612267FAB7170066D88AECCBA0B744BFC60DC89BCA182D0715",
+ .usage = 3,
+ .ct = "EEEC85A9813CDC536772AB9B42DEFC5706F726E975DDE05A87EB5406EA324CA185C9986B42AABE794B84821BEE",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "enc 30 plain",
+ .plain = "'30 bytes bytes bytes bytes byt",
+ .conf = "644DEF38DA35007275878D216855E228",
+ .K0 = "CCFCD349BF4C6677E86E4B02B8EAB924A546AC731CF9BF6989B996E7D6BFBBA7",
+ .usage = 4,
+ .ct = "0E44680985855F2D1F1812529CA83BFD8E349DE6FD9ADA0BAAA048D68E265FEBF34AD1255A344999AD37146887A6C6845731AC7F46376A0504CD06571474",
+ },
+ {/* END */}
+};
+
+/*
+ * Checksum generation tests.
+ */
+const struct krb5_mic_test krb5_mic_tests[] = {
+ /* rfc8009 Appendix A */
+ {
+ .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128,
+ .name = "mic",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .Kc = "B31A018A48F54776F403E9A396325DC3",
+ .mic = "D78367186643D67B411CBA9139FC1DEE",
+ }, {
+ .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192,
+ .name = "mic",
+ .plain = "000102030405060708090A0B0C0D0E0F1011121314",
+ .Kc = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D",
+ .mic = "45EE791567EEFCA37F4AC1E0222DE80D43C3BFA06699672A",
+ },
+ /* rfc6803 sec 10 */
+ {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "mic abc",
+ .plain = "'abcdefghijk",
+ .K0 = "1DC46A8D763F4F93742BCBA3387576C3",
+ .usage = 7,
+ .mic = "1178E6C5C47A8C1AE0C4B9C7D4EB7B6B",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC,
+ .name = "mic ABC",
+ .plain = "'ABCDEFGHIJKLMNOPQRSTUVWXYZ",
+ .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C",
+ .usage = 8,
+ .mic = "D1B34F7004A731F23A0C00BF6C3F753A",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "mic 123",
+ .plain = "'123456789",
+ .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B",
+ .usage = 9,
+ .mic = "87A12CFD2B96214810F01C826E7744B1",
+ }, {
+ .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC,
+ .name = "mic !@#",
+ .plain = "'!@#$%^&*()!@#$%^&*()!@#$%^&*()",
+ .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05",
+ .usage = 10,
+ .mic = "3FA0B42355E52B189187294AA252AB64",
+ },
+ {/* END */}
+};
diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c
new file mode 100644
index 000000000000..d07769bf149e
--- /dev/null
+++ b/crypto/krb5enc.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AEAD wrapper for Kerberos 5 RFC3961 simplified profile.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Derived from authenc:
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct krb5enc_instance_ctx {
+ struct crypto_ahash_spawn auth;
+ struct crypto_skcipher_spawn enc;
+ unsigned int reqoff;
+};
+
+struct krb5enc_ctx {
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+};
+
+struct krb5enc_request_ctx {
+ struct scatterlist src[2];
+ struct scatterlist dst[2];
+ char tail[];
+};
+
+static void krb5enc_request_complete(struct aead_request *req, int err)
+{
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
+/**
+ * crypto_krb5enc_extractkeys - Extract Ke and Ki keys from the key blob.
+ * @keys: Where to put the key sizes and pointers
+ * @key: Encoded key material
+ * @keylen: Amount of key material
+ *
+ * Decode the key blob we're given. It starts with an rtattr that indicates
+ * the format and the length. Format CRYPTO_AUTHENC_KEYA_PARAM is:
+ *
+ * rtattr || __be32 enckeylen || authkey || enckey
+ *
+ * Note that the rtattr is in cpu-endian form, unlike enckeylen. This must be
+ * handled correctly in static testmgr data.
+ */
+int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen)
+{
+ struct rtattr *rta = (struct rtattr *)key;
+ struct crypto_authenc_key_param *param;
+
+ if (!RTA_OK(rta, keylen))
+ return -EINVAL;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+
+ /*
+ * RTA_OK() didn't align the rtattr's payload when validating that it
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
+ * aligned boundary. To avoid confusion, require that the rtattr
+ * payload be exactly the param struct, which has a 4-byte aligned size.
+ */
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
+ return -EINVAL;
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
+
+ param = RTA_DATA(rta);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
+
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
+
+ keys->authkeylen = keylen - keys->enckeylen;
+ keys->authkey = key;
+ keys->enckey = key + keys->authkeylen;
+ return 0;
+}
+EXPORT_SYMBOL(crypto_krb5enc_extractkeys);
+
+static int krb5enc_setkey(struct crypto_aead *krb5enc, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct crypto_skcipher *enc = ctx->enc;
+ struct crypto_ahash *auth = ctx->auth;
+ unsigned int flags = crypto_aead_get_flags(krb5enc);
+ int err = -EINVAL;
+
+ if (crypto_krb5enc_extractkeys(&keys, key, keylen) != 0)
+ goto out;
+
+ crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
+ crypto_ahash_set_flags(auth, flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
+ if (err)
+ goto out;
+
+ crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(enc, flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
+static void krb5enc_encrypt_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ krb5enc_request_complete(req, err);
+}
+
+/*
+ * Start the encryption of the plaintext. We skip over the associated data as
+ * that only gets included in the hash.
+ */
+static int krb5enc_dispatch_encrypt(struct aead_request *req,
+ unsigned int flags)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_skcipher *enc = ctx->enc;
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ictx->reqoff);
+ struct scatterlist *src, *dst;
+
+ src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+ if (req->src == req->dst)
+ dst = src;
+ else
+ dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+
+ skcipher_request_set_tfm(skreq, enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ krb5enc_encrypt_done, req);
+ skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
+/*
+ * Insert the hash into the checksum field in the destination buffer directly
+ * after the encrypted region.
+ */
+static void krb5enc_insert_checksum(struct aead_request *req, u8 *hash)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+
+ scatterwalk_map_and_copy(hash, req->dst,
+ req->assoclen + req->cryptlen,
+ crypto_aead_authsize(krb5enc), 1);
+}
+
+/*
+ * Upon completion of an asynchronous digest, transfer the hash to the checksum
+ * field.
+ */
+static void krb5enc_encrypt_ahash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+
+ if (err)
+ return krb5enc_request_complete(req, err);
+
+ krb5enc_insert_checksum(req, ahreq->result);
+
+ err = krb5enc_dispatch_encrypt(req, 0);
+ if (err != -EINPROGRESS)
+ aead_request_complete(req, err);
+}
+
+/*
+ * Start the digest of the plaintext for encryption. In theory, this could be
+ * run in parallel with the encryption, provided the src and dst buffers don't
+ * overlap.
+ */
+static int krb5enc_dispatch_encrypt_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct crypto_ahash *auth = ctx->auth;
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ krb5enc_encrypt_ahash_done, req);
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen);
+
+ err = crypto_ahash_digest(ahreq);
+ if (err)
+ return err;
+
+ krb5enc_insert_checksum(req, hash);
+ return 0;
+}
+
+/*
+ * Process an encryption operation. We can perform the cipher and the hash in
+ * parallel, provided the src and dst buffers are separate.
+ */
+static int krb5enc_encrypt(struct aead_request *req)
+{
+ int err;
+
+ err = krb5enc_dispatch_encrypt_hash(req);
+ if (err < 0)
+ return err;
+
+ return krb5enc_dispatch_encrypt(req, aead_request_flags(req));
+}
+
+static int krb5enc_verify_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ u8 *calc_hash = areq_ctx->tail;
+ u8 *msg_hash = areq_ctx->tail + authsize;
+
+ scatterwalk_map_and_copy(msg_hash, req->src, ahreq->nbytes, authsize, 0);
+
+ if (crypto_memneq(msg_hash, calc_hash, authsize))
+ return -EBADMSG;
+ return 0;
+}
+
+static void krb5enc_decrypt_hash_done(void *data, int err)
+{
+ struct aead_request *req = data;
+
+ if (err)
+ return krb5enc_request_complete(req, err);
+
+ err = krb5enc_verify_hash(req);
+ krb5enc_request_complete(req, err);
+}
+
+/*
+ * Dispatch the hashing of the plaintext after we've done the decryption.
+ */
+static int krb5enc_dispatch_decrypt_hash(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+ struct crypto_ahash *auth = ctx->auth;
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ u8 *hash = areq_ctx->tail;
+ int err;
+
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, req->dst, hash,
+ req->assoclen + req->cryptlen - authsize);
+ ahash_request_set_callback(ahreq, aead_request_flags(req),
+ krb5enc_decrypt_hash_done, req);
+
+ err = crypto_ahash_digest(ahreq);
+ if (err < 0)
+ return err;
+
+ return krb5enc_verify_hash(req);
+}
+
+/*
+ * Dispatch the decryption of the ciphertext.
+ */
+static int krb5enc_dispatch_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *krb5enc = crypto_aead_reqtfm(req);
+ struct aead_instance *inst = aead_alg_instance(krb5enc);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req);
+ struct skcipher_request *skreq = (void *)(areq_ctx->tail +
+ ictx->reqoff);
+ unsigned int authsize = crypto_aead_authsize(krb5enc);
+ struct scatterlist *src, *dst;
+
+ src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+ dst = src;
+
+ if (req->src != req->dst)
+ dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+
+ skcipher_request_set_tfm(skreq, ctx->enc);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(skreq, src, dst,
+ req->cryptlen - authsize, req->iv);
+
+ return crypto_skcipher_decrypt(skreq);
+}
+
+static int krb5enc_decrypt(struct aead_request *req)
+{
+ int err;
+
+ err = krb5enc_dispatch_decrypt(req);
+ if (err < 0)
+ return err;
+
+ return krb5enc_dispatch_decrypt_hash(req);
+}
+
+static int krb5enc_init_tfm(struct crypto_aead *tfm)
+{
+ struct aead_instance *inst = aead_alg_instance(tfm);
+ struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+ int err;
+
+ auth = crypto_spawn_ahash(&ictx->auth);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ enc = crypto_spawn_skcipher(&ictx->enc);
+ err = PTR_ERR(enc);
+ if (IS_ERR(enc))
+ goto err_free_ahash;
+
+ ctx->auth = auth;
+ ctx->enc = enc;
+
+ crypto_aead_set_reqsize(
+ tfm,
+ sizeof(struct krb5enc_request_ctx) +
+ ictx->reqoff + /* Space for two checksums */
+ umax(sizeof(struct ahash_request) + crypto_ahash_reqsize(auth),
+ sizeof(struct skcipher_request) + crypto_skcipher_reqsize(enc)));
+
+ return 0;
+
+err_free_ahash:
+ crypto_free_ahash(auth);
+ return err;
+}
+
+static void krb5enc_exit_tfm(struct crypto_aead *tfm)
+{
+ struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_ahash(ctx->auth);
+ crypto_free_skcipher(ctx->enc);
+}
+
+static void krb5enc_free(struct aead_instance *inst)
+{
+ struct krb5enc_instance_ctx *ctx = aead_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->enc);
+ crypto_drop_ahash(&ctx->auth);
+ kfree(inst);
+}
+
+/*
+ * Create an instance of a template for a specific hash and cipher pair.
+ */
+static int krb5enc_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct krb5enc_instance_ctx *ictx;
+ struct skcipher_alg_common *enc;
+ struct hash_alg_common *auth;
+ struct aead_instance *inst;
+ struct crypto_alg *auth_base;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
+ if (err) {
+ pr_err("attr_type failed\n");
+ return err;
+ }
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ ictx = aead_instance_ctx(inst);
+
+ err = crypto_grab_ahash(&ictx->auth, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[1]), 0, mask);
+ if (err) {
+ pr_err("grab ahash failed\n");
+ goto err_free_inst;
+ }
+ auth = crypto_spawn_ahash_alg(&ictx->auth);
+ auth_base = &auth->base;
+
+ err = crypto_grab_skcipher(&ictx->enc, aead_crypto_instance(inst),
+ crypto_attr_alg_name(tb[2]), 0, mask);
+ if (err) {
+ pr_err("grab skcipher failed\n");
+ goto err_free_inst;
+ }
+ enc = crypto_spawn_skcipher_alg_common(&ictx->enc);
+
+ ictx->reqoff = 2 * auth->digestsize;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "krb5enc(%s,%s)", auth_base->cra_name,
+ enc->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "krb5enc(%s,%s)", auth_base->cra_driver_name,
+ enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto err_free_inst;
+
+ inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
+ auth_base->cra_priority;
+ inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
+ inst->alg.base.cra_alignmask = enc->base.cra_alignmask;
+ inst->alg.base.cra_ctxsize = sizeof(struct krb5enc_ctx);
+
+ inst->alg.ivsize = enc->ivsize;
+ inst->alg.chunksize = enc->chunksize;
+ inst->alg.maxauthsize = auth->digestsize;
+
+ inst->alg.init = krb5enc_init_tfm;
+ inst->alg.exit = krb5enc_exit_tfm;
+
+ inst->alg.setkey = krb5enc_setkey;
+ inst->alg.encrypt = krb5enc_encrypt;
+ inst->alg.decrypt = krb5enc_decrypt;
+
+ inst->free = krb5enc_free;
+
+ err = aead_register_instance(tmpl, inst);
+ if (err) {
+		pr_err("register failed\n");
+ goto err_free_inst;
+ }
+
+ return 0;
+
+err_free_inst:
+ krb5enc_free(inst);
+ return err;
+}
+
+static struct crypto_template crypto_krb5enc_tmpl = {
+ .name = "krb5enc",
+ .create = krb5enc_create,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_krb5enc_module_init(void)
+{
+ return crypto_register_template(&crypto_krb5enc_tmpl);
+}
+
+static void __exit crypto_krb5enc_module_exit(void)
+{
+ crypto_unregister_template(&crypto_krb5enc_tmpl);
+}
+
+subsys_initcall(crypto_krb5enc_module_init);
+module_exit(crypto_krb5enc_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Simple AEAD wrapper for Kerberos 5 RFC3961");
+MODULE_ALIAS_CRYPTO("krb5enc");
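
[Editor's sketch] Two hedged usage examples for the template above. First, packing a key blob in the rtattr format that crypto_krb5enc_extractkeys() documents; pack_krb5enc_key() is a hypothetical helper, with sizing checks kept minimal:

	#include <crypto/authenc.h>
	#include <linux/rtnetlink.h>

	static int pack_krb5enc_key(u8 *buf, size_t buflen,
				    const u8 *authkey, unsigned int authkeylen,
				    const u8 *enckey, unsigned int enckeylen)
	{
		struct rtattr *rta = (struct rtattr *)buf;
		struct crypto_authenc_key_param *param;

		if (buflen < RTA_LENGTH(sizeof(*param)) + authkeylen + enckeylen)
			return -EINVAL;

		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enckeylen); /* big-endian, unlike rta_len */

		/* The parser skips rta->rta_len bytes, so lay the keys out after it. */
		memcpy(buf + rta->rta_len, authkey, authkeylen);
		memcpy(buf + rta->rta_len + authkeylen, enckey, enckeylen);
		return 0;
	}

Second, instantiating the template for one hash/cipher pair; the algorithm string follows the cra_name format built in krb5enc_create(), though whether a given pair is available depends on the kernel configuration:

	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("krb5enc(hmac(sha256),cts(cbc(aes)))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* crypto_aead_setkey() takes a blob packed as above; release the
	 * transform with crypto_free_aead() when done. */
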
diff --git a/crypto/lrw.c b/crypto/lrw.c
index e216fbf2b786..391ae0f7641f 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -167,7 +167,7 @@ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
while (w.nbytes) {
unsigned int avail = w.nbytes;
- be128 *wsrc;
+ const be128 *wsrc;
be128 *wdst;
wsrc = w.src.virt.addr;
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 0606f8862e78..82588607fb2e 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -16,7 +16,7 @@ struct lz4_ctx {
void *lz4_comp_mem;
};
-static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
+static void *lz4_alloc_ctx(void)
{
void *ctx;
@@ -27,29 +27,11 @@ static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lz4_init(struct crypto_tfm *tfm)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
- if (IS_ERR(ctx->lz4_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lz4_free_ctx(void *ctx)
{
vfree(ctx);
}
-static void lz4_exit(struct crypto_tfm *tfm)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lz4_free_ctx(NULL, ctx->lz4_comp_mem);
-}
-
static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -70,14 +52,6 @@ static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
}
-static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
-}
-
static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -97,26 +71,6 @@ static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
}
-static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
-}
-
-static struct crypto_alg alg_lz4 = {
- .cra_name = "lz4",
- .cra_driver_name = "lz4-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lz4_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lz4_init,
- .cra_exit = lz4_exit,
- .cra_u = { .compress = {
- .coa_compress = lz4_compress_crypto,
- .coa_decompress = lz4_decompress_crypto } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = lz4_alloc_ctx,
.free_ctx = lz4_free_ctx,
@@ -131,24 +85,11 @@ static struct scomp_alg scomp = {
static int __init lz4_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg_lz4);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg_lz4);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lz4_mod_fini(void)
{
- crypto_unregister_alg(&alg_lz4);
crypto_unregister_scomp(&scomp);
}
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index d7cc94aa2fcf..997e76c0183a 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -4,18 +4,17 @@
*
* Copyright (c) 2013 Chanho Min <chanho.min@lge.com>
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
-#include <crypto/internal/scompress.h>
struct lz4hc_ctx {
void *lz4hc_comp_mem;
};
-static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
+static void *lz4hc_alloc_ctx(void)
{
void *ctx;
@@ -26,29 +25,11 @@ static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lz4hc_init(struct crypto_tfm *tfm)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL);
- if (IS_ERR(ctx->lz4hc_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lz4hc_free_ctx(void *ctx)
{
vfree(ctx);
}
-static void lz4hc_exit(struct crypto_tfm *tfm)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem);
-}
-
static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -69,16 +50,6 @@ static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx);
}
-static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lz4hc_compress_crypto(src, slen, dst, dlen,
- ctx->lz4hc_comp_mem);
-}
-
static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -98,26 +69,6 @@ static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
}
-static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst,
- unsigned int *dlen)
-{
- return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
-}
-
-static struct crypto_alg alg_lz4hc = {
- .cra_name = "lz4hc",
- .cra_driver_name = "lz4hc-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lz4hc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lz4hc_init,
- .cra_exit = lz4hc_exit,
- .cra_u = { .compress = {
- .coa_compress = lz4hc_compress_crypto,
- .coa_decompress = lz4hc_decompress_crypto } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = lz4hc_alloc_ctx,
.free_ctx = lz4hc_free_ctx,
@@ -132,24 +83,11 @@ static struct scomp_alg scomp = {
static int __init lz4hc_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg_lz4hc);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg_lz4hc);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lz4hc_mod_fini(void)
{
- crypto_unregister_alg(&alg_lz4hc);
crypto_unregister_scomp(&scomp);
}
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index 0631d975bfac..b1350ae278b8 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -3,19 +3,17 @@
* Cryptographic API.
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
#include <linux/lzo.h>
-#include <crypto/internal/scompress.h>
+#include <linux/module.h>
+#include <linux/slab.h>
struct lzorle_ctx {
void *lzorle_comp_mem;
};
-static void *lzorle_alloc_ctx(struct crypto_scomp *tfm)
+static void *lzorle_alloc_ctx(void)
{
void *ctx;
@@ -26,36 +24,18 @@ static void *lzorle_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lzorle_init(struct crypto_tfm *tfm)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL);
- if (IS_ERR(ctx->lzorle_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lzorle_free_ctx(void *ctx)
{
kvfree(ctx);
}
-static void lzorle_exit(struct crypto_tfm *tfm)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lzorle_free_ctx(NULL, ctx->lzorle_comp_mem);
-}
-
static int __lzorle_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
- err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx);
+ err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,14 +44,6 @@ static int __lzorle_compress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzorle_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lzorle_compress(src, slen, dst, dlen, ctx->lzorle_comp_mem);
-}
-
static int lzorle_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -94,12 +66,6 @@ static int __lzorle_decompress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzorle_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- return __lzorle_decompress(src, slen, dst, dlen);
-}
-
static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -107,19 +73,6 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lzorle_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "lzo-rle",
- .cra_driver_name = "lzo-rle-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lzorle_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lzorle_init,
- .cra_exit = lzorle_exit,
- .cra_u = { .compress = {
- .coa_compress = lzorle_compress,
- .coa_decompress = lzorle_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = lzorle_alloc_ctx,
.free_ctx = lzorle_free_ctx,
@@ -134,24 +87,11 @@ static struct scomp_alg scomp = {
static int __init lzorle_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lzorle_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
diff --git a/crypto/lzo.c b/crypto/lzo.c
index ebda132dd22b..dfe5a07ca35f 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -3,19 +3,17 @@
* Cryptographic API.
*/
+#include <crypto/internal/scompress.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
#include <linux/lzo.h>
-#include <crypto/internal/scompress.h>
+#include <linux/module.h>
+#include <linux/slab.h>
struct lzo_ctx {
void *lzo_comp_mem;
};
-static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
+static void *lzo_alloc_ctx(void)
{
void *ctx;
@@ -26,36 +24,18 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int lzo_init(struct crypto_tfm *tfm)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
- if (IS_ERR(ctx->lzo_comp_mem))
- return -ENOMEM;
-
- return 0;
-}
-
-static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void lzo_free_ctx(void *ctx)
{
kvfree(ctx);
}
-static void lzo_exit(struct crypto_tfm *tfm)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lzo_free_ctx(NULL, ctx->lzo_comp_mem);
-}
-
static int __lzo_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
- err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
+ err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
@@ -64,14 +44,6 @@ static int __lzo_compress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
-}
-
static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -94,12 +66,6 @@ static int __lzo_decompress(const u8 *src, unsigned int slen,
return 0;
}
-static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- return __lzo_decompress(src, slen, dst, dlen);
-}
-
static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -107,19 +73,6 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __lzo_decompress(src, slen, dst, dlen);
}
-static struct crypto_alg alg = {
- .cra_name = "lzo",
- .cra_driver_name = "lzo-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct lzo_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = lzo_init,
- .cra_exit = lzo_exit,
- .cra_u = { .compress = {
- .coa_compress = lzo_compress,
- .coa_decompress = lzo_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = lzo_alloc_ctx,
.free_ctx = lzo_free_ctx,
@@ -134,24 +87,11 @@ static struct scomp_alg scomp = {
static int __init lzo_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret) {
- crypto_unregister_alg(&alg);
- return ret;
- }
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit lzo_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index cbfb3ac14b3a..9d2e56d6744a 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -22,8 +22,8 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
+ const u8 *src = walk->src.virt.addr;
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
@@ -45,17 +45,17 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
do {
- memcpy(tmpbuf, src, bsize);
- crypto_xor(iv, src, bsize);
- crypto_cipher_encrypt_one(tfm, src, iv);
- crypto_xor_cpy(iv, tmpbuf, src, bsize);
+ memcpy(tmpbuf, dst, bsize);
+ crypto_xor(iv, dst, bsize);
+ crypto_cipher_encrypt_one(tfm, dst, iv);
+ crypto_xor_cpy(iv, tmpbuf, dst, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
@@ -89,8 +89,8 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
+ const u8 *src = walk->src.virt.addr;
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
@@ -112,17 +112,17 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
do {
- memcpy(tmpbuf, src, bsize);
- crypto_cipher_decrypt_one(tfm, src, src);
- crypto_xor(src, iv, bsize);
- crypto_xor_cpy(iv, src, tmpbuf, bsize);
+ memcpy(tmpbuf, dst, bsize);
+ crypto_cipher_decrypt_one(tfm, dst, dst);
+ crypto_xor(dst, iv, bsize);
+ crypto_xor_cpy(iv, dst, tmpbuf, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
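
[Editor's note] For reference, the chaining relation these loops implement is the standard PCBC one, which is why the in-place variants must stash each block in tmpbuf before overwriting it:

	/*
	 * Encrypt: C[i] = E(P[i] ^ IV[i]);  IV[i+1] = P[i] ^ C[i]
	 * Decrypt: P[i] = D(C[i]) ^ IV[i];  IV[i+1] = P[i] ^ C[i]
	 *
	 * In the in-place case P[i] and C[i] share a buffer, so the
	 * pre-image is copied to tmpbuf first; the switch from the src to
	 * the dst pointer above is behaviour-neutral, since src == dst in
	 * these functions.
	 */
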
diff --git a/crypto/proc.c b/crypto/proc.c
index 522b27d90d29..82f15b967e85 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -72,9 +72,6 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
break;
- case CRYPTO_ALG_TYPE_COMPRESS:
- seq_printf(m, "type : compression\n");
- break;
default:
seq_printf(m, "type : unknown\n");
break;
diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c
index f68ffd338f48..d01ac75635e0 100644
--- a/crypto/rsassa-pkcs1.c
+++ b/crypto/rsassa-pkcs1.c
@@ -210,7 +210,7 @@ static int rsassa_pkcs1_sign(struct crypto_sig *tfm,
memset(dst, 0, pad_len);
}
- return 0;
+ return ctx->key_size;
}
static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 16f6ba896fb6..8225801488d5 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -15,59 +15,103 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
-static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
+void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes)
{
- void *src = out ? buf : sgdata;
- void *dst = out ? sgdata : buf;
+ struct scatterlist *sg = walk->sg;
- memcpy(dst, src, nbytes);
+ nbytes += walk->offset - sg->offset;
+
+ while (nbytes > sg->length) {
+ nbytes -= sg->length;
+ sg = sg_next(sg);
+ }
+ walk->sg = sg;
+ walk->offset = sg->offset + nbytes;
}
+EXPORT_SYMBOL_GPL(scatterwalk_skip);
-void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes, int out)
+inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
+ unsigned int nbytes)
{
- for (;;) {
- unsigned int len_this_page = scatterwalk_pagelen(walk);
- u8 *vaddr;
-
- if (len_this_page > nbytes)
- len_this_page = nbytes;
-
- if (out != 2) {
- vaddr = scatterwalk_map(walk);
- memcpy_dir(buf, vaddr, len_this_page, out);
- scatterwalk_unmap(vaddr);
- }
+ do {
+ unsigned int to_copy;
+
+ to_copy = scatterwalk_next(walk, nbytes);
+ memcpy(buf, walk->addr, to_copy);
+ scatterwalk_done_src(walk, to_copy);
+ buf += to_copy;
+ nbytes -= to_copy;
+ } while (nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_from_scatterwalk);
- scatterwalk_advance(walk, len_this_page);
+inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
+ unsigned int nbytes)
+{
+ do {
+ unsigned int to_copy;
+
+ to_copy = scatterwalk_next(walk, nbytes);
+ memcpy(walk->addr, buf, to_copy);
+ scatterwalk_done_dst(walk, to_copy);
+ buf += to_copy;
+ nbytes -= to_copy;
+ } while (nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_to_scatterwalk);
- if (nbytes == len_this_page)
- break;
+void memcpy_from_sglist(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes)
+{
+ struct scatter_walk walk;
- buf += len_this_page;
- nbytes -= len_this_page;
+ if (unlikely(nbytes == 0)) /* in case sg == NULL */
+ return;
- scatterwalk_pagedone(walk, out & 1, 1);
- }
+ scatterwalk_start_at_pos(&walk, sg, start);
+ memcpy_from_scatterwalk(buf, &walk, nbytes);
}
-EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
+EXPORT_SYMBOL_GPL(memcpy_from_sglist);
-void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
- unsigned int start, unsigned int nbytes, int out)
+void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
+ const void *buf, unsigned int nbytes)
{
struct scatter_walk walk;
- struct scatterlist tmp[2];
- if (!nbytes)
+ if (unlikely(nbytes == 0)) /* in case sg == NULL */
return;
- sg = scatterwalk_ffwd(tmp, sg, start);
+ scatterwalk_start_at_pos(&walk, sg, start);
+ memcpy_to_scatterwalk(&walk, buf, nbytes);
+}
+EXPORT_SYMBOL_GPL(memcpy_to_sglist);
+
+void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct scatter_walk swalk;
+ struct scatter_walk dwalk;
+
+ if (unlikely(nbytes == 0)) /* in case sg == NULL */
+ return;
- scatterwalk_start(&walk, sg);
- scatterwalk_copychunks(buf, &walk, nbytes, out);
- scatterwalk_done(&walk, out, 0);
+ scatterwalk_start(&swalk, src);
+ scatterwalk_start(&dwalk, dst);
+
+ do {
+ unsigned int slen, dlen;
+ unsigned int len;
+
+ slen = scatterwalk_next(&swalk, nbytes);
+ dlen = scatterwalk_next(&dwalk, nbytes);
+ len = min(slen, dlen);
+ memcpy(dwalk.addr, swalk.addr, len);
+ scatterwalk_done_dst(&dwalk, len);
+ scatterwalk_done_src(&swalk, len);
+ nbytes -= len;
+ } while (nbytes);
}
-EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
+EXPORT_SYMBOL_GPL(memcpy_sglist);
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
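
[Editor's sketch] A short usage example for the new sglist helpers above, which replace scatterwalk_map_and_copy() calls with out == 0 and out == 1 respectively; the buffer names and lengths are illustrative:

	u8 hdr[8], tag[16];

	/* Read 8 bytes from the front of the list... */
	memcpy_from_sglist(hdr, sg, 0, sizeof(hdr));
	/* ...and write a 16-byte tag at offset msg_len. */
	memcpy_to_sglist(sg, msg_len, tag, sizeof(tag));
	/* Whole-list copy between two scatterlists: */
	memcpy_sglist(dst_sg, src_sg, msg_len + sizeof(tag));
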
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 1cef6bb06a81..36934c78d127 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -12,8 +12,10 @@
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
+#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -23,9 +25,14 @@
#include "compress.h"
+#define SCOMP_SCRATCH_SIZE 65400
+
struct scomp_scratch {
spinlock_t lock;
- void *src;
+ union {
+ void *src;
+ unsigned long saddr;
+ };
void *dst;
};
@@ -66,7 +73,7 @@ static void crypto_scomp_free_scratches(void)
for_each_possible_cpu(i) {
scratch = per_cpu_ptr(&scomp_scratch, i);
- vfree(scratch->src);
+ free_page(scratch->saddr);
vfree(scratch->dst);
scratch->src = NULL;
scratch->dst = NULL;
@@ -79,14 +86,15 @@ static int crypto_scomp_alloc_scratches(void)
int i;
for_each_possible_cpu(i) {
+ struct page *page;
void *mem;
scratch = per_cpu_ptr(&scomp_scratch, i);
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
+ page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
+ if (!page)
goto error;
- scratch->src = mem;
+ scratch->src = page_address(page);
mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
if (!mem)
goto error;
@@ -98,13 +106,70 @@ error:
return -ENOMEM;
}
+static void scomp_free_streams(struct scomp_alg *alg)
+{
+ struct crypto_acomp_stream __percpu *stream = alg->stream;
+ int i;
+
+ alg->stream = NULL;
+ if (!stream)
+ return;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
+
+ if (IS_ERR_OR_NULL(ps->ctx))
+ break;
+
+ alg->free_ctx(ps->ctx);
+ }
+
+ free_percpu(stream);
+}
+
+static int scomp_alloc_streams(struct scomp_alg *alg)
+{
+ struct crypto_acomp_stream __percpu *stream;
+ int i;
+
+ stream = alloc_percpu(struct crypto_acomp_stream);
+ if (!stream)
+ return -ENOMEM;
+
+ alg->stream = stream;
+
+ for_each_possible_cpu(i) {
+ struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i);
+
+ ps->ctx = alg->alloc_ctx();
+ if (IS_ERR(ps->ctx)) {
+ scomp_free_streams(alg);
+ return PTR_ERR(ps->ctx);
+ }
+
+ spin_lock_init(&ps->lock);
+ }
+ return 0;
+}
+
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
+ struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm));
int ret = 0;
mutex_lock(&scomp_lock);
- if (!scomp_scratch_users++)
+ if (!alg->stream) {
+ ret = scomp_alloc_streams(alg);
+ if (ret)
+ goto unlock;
+ }
+ if (!scomp_scratch_users) {
ret = crypto_scomp_alloc_scratches();
+ if (ret)
+ goto unlock;
+ scomp_scratch_users++;
+ }
+unlock:
mutex_unlock(&scomp_lock);
return ret;
@@ -112,84 +177,144 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
+ struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
- void **ctx = acomp_request_ctx(req);
- struct scomp_scratch *scratch;
- void *src, *dst;
- unsigned int dlen;
+ struct crypto_acomp_stream *stream;
+ unsigned int slen = req->slen;
+ unsigned int dlen = req->dlen;
+ struct page *spage, *dpage;
+ unsigned int n;
+ const u8 *src;
+ size_t soff;
+ size_t doff;
+ u8 *dst;
int ret;
- if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+ if (!req->src || !slen)
return -EINVAL;
- if (req->dst && !req->dlen)
+ if (!req->dst || !dlen)
return -EINVAL;
- if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
- req->dlen = SCOMP_SCRATCH_SIZE;
-
- dlen = req->dlen;
-
- scratch = raw_cpu_ptr(&scomp_scratch);
- spin_lock(&scratch->lock);
-
- if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
- src = page_to_virt(sg_page(req->src)) + req->src->offset;
- } else {
- scatterwalk_map_and_copy(scratch->src, req->src, 0,
- req->slen, 0);
+ if (acomp_request_src_isvirt(req))
+ src = req->svirt;
+ else {
src = scratch->src;
+ do {
+ if (acomp_request_src_isfolio(req)) {
+ spage = folio_page(req->sfolio, 0);
+ soff = req->soff;
+ } else if (slen <= req->src->length) {
+ spage = sg_page(req->src);
+ soff = req->src->offset;
+ } else
+ break;
+
+ spage = nth_page(spage, soff / PAGE_SIZE);
+ soff = offset_in_page(soff);
+
+ n = (slen - 1) / PAGE_SIZE;
+ n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
+ if (PageHighMem(nth_page(spage, n)) &&
+ size_add(soff, slen) > PAGE_SIZE)
+ break;
+ src = kmap_local_page(spage) + soff;
+ } while (0);
}
- if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
- dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
- else
+ if (acomp_request_dst_isvirt(req))
+ dst = req->dvirt;
+ else {
+ unsigned int max = SCOMP_SCRATCH_SIZE;
+
dst = scratch->dst;
+ do {
+ if (acomp_request_dst_isfolio(req)) {
+ dpage = folio_page(req->dfolio, 0);
+ doff = req->doff;
+ } else if (dlen <= req->dst->length) {
+ dpage = sg_page(req->dst);
+ doff = req->dst->offset;
+ } else
+ break;
+
+ dpage = nth_page(dpage, doff / PAGE_SIZE);
+ doff = offset_in_page(doff);
+
+ n = (dlen - 1) / PAGE_SIZE;
+ n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
+ if (PageHighMem(nth_page(dpage, n)) &&
+ size_add(doff, dlen) > PAGE_SIZE)
+ break;
+ dst = kmap_local_page(dpage) + doff;
+ max = dlen;
+ } while (0);
+ dlen = min(dlen, max);
+ }
+ spin_lock_bh(&scratch->lock);
+
+ if (src == scratch->src)
+ memcpy_from_sglist(scratch->src, req->src, 0, slen);
+
+ stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
+ spin_lock(&stream->lock);
if (dir)
- ret = crypto_scomp_compress(scomp, src, req->slen,
- dst, &req->dlen, *ctx);
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
else
- ret = crypto_scomp_decompress(scomp, src, req->slen,
- dst, &req->dlen, *ctx);
- if (!ret) {
- if (!req->dst) {
- req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- goto out;
- }
- } else if (req->dlen > dlen) {
- ret = -ENOSPC;
- goto out;
- }
- if (dst == scratch->dst) {
- scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
- req->dlen, 1);
- } else {
- int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
- int i;
- struct page *dst_page = sg_page(req->dst);
-
- for (i = 0; i < nr_pages; i++)
- flush_dcache_page(dst_page + i);
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, stream->ctx);
+
+ if (dst == scratch->dst)
+ memcpy_to_sglist(req->dst, 0, dst, dlen);
+
+ spin_unlock(&stream->lock);
+ spin_unlock_bh(&scratch->lock);
+
+ req->dlen = dlen;
+
+ if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) {
+ kunmap_local(dst);
+ dlen += doff;
+ for (;;) {
+ flush_dcache_page(dpage);
+ if (dlen <= PAGE_SIZE)
+ break;
+ dlen -= PAGE_SIZE;
+ dpage = nth_page(dpage, 1);
}
}
-out:
- spin_unlock(&scratch->lock);
+ if (!acomp_request_src_isvirt(req) && src != scratch->src)
+ kunmap_local(src);
+
return ret;
}
+static int scomp_acomp_chain(struct acomp_req *req, int dir)
+{
+ struct acomp_req *r2;
+ int err;
+
+ err = scomp_acomp_comp_decomp(req, dir);
+ req->base.err = err;
+
+ list_for_each_entry(r2, &req->base.list, base.list)
+ r2->base.err = scomp_acomp_comp_decomp(r2, dir);
+
+ return err;
+}
+
static int scomp_acomp_compress(struct acomp_req *req)
{
- return scomp_acomp_comp_decomp(req, 1);
+ return scomp_acomp_chain(req, 1);
}
static int scomp_acomp_decompress(struct acomp_req *req)
{
- return scomp_acomp_comp_decomp(req, 0);
+ return scomp_acomp_chain(req, 0);
}
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
@@ -225,46 +350,19 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
- crt->dst_free = sgl_free;
- crt->reqsize = sizeof(void *);
return 0;
}
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
+static void crypto_scomp_destroy(struct crypto_alg *alg)
{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
- struct crypto_scomp *scomp = *tfm_ctx;
- void *ctx;
-
- ctx = crypto_scomp_alloc_ctx(scomp);
- if (IS_ERR(ctx)) {
- kfree(req);
- return NULL;
- }
-
- *req->__ctx = ctx;
-
- return req;
-}
-
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
-{
- struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
- struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
- struct crypto_scomp *scomp = *tfm_ctx;
- void *ctx = *req->__ctx;
-
- if (ctx)
- crypto_scomp_free_ctx(scomp, ctx);
+ scomp_free_streams(__crypto_scomp_alg(alg));
}
static const struct crypto_type crypto_scomp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_scomp_init_tfm,
+ .destroy = crypto_scomp_destroy,
#ifdef CONFIG_PROC_FS
.show = crypto_scomp_show,
#endif
@@ -277,12 +375,21 @@ static const struct crypto_type crypto_scomp_type = {
.tfmsize = offsetof(struct crypto_scomp, base),
};
-int crypto_register_scomp(struct scomp_alg *alg)
+static void scomp_prepare_alg(struct scomp_alg *alg)
{
struct crypto_alg *base = &alg->calg.base;
comp_prepare_alg(&alg->calg);
+ base->cra_flags |= CRYPTO_ALG_REQ_CHAIN;
+}
+
+int crypto_register_scomp(struct scomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->calg.base;
+
+ scomp_prepare_alg(alg);
+
base->cra_type = &crypto_scomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
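
[Editor's sketch] The request-side effect of this rework, hedged: callers that already hold linear buffers can use the acomp virtual-address setters (names assumed from the acomp API this patch builds on, matching the req->svirt/req->dvirt fields checked above) and skip the scratch-buffer copies the scatterlist path may still incur:

	acomp_request_set_callback(req, 0, NULL, NULL);	/* synchronous use */
	acomp_request_set_src_virt(req, src_buf, src_len);
	acomp_request_set_dst_virt(req, dst_buf, dst_len);
	err = crypto_acomp_compress(req);	/* on success, req->dlen holds the output size */
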
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index a9eb2dcf2898..132075a905d9 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <net/netlink.h>
#include "skcipher.h"
@@ -38,26 +39,6 @@ static const struct crypto_type crypto_skcipher_type;
static int skcipher_walk_next(struct skcipher_walk *walk);
-static inline void skcipher_map_src(struct skcipher_walk *walk)
-{
- walk->src.virt.addr = scatterwalk_map(&walk->in);
-}
-
-static inline void skcipher_map_dst(struct skcipher_walk *walk)
-{
- walk->dst.virt.addr = scatterwalk_map(&walk->out);
-}
-
-static inline void skcipher_unmap_src(struct skcipher_walk *walk)
-{
- scatterwalk_unmap(walk->src.virt.addr);
-}
-
-static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
-{
- scatterwalk_unmap(walk->dst.virt.addr);
-}
-
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
@@ -69,14 +50,6 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
return container_of(alg, struct skcipher_alg, base);
}
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
-{
- u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
-
- scatterwalk_copychunks(addr, &walk->out, bsize, 1);
- return 0;
-}
-
/**
* skcipher_walk_done() - finish one step of a skcipher_walk
* @walk: the skcipher_walk
@@ -111,15 +84,13 @@ int skcipher_walk_done(struct skcipher_walk *walk, int res)
if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
-unmap_src:
- skcipher_unmap_src(walk);
+ scatterwalk_advance(&walk->in, n);
} else if (walk->flags & SKCIPHER_WALK_DIFF) {
- skcipher_unmap_dst(walk);
- goto unmap_src;
+ scatterwalk_done_src(&walk->in, n);
} else if (walk->flags & SKCIPHER_WALK_COPY) {
- skcipher_map_dst(walk);
- memcpy(walk->dst.virt.addr, walk->page, n);
- skcipher_unmap_dst(walk);
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_map(&walk->out);
+ memcpy(walk->out.addr, walk->page, n);
} else { /* SKCIPHER_WALK_SLOW */
if (res > 0) {
/*
@@ -131,20 +102,19 @@ unmap_src:
res = -EINVAL;
total = 0;
} else
- n = skcipher_done_slow(walk, n);
+ memcpy_to_scatterwalk(&walk->out, walk->out.addr, n);
+ goto dst_done;
}
+ scatterwalk_done_dst(&walk->out, n);
+dst_done:
+
if (res > 0)
res = 0;
walk->total = total;
walk->nbytes = 0;
- scatterwalk_advance(&walk->in, n);
- scatterwalk_advance(&walk->out, n);
- scatterwalk_done(&walk->in, 0, total);
- scatterwalk_done(&walk->out, 1, total);
-
if (total) {
if (walk->flags & SKCIPHER_WALK_SLEEP)
cond_resched();
@@ -174,7 +144,7 @@ static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
unsigned alignmask = walk->alignmask;
unsigned n;
- u8 *buffer;
+ void *buffer;
if (!walk->buffer)
walk->buffer = walk->page;
@@ -188,10 +158,11 @@ static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
return skcipher_walk_done(walk, -ENOMEM);
walk->buffer = buffer;
}
- walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
- walk->src.virt.addr = walk->dst.virt.addr;
- scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+ buffer = PTR_ALIGN(buffer, alignmask + 1);
+ memcpy_from_scatterwalk(buffer, &walk->in, bsize);
+ walk->out.__addr = buffer;
+ walk->in.__addr = walk->out.addr;
walk->nbytes = bsize;
walk->flags |= SKCIPHER_WALK_SLOW;
@@ -201,14 +172,18 @@ static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
static int skcipher_next_copy(struct skcipher_walk *walk)
{
- u8 *tmp = walk->page;
+ void *tmp = walk->page;
- skcipher_map_src(walk);
- memcpy(tmp, walk->src.virt.addr, walk->nbytes);
- skcipher_unmap_src(walk);
+ scatterwalk_map(&walk->in);
+ memcpy(tmp, walk->in.addr, walk->nbytes);
+ scatterwalk_unmap(&walk->in);
+ /*
+ * walk->in is advanced later when the number of bytes actually
+ * processed (which might be less than walk->nbytes) is known.
+ */
- walk->src.virt.addr = tmp;
- walk->dst.virt.addr = tmp;
+ walk->in.__addr = tmp;
+ walk->out.__addr = tmp;
return 0;
}
@@ -218,15 +193,15 @@ static int skcipher_next_fast(struct skcipher_walk *walk)
diff = offset_in_page(walk->in.offset) -
offset_in_page(walk->out.offset);
- diff |= (u8 *)scatterwalk_page(&walk->in) -
- (u8 *)scatterwalk_page(&walk->out);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
- skcipher_map_src(walk);
- walk->dst.virt.addr = walk->src.virt.addr;
+ scatterwalk_map(&walk->out);
+ walk->in.__addr = walk->out.__addr;
if (diff) {
walk->flags |= SKCIPHER_WALK_DIFF;
- skcipher_map_dst(walk);
+ scatterwalk_map(&walk->in);
}
return 0;
@@ -305,14 +280,16 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
return skcipher_walk_next(walk);
}
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req, bool atomic)
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+ struct skcipher_request *__restrict req, bool atomic)
{
- const struct skcipher_alg *alg =
- crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_alg *alg;
might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ alg = crypto_skcipher_alg(tfm);
+
walk->total = req->cryptlen;
walk->nbytes = 0;
walk->iv = req->iv;
@@ -328,14 +305,9 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
- /*
- * Accessing 'alg' directly generates better code than using the
- * crypto_skcipher_blocksize() and similar helper functions here, as it
- * prevents the algorithm pointer from being repeatedly reloaded.
- */
- walk->blocksize = alg->base.cra_blocksize;
- walk->ivsize = alg->co.ivsize;
- walk->alignmask = alg->base.cra_alignmask;
+ walk->blocksize = crypto_skcipher_blocksize(tfm);
+ walk->ivsize = crypto_skcipher_ivsize(tfm);
+ walk->alignmask = crypto_skcipher_alignmask(tfm);
if (alg->co.base.cra_type != &crypto_skcipher_type)
walk->stride = alg->co.chunksize;
@@ -346,10 +318,11 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
-static int skcipher_walk_aead_common(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
- const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
walk->nbytes = 0;
walk->iv = req->iv;
@@ -362,30 +335,20 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
if (unlikely(!walk->total))
return 0;
- scatterwalk_start(&walk->in, req->src);
- scatterwalk_start(&walk->out, req->dst);
-
- scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
- scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
-
- scatterwalk_done(&walk->in, 0, walk->total);
- scatterwalk_done(&walk->out, 0, walk->total);
+ scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
+ scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);
- /*
- * Accessing 'alg' directly generates better code than using the
- * crypto_aead_blocksize() and similar helper functions here, as it
- * prevents the algorithm pointer from being repeatedly reloaded.
- */
- walk->blocksize = alg->base.cra_blocksize;
- walk->stride = alg->chunksize;
- walk->ivsize = alg->ivsize;
- walk->alignmask = alg->base.cra_alignmask;
+ walk->blocksize = crypto_aead_blocksize(tfm);
+ walk->stride = crypto_aead_chunksize(tfm);
+ walk->ivsize = crypto_aead_ivsize(tfm);
+ walk->alignmask = crypto_aead_alignmask(tfm);
return skcipher_walk_first(walk);
}
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
walk->total = req->cryptlen;
@@ -393,8 +356,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic)
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -612,7 +576,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
- alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
+ str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC));
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
@@ -681,6 +645,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
/* Only sync algorithms allowed. */
mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
+ type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE);
tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
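
[Editor's note] The skcipher.c hunks above retire the file-private skcipher_map_*/skcipher_unmap_* helpers in favour of the generic scatterwalk calls, which now record the kmapped address in the walk itself (walk->in.addr, walk->out.__addr) and fold the old advance/done bookkeeping into scatterwalk_done_src()/scatterwalk_done_dst(). The contract seen by cipher implementations is unchanged; as a reference point, a minimal sketch of the usual consumer loop, with process_blocks() standing in for a hypothetical block-cipher core.

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>

/* Hypothetical per-block worker; src and dst may alias for in-place ops. */
static void process_blocks(u8 *dst, const u8 *src, unsigned int nbytes);

static int example_ecb_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int bsize = crypto_skcipher_blocksize(tfm);
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);
        while (walk.nbytes) {
                unsigned int n = round_down(walk.nbytes, bsize);

                /* The walk maps src/dst for us and unmaps them in _done(). */
                process_blocks(walk.dst.virt.addr, walk.src.virt.addr, n);
                err = skcipher_walk_done(&walk, walk.nbytes - n);
        }
        return err;
}
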
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e1a74cb2cfbe..96f4a66be14c 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -716,6 +716,207 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
return crypto_wait_req(ret, wait);
}
+struct test_mb_ahash_data {
+ struct scatterlist sg[XBUFSIZE];
+ char result[64];
+ struct ahash_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+};
+
+static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb,
+ int *rc)
+{
+ int i, err;
+
+ /* Fire up a bunch of concurrent requests */
+ err = crypto_ahash_digest(data[0].req);
+
+ /* Wait for all requests to finish */
+ err = crypto_wait_req(err, &data[0].wait);
+ if (num_mb < 2)
+ return err;
+
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = ahash_request_err(data[i].req);
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
+ int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret = 0;
+ int *rc;
+
+ rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_ahash_op(data, num_mb, rc);
+ if (ret)
+ goto out;
+ }
+
+ pr_cont("%d operations in %d seconds (%llu bytes)\n",
+ bcount * num_mb, secs, (u64)bcount * blen * num_mb);
+
+out:
+ kfree(rc);
+ return ret;
+}
+
+static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
+ u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+ int *rc;
+
+ rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_ahash_op(data, num_mb, rc);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_ahash_op(data, num_mb, rc);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+out:
+ kfree(rc);
+ return ret;
+}
+
+static void test_mb_ahash_speed(const char *algo, unsigned int secs,
+ struct hash_speed *speed, u32 num_mb)
+{
+ struct test_mb_ahash_data *data;
+ struct crypto_ahash *tfm;
+ unsigned int i, j, k;
+ int ret;
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ tfm = crypto_alloc_ahash(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto free_data;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ if (testmgr_alloc_buf(data[i].xbuf))
+ goto out;
+
+ crypto_init_wait(&data[i].wait);
+
+ data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: hash: Failed to allocate request for %s\n",
+ algo);
+ goto out;
+ }
+
+ if (i) {
+ ahash_request_set_callback(data[i].req, 0, NULL, NULL);
+ ahash_request_chain(data[i].req, data[0].req);
+ } else
+ ahash_request_set_callback(data[0].req, 0,
+ crypto_req_done,
+ &data[0].wait);
+
+ sg_init_table(data[i].sg, XBUFSIZE);
+ for (j = 0; j < XBUFSIZE; j++) {
+ sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
+ memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
+ }
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
+ get_driver_name(crypto_ahash, tfm));
+
+ for (i = 0; speed[i].blen != 0; i++) {
+ /* For some reason this only tests digests. */
+ if (speed[i].blen != speed[i].plen)
+ continue;
+
+ if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for tvmem (%lu)\n",
+ speed[i].blen, XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ if (klen)
+ crypto_ahash_setkey(tfm, tvmem[0], klen);
+
+ for (k = 0; k < num_mb; k++)
+ ahash_request_set_crypt(data[k].req, data[k].sg,
+ data[k].result, speed[i].blen);
+
+ pr_info("test%3u "
+ "(%5u byte blocks,%5u bytes per update,%4u updates): ",
+ i, speed[i].blen, speed[i].plen,
+ speed[i].blen / speed[i].plen);
+
+ if (secs) {
+ ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
+ num_mb);
+ cond_resched();
+ } else {
+ ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+ }
+
+ if (ret) {
+ pr_err("At least one hashing failed ret=%d\n", ret);
+ break;
+ }
+ }
+
+out:
+ ahash_request_free(data[0].req);
+
+ for (k = 0; k < num_mb; ++k)
+ testmgr_free_buf(data[k].xbuf);
+
+ crypto_free_ahash(tfm);
+
+free_data:
+ kfree(data);
+}
+
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
char *out, int secs)
{
@@ -1654,10 +1855,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("ghash"));
break;
- case 47:
- ret = min(ret, tcrypt_test("crct10dif"));
- break;
-
case 48:
ret = min(ret, tcrypt_test("sha3-224"));
break;
@@ -2272,10 +2469,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 320:
- test_hash_speed("crct10dif", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
@@ -2391,6 +2584,36 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_ahash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
+ case 450:
+ test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
+ num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 451:
+ test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
+ num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 452:
+ test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
+ num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 453:
+ test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
+ num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 454:
+ test_mb_ahash_speed("streebog256", sec,
+ generic_hash_speed_template, num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
+ case 455:
+ test_mb_ahash_speed("streebog512", sec,
+ generic_hash_speed_template, num_mb);
+ if (mode > 400 && mode < 500) break;
+ fallthrough;
case 499:
break;
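
[Editor's note] The restored multibuffer cases (450-455) rely on ahash request chaining rather than a dedicated multibuffer interface: only request 0 carries a completion callback, later requests are attached with ahash_request_chain(), a single crypto_ahash_digest() on the head submits the whole batch, and per-request status is read back with ahash_request_err(). A condensed sketch of that pattern (error unwinding trimmed, and, as in tcrypt above, only the head request is freed):

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#define NREQS 4

static int chained_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
                          unsigned int len, u8 out[NREQS][64])
{
        struct ahash_request *req[NREQS];
        struct crypto_wait wait;
        int i, err;

        crypto_init_wait(&wait);
        for (i = 0; i < NREQS; i++) {
                req[i] = ahash_request_alloc(tfm, GFP_KERNEL);
                if (!req[i])
                        return -ENOMEM;         /* sketch: no unwinding */
                if (i) {
                        ahash_request_set_callback(req[i], 0, NULL, NULL);
                        ahash_request_chain(req[i], req[0]);
                } else {
                        ahash_request_set_callback(req[0], 0, crypto_req_done,
                                                   &wait);
                }
                ahash_request_set_crypt(req[i], sg, out[i], len);
        }

        /* One submission drives the whole chain. */
        err = crypto_wait_req(crypto_ahash_digest(req[0]), &wait);
        for (i = 1; i < NREQS && !err; i++)
                err = ahash_request_err(req[i]);

        ahash_request_free(req[0]);
        return err;
}
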
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e61490ba4095..82977ea25db3 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3320,112 +3320,6 @@ out:
return err;
}
-static int test_comp(struct crypto_comp *tfm,
- const struct comp_testvec *ctemplate,
- const struct comp_testvec *dtemplate,
- int ctcount, int dtcount)
-{
- const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
- char *output, *decomp_output;
- unsigned int i;
- int ret;
-
- output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!output)
- return -ENOMEM;
-
- decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!decomp_output) {
- kfree(output);
- return -ENOMEM;
- }
-
- for (i = 0; i < ctcount; i++) {
- int ilen;
- unsigned int dlen = COMP_BUF_SIZE;
-
- memset(output, 0, COMP_BUF_SIZE);
- memset(decomp_output, 0, COMP_BUF_SIZE);
-
- ilen = ctemplate[i].inlen;
- ret = crypto_comp_compress(tfm, ctemplate[i].input,
- ilen, output, &dlen);
- if (ret) {
- printk(KERN_ERR "alg: comp: compression failed "
- "on test %d for %s: ret=%d\n", i + 1, algo,
- -ret);
- goto out;
- }
-
- ilen = dlen;
- dlen = COMP_BUF_SIZE;
- ret = crypto_comp_decompress(tfm, output,
- ilen, decomp_output, &dlen);
- if (ret) {
- pr_err("alg: comp: compression failed: decompress: on test %d for %s failed: ret=%d\n",
- i + 1, algo, -ret);
- goto out;
- }
-
- if (dlen != ctemplate[i].inlen) {
- printk(KERN_ERR "alg: comp: Compression test %d "
- "failed for %s: output len = %d\n", i + 1, algo,
- dlen);
- ret = -EINVAL;
- goto out;
- }
-
- if (memcmp(decomp_output, ctemplate[i].input,
- ctemplate[i].inlen)) {
- pr_err("alg: comp: compression failed: output differs: on test %d for %s\n",
- i + 1, algo);
- hexdump(decomp_output, dlen);
- ret = -EINVAL;
- goto out;
- }
- }
-
- for (i = 0; i < dtcount; i++) {
- int ilen;
- unsigned int dlen = COMP_BUF_SIZE;
-
- memset(decomp_output, 0, COMP_BUF_SIZE);
-
- ilen = dtemplate[i].inlen;
- ret = crypto_comp_decompress(tfm, dtemplate[i].input,
- ilen, decomp_output, &dlen);
- if (ret) {
- printk(KERN_ERR "alg: comp: decompression failed "
- "on test %d for %s: ret=%d\n", i + 1, algo,
- -ret);
- goto out;
- }
-
- if (dlen != dtemplate[i].outlen) {
- printk(KERN_ERR "alg: comp: Decompression test %d "
- "failed for %s: output len = %d\n", i + 1, algo,
- dlen);
- ret = -EINVAL;
- goto out;
- }
-
- if (memcmp(decomp_output, dtemplate[i].output, dlen)) {
- printk(KERN_ERR "alg: comp: Decompression test %d "
- "failed for %s\n", i + 1, algo);
- hexdump(decomp_output, dlen);
- ret = -EINVAL;
- goto out;
- }
- }
-
- ret = 0;
-
-out:
- kfree(decomp_output);
- kfree(output);
- return ret;
-}
-
static int test_acomp(struct crypto_acomp *tfm,
const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
@@ -3522,21 +3416,6 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- crypto_init_wait(&wait);
- sg_init_one(&src, input_vec, ilen);
- acomp_request_set_params(req, &src, NULL, ilen, 0);
-
- ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
- if (ret) {
- pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n",
- i + 1, algo, -ret);
- kfree(input_vec);
- acomp_request_free(req);
- goto out;
- }
-#endif
-
kfree(input_vec);
acomp_request_free(req);
}
@@ -3598,20 +3477,6 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
- crypto_init_wait(&wait);
- acomp_request_set_params(req, &src, NULL, ilen, 0);
-
- ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
- if (ret) {
- pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n",
- i + 1, algo, -ret);
- kfree(input_vec);
- acomp_request_free(req);
- goto out;
- }
-#endif
-
kfree(input_vec);
acomp_request_free(req);
}
@@ -3713,42 +3578,22 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
- struct crypto_comp *comp;
struct crypto_acomp *acomp;
int err;
- u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
-
- if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
- acomp = crypto_alloc_acomp(driver, type, mask);
- if (IS_ERR(acomp)) {
- if (PTR_ERR(acomp) == -ENOENT)
- return 0;
- pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
- driver, PTR_ERR(acomp));
- return PTR_ERR(acomp);
- }
- err = test_acomp(acomp, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
- crypto_free_acomp(acomp);
- } else {
- comp = crypto_alloc_comp(driver, type, mask);
- if (IS_ERR(comp)) {
- if (PTR_ERR(comp) == -ENOENT)
- return 0;
- pr_err("alg: comp: Failed to load transform for %s: %ld\n",
- driver, PTR_ERR(comp));
- return PTR_ERR(comp);
- }
-
- err = test_comp(comp, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
- crypto_free_comp(comp);
- }
+ acomp = crypto_alloc_acomp(driver, type, mask);
+ if (IS_ERR(acomp)) {
+ if (PTR_ERR(acomp) == -ENOENT)
+ return 0;
+ pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(acomp));
+ return PTR_ERR(acomp);
+ }
+ err = test_acomp(acomp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
+ crypto_free_acomp(acomp);
return err;
}
@@ -4328,7 +4173,7 @@ static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs)
if (vecs->public_key_vec)
return 0;
- sig_size = crypto_sig_keysize(tfm);
+ sig_size = crypto_sig_maxsize(tfm);
if (sig_size < vecs->c_size) {
pr_err("alg: sig: invalid maxsize %u\n", sig_size);
return -EINVAL;
@@ -4340,13 +4185,14 @@ static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs)
/* Run asymmetric signature generation */
err = crypto_sig_sign(tfm, vecs->m, vecs->m_size, sig, sig_size);
- if (err) {
+ if (err < 0) {
pr_err("alg: sig: sign test failed: err %d\n", err);
return err;
}
/* Verify that generated signature equals cooked signature */
- if (memcmp(sig, vecs->c, vecs->c_size) ||
+ if (err != vecs->c_size ||
+ memcmp(sig, vecs->c, vecs->c_size) ||
memchr_inv(sig + vecs->c_size, 0, sig_size - vecs->c_size)) {
pr_err("alg: sig: sign test failed: invalid output\n");
hexdump(sig, sig_size);
@@ -4505,6 +4351,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "authenc(hmac(sha256),cts(cbc(aes)))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(krb5_test_aes128_cts_hmac_sha256_128)
+ }
+ }, {
.alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
@@ -4525,6 +4377,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ .alg = "authenc(hmac(sha384),cts(cbc(aes)))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(krb5_test_aes256_cts_hmac_sha384_192)
+ }
+ }, {
.alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
@@ -4743,9 +4601,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(sm4_cmac128_tv_template)
}
}, {
- .alg = "compress_null",
- .test = alg_test_null,
- }, {
.alg = "crc32",
.test = alg_test_hash,
.fips_allowed = 1,
@@ -4760,20 +4615,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(crc32c_tv_template)
}
}, {
- .alg = "crc64-rocksoft",
- .test = alg_test_hash,
- .fips_allowed = 1,
- .suite = {
- .hash = __VECS(crc64_rocksoft_tv_template)
- }
- }, {
- .alg = "crct10dif",
- .test = alg_test_hash,
- .fips_allowed = 1,
- .suite = {
- .hash = __VECS(crct10dif_tv_template)
- }
- }, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -5398,6 +5239,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
.test = alg_test_null,
}, {
+ .alg = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
+ .test = alg_test_aead,
+ .suite.aead = __VECS(krb5_test_camellia_cts_cmac)
+ }, {
.alg = "lrw(aes)",
.generic_driver = "lrw(ecb(aes-generic))",
.test = alg_test_skcipher,
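
[Editor's note] One behavioural change worth flagging in test_sig_one() above: the buffer is now sized with crypto_sig_maxsize(), and a successful crypto_sig_sign() returns the number of signature bytes written rather than zero, which the test compares against the expected c_size. A caller-side fragment under that contract (tfm, m, m_size, sig and expected_size are assumed to be in scope):

        int sig_len;

        sig_len = crypto_sig_sign(tfm, m, m_size, sig,
                                  crypto_sig_maxsize(tfm));
        if (sig_len < 0)
                return sig_len;                 /* hard error */
        if (sig_len != expected_size)
                return -EINVAL;                 /* unexpected signature length */
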
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d754ab997186..afc10af59b0a 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -6017,309 +6017,6 @@ static const struct hash_testvec rmd160_tv_template[] = {
}
};
-static const u8 zeroes[4096] = { [0 ... 4095] = 0 };
-static const u8 ones[4096] = { [0 ... 4095] = 0xff };
-
-static const struct hash_testvec crc64_rocksoft_tv_template[] = {
- {
- .plaintext = zeroes,
- .psize = 4096,
- .digest = "\x4e\xb6\x22\xeb\x67\xd3\x82\x64",
- }, {
- .plaintext = ones,
- .psize = 4096,
- .digest = "\xac\xa3\xec\x02\x73\xba\xdd\xc0",
- }
-};
-
-static const struct hash_testvec crct10dif_tv_template[] = {
- {
- .plaintext = "abc",
- .psize = 3,
- .digest = (u8 *)(u16 []){ 0x443b },
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 79,
- .digest = (u8 *)(u16 []){ 0x4b70 },
- }, {
- .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
- "ddddddddddddd",
- .psize = 56,
- .digest = (u8 *)(u16 []){ 0x9ce3 },
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 319,
- .digest = (u8 *)(u16 []){ 0x44c6 },
- }, {
- .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
- "\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
- "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
- "\xa1\x38\xcf\x43\xda\x71\x08\x7c"
- "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee"
- "\x85\x1c\x90\x27\xbe\x32\xc9\x60"
- "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2"
- "\x46\xdd\x74\x0b\x7f\x16\xad\x21"
- "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93"
- "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05"
- "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77"
- "\x0e\x82\x19\xb0\x24\xbb\x52\xe9"
- "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38"
- "\xcf\x66\xfd\x71\x08\x9f\x13\xaa"
- "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c"
- "\xb3\x27\xbe\x55\xec\x60\xf7\x8e"
- "\x02\x99\x30\xc7\x3b\xd2\x69\x00"
- "\x74\x0b\xa2\x16\xad\x44\xdb\x4f"
- "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1"
- "\x58\xef\x63\xfa\x91\x05\x9c\x33"
- "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5"
- "\x19\xb0\x47\xde\x52\xe9\x80\x17"
- "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66"
- "\xfd\x94\x08\x9f\x36\xcd\x41\xd8"
- "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a"
- "\xe1\x55\xec\x83\x1a\x8e\x25\xbc"
- "\x30\xc7\x5e\xf5\x69\x00\x97\x0b"
- "\xa2\x39\xd0\x44\xdb\x72\x09\x7d"
- "\x14\xab\x1f\xb6\x4d\xe4\x58\xef"
- "\x86\x1d\x91\x28\xbf\x33\xca\x61"
- "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3"
- "\x47\xde\x75\x0c\x80\x17\xae\x22"
- "\xb9\x50\xe7\x5b\xf2\x89\x20\x94"
- "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06"
- "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78"
- "\x0f\x83\x1a\xb1\x25\xbc\x53\xea"
- "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39"
- "\xd0\x67\xfe\x72\x09\xa0\x14\xab"
- "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d"
- "\xb4\x28\xbf\x56\xed\x61\xf8\x8f"
- "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01"
- "\x75\x0c\xa3\x17\xae\x45\xdc\x50"
- "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2"
- "\x59\xf0\x64\xfb\x92\x06\x9d\x34"
- "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6"
- "\x1a\xb1\x48\xdf\x53\xea\x81\x18"
- "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67"
- "\xfe\x95\x09\xa0\x37\xce\x42\xd9"
- "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b"
- "\xe2\x56\xed\x84\x1b\x8f\x26\xbd"
- "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c"
- "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e"
- "\x15\xac\x20\xb7\x4e\xe5\x59\xf0"
- "\x87\x1e\x92\x29\xc0\x34\xcb\x62"
- "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4"
- "\x48\xdf\x76\x0d\x81\x18\xaf\x23"
- "\xba\x51\xe8\x5c\xf3\x8a\x21\x95"
- "\x2c\xc3\x37\xce\x65\xfc\x70\x07"
- "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79"
- "\x10\x84\x1b\xb2\x26\xbd\x54\xeb"
- "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a"
- "\xd1\x68\xff\x73\x0a\xa1\x15\xac"
- "\x43\xda\x4e\xe5\x7c\x13\x87\x1e"
- "\xb5\x29\xc0\x57\xee\x62\xf9\x90"
- "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02"
- "\x76\x0d\xa4\x18\xaf\x46\xdd\x51"
- "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3"
- "\x5a\xf1\x65\xfc\x93\x07\x9e\x35"
- "\xcc\x40\xd7\x6e\x05\x79\x10\xa7"
- "\x1b\xb2\x49\xe0\x54\xeb\x82\x19"
- "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68"
- "\xff\x96\x0a\xa1\x38\xcf\x43\xda"
- "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c"
- "\xe3\x57\xee\x85\x1c\x90\x27\xbe"
- "\x32\xc9\x60\xf7\x6b\x02\x99\x0d"
- "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f"
- "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1"
- "\x88\x1f\x93\x2a\xc1\x35\xcc\x63"
- "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5"
- "\x49\xe0\x77\x0e\x82\x19\xb0\x24"
- "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96"
- "\x2d\xc4\x38\xcf\x66\xfd\x71\x08"
- "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a"
- "\x11\x85\x1c\xb3\x27\xbe\x55\xec"
- "\x60\xf7\x8e\x02\x99\x30\xc7\x3b"
- "\xd2\x69\x00\x74\x0b\xa2\x16\xad"
- "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f"
- "\xb6\x2a\xc1\x58\xef\x63\xfa\x91"
- "\x05\x9c\x33\xca\x3e\xd5\x6c\x03"
- "\x77\x0e\xa5\x19\xb0\x47\xde\x52"
- "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4"
- "\x5b\xf2\x66\xfd\x94\x08\x9f\x36"
- "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8"
- "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a"
- "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69"
- "\x00\x97\x0b\xa2\x39\xd0\x44\xdb"
- "\x72\x09\x7d\x14\xab\x1f\xb6\x4d"
- "\xe4\x58\xef\x86\x1d\x91\x28\xbf"
- "\x33\xca\x61\xf8\x6c\x03\x9a\x0e"
- "\xa5\x3c\xd3\x47\xde\x75\x0c\x80"
- "\x17\xae\x22\xb9\x50\xe7\x5b\xf2"
- "\x89\x20\x94\x2b\xc2\x36\xcd\x64"
- "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6"
- "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25"
- "\xbc\x53\xea\x5e\xf5\x8c\x00\x97"
- "\x2e\xc5\x39\xd0\x67\xfe\x72\x09"
- "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b"
- "\x12\x86\x1d\xb4\x28\xbf\x56\xed"
- "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c"
- "\xd3\x6a\x01\x75\x0c\xa3\x17\xae"
- "\x45\xdc\x50\xe7\x7e\x15\x89\x20"
- "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92"
- "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04"
- "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53"
- "\xea\x81\x18\x8c\x23\xba\x2e\xc5"
- "\x5c\xf3\x67\xfe\x95\x09\xa0\x37"
- "\xce\x42\xd9\x70\x07\x7b\x12\xa9"
- "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b"
- "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a"
- "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc"
- "\x73\x0a\x7e\x15\xac\x20\xb7\x4e"
- "\xe5\x59\xf0\x87\x1e\x92\x29\xc0"
- "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f"
- "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81"
- "\x18\xaf\x23\xba\x51\xe8\x5c\xf3"
- "\x8a\x21\x95\x2c\xc3\x37\xce\x65"
- "\xfc\x70\x07\x9e\x12\xa9\x40\xd7"
- "\x4b\xe2\x79\x10\x84\x1b\xb2\x26"
- "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98"
- "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a"
- "\xa1\x15\xac\x43\xda\x4e\xe5\x7c"
- "\x13\x87\x1e\xb5\x29\xc0\x57\xee"
- "\x62\xf9\x90\x04\x9b\x32\xc9\x3d"
- "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf"
- "\x46\xdd\x51\xe8\x7f\x16\x8a\x21"
- "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93"
- "\x07\x9e\x35\xcc\x40\xd7\x6e\x05"
- "\x79\x10\xa7\x1b\xb2\x49\xe0\x54"
- "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6"
- "\x5d\xf4\x68\xff\x96\x0a\xa1\x38"
- "\xcf\x43\xda\x71\x08\x7c\x13\xaa"
- "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c"
- "\x90\x27\xbe\x32\xc9\x60\xf7\x6b"
- "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd"
- "\x74\x0b\x7f\x16\xad\x21\xb8\x4f"
- "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1"
- "\x35\xcc\x63\xfa\x6e\x05\x9c\x10"
- "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82"
- "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4"
- "\x8b\x22\x96\x2d\xc4\x38\xcf\x66"
- "\xfd\x71\x08\x9f\x13\xaa\x41\xd8"
- "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27"
- "\xbe\x55\xec\x60\xf7\x8e\x02\x99"
- "\x30\xc7\x3b\xd2\x69\x00\x74\x0b"
- "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d"
- "\x14\x88\x1f\xb6\x2a\xc1\x58\xef"
- "\x63\xfa\x91\x05\x9c\x33\xca\x3e"
- "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0"
- "\x47\xde\x52\xe9\x80\x17\x8b\x22"
- "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94"
- "\x08\x9f\x36\xcd\x41\xd8\x6f\x06"
- "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55"
- "\xec\x83\x1a\x8e\x25\xbc\x30\xc7"
- "\x5e\xf5\x69\x00\x97\x0b\xa2\x39"
- "\xd0\x44\xdb\x72\x09\x7d\x14\xab"
- "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d"
- "\x91\x28\xbf\x33\xca\x61\xf8\x6c"
- "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde"
- "\x75\x0c\x80\x17\xae\x22\xb9\x50"
- "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2"
- "\x36\xcd\x64\xfb\x6f\x06\x9d\x11"
- "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83"
- "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5"
- "\x8c\x00\x97\x2e\xc5\x39\xd0\x67"
- "\xfe\x72\x09\xa0\x14\xab\x42\xd9"
- "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28"
- "\xbf\x56\xed\x61\xf8\x8f\x03\x9a"
- "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c"
- "\xa3\x17\xae\x45\xdc\x50\xe7\x7e"
- "\x15\x89\x20\xb7\x2b\xc2\x59\xf0"
- "\x64\xfb\x92\x06\x9d\x34\xcb\x3f"
- "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1"
- "\x48\xdf\x53\xea\x81\x18\x8c\x23"
- "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95"
- "\x09\xa0\x37\xce\x42\xd9\x70\x07"
- "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56"
- "\xed\x84\x1b\x8f\x26\xbd\x31\xc8"
- "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a"
- "\xd1\x45\xdc\x73\x0a\x7e\x15\xac"
- "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e"
- "\x92\x29\xc0\x34\xcb\x62\xf9\x6d"
- "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf"
- "\x76\x0d\x81\x18\xaf\x23\xba\x51"
- "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3"
- "\x37\xce\x65\xfc\x70\x07\x9e\x12"
- "\xa9\x40\xd7\x4b\xe2\x79\x10\x84"
- "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6"
- "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68"
- "\xff\x73\x0a\xa1\x15\xac\x43\xda"
- "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29"
- "\xc0\x57\xee\x62\xf9\x90\x04\x9b"
- "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d"
- "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f"
- "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1"
- "\x65\xfc\x93\x07\x9e\x35\xcc\x40"
- "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2"
- "\x49\xe0\x54\xeb\x82\x19\x8d\x24"
- "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96"
- "\x0a\xa1\x38\xcf\x43\xda\x71\x08"
- "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57"
- "\xee\x85\x1c\x90\x27\xbe\x32\xc9"
- "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b"
- "\xd2\x46\xdd\x74\x0b\x7f\x16\xad"
- "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f"
- "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e"
- "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0"
- "\x77\x0e\x82\x19\xb0\x24\xbb\x52"
- "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4"
- "\x38\xcf\x66\xfd\x71\x08\x9f\x13"
- "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85"
- "\x1c\xb3\x27\xbe\x55\xec\x60\xf7"
- "\x8e\x02\x99\x30\xc7\x3b\xd2\x69"
- "\x00\x74\x0b\xa2\x16\xad\x44\xdb"
- "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a"
- "\xc1\x58\xef\x63\xfa\x91\x05\x9c"
- "\x33\xca\x3e\xd5\x6c\x03\x77\x0e"
- "\xa5\x19\xb0\x47\xde\x52\xe9\x80"
- "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2"
- "\x66\xfd\x94\x08\x9f\x36\xcd\x41"
- "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3"
- "\x4a\xe1\x55\xec\x83\x1a\x8e\x25"
- "\xbc\x30\xc7\x5e\xf5\x69\x00\x97"
- "\x0b\xa2\x39\xd0\x44\xdb\x72\x09"
- "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58"
- "\xef\x86\x1d\x91\x28\xbf\x33\xca"
- "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c"
- "\xd3\x47\xde\x75\x0c\x80\x17\xae"
- "\x22\xb9\x50\xe7\x5b\xf2\x89\x20"
- "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f"
- "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1"
- "\x78\x0f\x83\x1a\xb1\x25\xbc\x53"
- "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5"
- "\x39\xd0\x67\xfe\x72\x09\xa0\x14"
- "\xab\x42\xd9\x4d\xe4\x7b\x12\x86"
- "\x1d\xb4\x28\xbf\x56\xed\x61\xf8"
- "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a"
- "\x01\x75\x0c\xa3\x17\xae\x45\xdc"
- "\x50\xe7\x7e\x15\x89\x20\xb7\x2b"
- "\xc2\x59\xf0\x64\xfb\x92\x06\x9d"
- "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f"
- "\xa6\x1a\xb1\x48\xdf\x53\xea\x81"
- "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3"
- "\x67\xfe\x95\x09\xa0\x37\xce\x42"
- "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4"
- "\x4b\xe2\x56\xed\x84\x1b\x8f\x26"
- "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98",
- .psize = 2048,
- .digest = (u8 *)(u16 []){ 0x23ca },
- }
-};
-
/*
* Streebog test vectors from RFC 6986 and GOST R 34.11-2012
*/
@@ -38894,4 +38591,355 @@ static const struct cipher_testvec aes_hctr2_tv_template[] = {
};
+#ifdef __LITTLE_ENDIAN
+#define AUTHENC_KEY_HEADER(enckeylen) \
+ "\x08\x00\x01\x00" /* LE rtattr */ \
+ enckeylen /* crypto_authenc_key_param */
+#else
+#define AUTHENC_KEY_HEADER(enckeylen) \
+ "\x00\x08\x00\x01" /* BE rtattr */ \
+ enckeylen /* crypto_authenc_key_param */
+#endif
+
+static const struct aead_testvec krb5_test_aes128_cts_hmac_sha256_128[] = {
+ /* rfc8009 Appendix A */
+ {
+ /* "enc no plain" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x7E\x58\x95\xEA\xF2\x67\x24\x35\xBA\xD8\x17\xF5\x45\xA3\x71\x48" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\xEF\x85\xFB\x89\x0B\xB8\x47\x2F\x4D\xAB\x20\x39\x4D\xCA\x78\x1D"
+ "\xAD\x87\x7E\xDA\x39\xD5\x0C\x87\x0C\x0D\x5A\x0A\x8E\x48\xC7\x18",
+ .clen = 16 + 0 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain<block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x7B\xCA\x28\x5E\x2F\xD4\x13\x0F\xB5\x5B\x1A\x5C\x83\xBC\x5B\x24" // Confounder
+ "\x00\x01\x02\x03\x04\x05", // Plain
+ .plen = 16 + 6,
+ .ctext =
+ "\x84\xD7\xF3\x07\x54\xED\x98\x7B\xAB\x0B\xF3\x50\x6B\xEB\x09\xCF"
+ "\xB5\x54\x02\xCE\xF7\xE6\x87\x7C\xE9\x9E\x24\x7E\x52\xD1\x6E\xD4"
+ "\x42\x1D\xFD\xF8\x97\x6C",
+ .clen = 16 + 6 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain==block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\x56\xAB\x21\x71\x3F\xF6\x2C\x0A\x14\x57\x20\x0F\x6F\xA9\x94\x8F" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain
+ .plen = 16 + 16,
+ .ctext =
+ "\x35\x17\xD6\x40\xF5\x0D\xDC\x8A\xD3\x62\x87\x22\xB3\x56\x9D\x2A"
+ "\xE0\x74\x93\xFA\x82\x63\x25\x40\x80\xEA\x65\xC1\x00\x8E\x8F\xC2"
+ "\x95\xFB\x48\x52\xE7\xD8\x3E\x1E\x7C\x48\xC3\x7E\xEB\xE6\xB0\xD3",
+ .clen = 16 + 16 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain>block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki
+ "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke
+ .klen = 4 + 4 + 16 + 16,
+ .ptext =
+ "\xA7\xA4\xE2\x9A\x47\x28\xCE\x10\x66\x4F\xB6\x4E\x49\xAD\x3F\xAC" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+ "\x10\x11\x12\x13\x14", // Plain
+ .plen = 16 + 21,
+ .ctext =
+ "\x72\x0F\x73\xB1\x8D\x98\x59\xCD\x6C\xCB\x43\x46\x11\x5C\xD3\x36"
+ "\xC7\x0F\x58\xED\xC0\xC4\x43\x7C\x55\x73\x54\x4C\x31\xC8\x13\xBC"
+ "\xE1\xE6\xD0\x72\xC1\x86\xB3\x9A\x41\x3C\x2F\x92\xCA\x9B\x83\x34"
+ "\xA2\x87\xFF\xCB\xFC",
+ .clen = 16 + 21 + 16,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ },
+};
+
+static const struct aead_testvec krb5_test_aes256_cts_hmac_sha384_192[] = {
+ /* rfc8009 Appendix A */
+ {
+ /* "enc no plain" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\xF7\x64\xE9\xFA\x15\xC2\x76\x47\x8B\x2C\x7D\x0C\x4E\x5F\x58\xE4" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\x41\xF5\x3F\xA5\xBF\xE7\x02\x6D\x91\xFA\xF9\xBE\x95\x91\x95\xA0"
+ "\x58\x70\x72\x73\xA9\x6A\x40\xF0\xA0\x19\x60\x62\x1A\xC6\x12\x74"
+ "\x8B\x9B\xBF\xBE\x7E\xB4\xCE\x3C",
+ .clen = 16 + 0 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain<block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\xB8\x0D\x32\x51\xC1\xF6\x47\x14\x94\x25\x6F\xFE\x71\x2D\x0B\x9A" // Confounder
+ "\x00\x01\x02\x03\x04\x05", // Plain
+ .plen = 16 + 6,
+ .ctext =
+ "\x4E\xD7\xB3\x7C\x2B\xCA\xC8\xF7\x4F\x23\xC1\xCF\x07\xE6\x2B\xC7"
+ "\xB7\x5F\xB3\xF6\x37\xB9\xF5\x59\xC7\xF6\x64\xF6\x9E\xAB\x7B\x60"
+ "\x92\x23\x75\x26\xEA\x0D\x1F\x61\xCB\x20\xD6\x9D\x10\xF2",
+ .clen = 16 + 6 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain==block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\x53\xBF\x8A\x0D\x10\x52\x65\xD4\xE2\x76\x42\x86\x24\xCE\x5E\x63" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain
+ .plen = 16 + 16,
+ .ctext =
+ "\xBC\x47\xFF\xEC\x79\x98\xEB\x91\xE8\x11\x5C\xF8\xD1\x9D\xAC\x4B"
+ "\xBB\xE2\xE1\x63\xE8\x7D\xD3\x7F\x49\xBE\xCA\x92\x02\x77\x64\xF6"
+ "\x8C\xF5\x1F\x14\xD7\x98\xC2\x27\x3F\x35\xDF\x57\x4D\x1F\x93\x2E"
+ "\x40\xC4\xFF\x25\x5B\x36\xA2\x66",
+ .clen = 16 + 16 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ }, {
+ /* "enc plain>block" */
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6"
+ "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki
+ "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7"
+ "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke
+ .klen = 4 + 4 + 32 + 24,
+ .ptext =
+ "\x76\x3E\x65\x36\x7E\x86\x4F\x02\xF5\x51\x53\xC7\xE3\xB5\x8A\xF1" // Confounder
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
+ "\x10\x11\x12\x13\x14", // Plain
+ .plen = 16 + 21,
+ .ctext =
+ "\x40\x01\x3E\x2D\xF5\x8E\x87\x51\x95\x7D\x28\x78\xBC\xD2\xD6\xFE"
+ "\x10\x1C\xCF\xD5\x56\xCB\x1E\xAE\x79\xDB\x3C\x3E\xE8\x64\x29\xF2"
+ "\xB2\xA6\x02\xAC\x86\xFE\xF6\xEC\xB6\x47\xD6\x29\x5F\xAE\x07\x7A"
+ "\x1F\xEB\x51\x75\x08\xD2\xC1\x6B\x41\x92\xE0\x1F\x62",
+ .clen = 16 + 21 + 24,
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV
+ .alen = 16,
+ },
+};
+
+static const struct aead_testvec krb5_test_camellia_cts_cmac[] = {
+ /* rfc6803 sec 10 */
+ {
+ // "enc no plain"
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x45\xeb\x66\xe2\xef\xa8\x77\x8f\x7d\xf1\x46\x54\x53\x05\x98\x06" // Ki
+ "\xe9\x9b\x82\xb3\x6c\x4a\xe8\xea\x19\xe9\x5d\xfa\x9e\xde\x88\x2c", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xB6\x98\x22\xA1\x9A\x6B\x09\xC0\xEB\xC8\x55\x7D\x1F\x1B\x6C\x0A" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\xC4\x66\xF1\x87\x10\x69\x92\x1E\xDB\x7C\x6F\xDE\x24\x4A\x52\xDB"
+ "\x0B\xA1\x0E\xDC\x19\x7B\xDB\x80\x06\x65\x8C\xA3\xCC\xCE\x6E\xB8",
+ .clen = 16 + 0 + 16,
+ }, {
+ // "enc 1 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x13\x5f\xe7\x11\x6f\x53\xc2\xaa\x36\x12\xb7\xea\xe0\xf2\x84\xaa" // Ki
+ "\xa7\xed\xcd\x53\x97\xea\x6d\x12\xb0\xaf\xf4\xcb\x8d\xaa\x57\xad", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\x6F\x2F\xC3\xC2\xA1\x66\xFD\x88\x98\x96\x7A\x83\xDE\x95\x96\xD9" // Confounder
+ "1", // Plain
+ .plen = 16 + 1,
+ .ctext =
+ "\x84\x2D\x21\xFD\x95\x03\x11\xC0\xDD\x46\x4A\x3F\x4B\xE8\xD6\xDA"
+ "\x88\xA5\x6D\x55\x9C\x9B\x47\xD3\xF9\xA8\x50\x67\xAF\x66\x15\x59"
+ "\xB8",
+ .clen = 16 + 1 + 16,
+ }, {
+ // "enc 9 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x10\x2c\x34\xd0\x75\x74\x9f\x77\x8a\x15\xca\xd1\xe9\x7d\xa9\x86" // Ki
+ "\xdd\xe4\x2e\xca\x7c\xd9\x86\x3f\xc3\xce\x89\xcb\xc9\x43\x62\xd7", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xA5\xB4\xA7\x1E\x07\x7A\xEE\xF9\x3C\x87\x63\xC1\x8F\xDB\x1F\x10" // Confounder
+ "9 bytesss", // Plain
+ .plen = 16 + 9,
+ .ctext =
+ "\x61\x9F\xF0\x72\xE3\x62\x86\xFF\x0A\x28\xDE\xB3\xA3\x52\xEC\x0D"
+ "\x0E\xDF\x5C\x51\x60\xD6\x63\xC9\x01\x75\x8C\xCF\x9D\x1E\xD3\x3D"
+ "\x71\xDB\x8F\x23\xAA\xBF\x83\x48\xA0",
+ .clen = 16 + 9 + 16,
+ }, {
+ // "enc 13 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\xb8\xc4\x38\xcc\x1a\x00\x60\xfc\x91\x3a\x8e\x07\x16\x96\xbd\x08" // Ki
+ "\xc3\x11\x3a\x25\x85\x90\xb9\xae\xbf\x72\x1b\x1a\xf6\xb0\xcb\xf8", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\x19\xFE\xE4\x0D\x81\x0C\x52\x4B\x5B\x22\xF0\x18\x74\xC6\x93\xDA" // Confounder
+ "13 bytes byte", // Plain
+ .plen = 16 + 13,
+ .ctext =
+ "\xB8\xEC\xA3\x16\x7A\xE6\x31\x55\x12\xE5\x9F\x98\xA7\xC5\x00\x20"
+ "\x5E\x5F\x63\xFF\x3B\xB3\x89\xAF\x1C\x41\xA2\x1D\x64\x0D\x86\x15"
+ "\xC9\xED\x3F\xBE\xB0\x5A\xB6\xAC\xB6\x76\x89\xB5\xEA",
+ .clen = 16 + 13 + 16,
+ }, {
+ // "enc 30 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x10")
+ "\x18\xaf\x19\xb0\x23\x74\x44\xfd\x75\x04\xad\x7d\xbd\x48\xad\xd3" // Ki
+ "\x8b\x07\xee\xd3\x01\x49\x91\x6a\xa2\x0d\xb3\xf5\xce\xd8\xaf\xad", // Ke
+ .klen = 4 + 4 + 16 * 2,
+ .ptext =
+ "\xCA\x7A\x7A\xB4\xBE\x19\x2D\xAB\xD6\x03\x50\x6D\xB1\x9C\x39\xE2" // Confounder
+ "30 bytes bytes bytes bytes byt", // Plain
+ .plen = 16 + 30,
+ .ctext =
+ "\xA2\x6A\x39\x05\xA4\xFF\xD5\x81\x6B\x7B\x1E\x27\x38\x0D\x08\x09"
+ "\x0C\x8E\xC1\xF3\x04\x49\x6E\x1A\xBD\xCD\x2B\xDC\xD1\xDF\xFC\x66"
+ "\x09\x89\xE1\x17\xA7\x13\xDD\xBB\x57\xA4\x14\x6C\x15\x87\xCB\xA4"
+ "\x35\x66\x65\x59\x1D\x22\x40\x28\x2F\x58\x42\xB1\x05\xA5",
+ .clen = 16 + 30 + 16,
+ }, {
+ // "enc no plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\xa2\xb8\x33\xe9\x43\xbb\x10\xee\x53\xb4\xa1\x9b\xc2\xbb\xc7\xe1"
+ "\x9b\x87\xad\x5d\xe9\x21\x22\xa4\x33\x8b\xe6\xf7\x32\xfd\x8a\x0e" // Ki
+ "\x6c\xcb\x3f\x25\xd8\xae\x57\xf4\xe8\xf6\xca\x47\x4b\xdd\xef\xf1"
+ "\x16\xce\x13\x1b\x3f\x71\x01\x2e\x75\x6d\x6b\x1e\x3f\x70\xa7\xf1", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\x3C\xBB\xD2\xB4\x59\x17\x94\x10\x67\xF9\x65\x99\xBB\x98\x92\x6C" // Confounder
+ "", // Plain
+ .plen = 16 + 0,
+ .ctext =
+ "\x03\x88\x6D\x03\x31\x0B\x47\xA6\xD8\xF0\x6D\x7B\x94\xD1\xDD\x83"
+ "\x7E\xCC\xE3\x15\xEF\x65\x2A\xFF\x62\x08\x59\xD9\x4A\x25\x92\x66",
+ .clen = 16 + 0 + 16,
+ }, {
+ // "enc 1 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x84\x61\x4b\xfa\x98\xf1\x74\x8a\xa4\xaf\x99\x2b\x8c\x26\x28\x0d"
+ "\xc8\x98\x73\x29\xdf\x77\x5c\x1d\xb0\x4a\x43\xf1\x21\xaa\x86\x65" // Ki
+ "\xe9\x31\x73\xaa\x01\xeb\x3c\x24\x62\x31\xda\xfc\x78\x02\xee\x32"
+ "\xaf\x24\x85\x1d\x8c\x73\x87\xd1\x8c\xb9\xb2\xc5\xb7\xf5\x70\xb8", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xDE\xF4\x87\xFC\xEB\xE6\xDE\x63\x46\xD4\xDA\x45\x21\xBB\xA2\xD2" // Confounder
+ "1", // Plain
+ .plen = 16 + 1,
+ .ctext =
+ "\x2C\x9C\x15\x70\x13\x3C\x99\xBF\x6A\x34\xBC\x1B\x02\x12\x00\x2F"
+ "\xD1\x94\x33\x87\x49\xDB\x41\x35\x49\x7A\x34\x7C\xFC\xD9\xD1\x8A"
+ "\x12",
+ .clen = 16 + 1 + 16,
+ }, {
+ // "enc 9 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x47\xb9\xf5\xba\xd7\x63\x00\x58\x2a\x54\x45\xfa\x0c\x1b\x29\xc3"
+ "\xaa\x83\xec\x63\xb9\x0b\x4a\xb0\x08\x48\xc1\x85\x67\x4f\x44\xa7" // Ki
+ "\xcd\xa2\xd3\x9a\x9b\x24\x3f\xfe\xb5\x6e\x8d\x5f\x4b\xd5\x28\x74"
+ "\x1e\xcb\x52\x0c\x62\x12\x3f\xb0\x40\xb8\x41\x8b\x15\xc7\xd7\x0c", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xAD\x4F\xF9\x04\xD3\x4E\x55\x53\x84\xB1\x41\x00\xFC\x46\x5F\x88" // Confounder
+ "9 bytesss", // Plain
+ .plen = 16 + 9,
+ .ctext =
+ "\x9C\x6D\xE7\x5F\x81\x2D\xE7\xED\x0D\x28\xB2\x96\x35\x57\xA1\x15"
+ "\x64\x09\x98\x27\x5B\x0A\xF5\x15\x27\x09\x91\x3F\xF5\x2A\x2A\x9C"
+ "\x8E\x63\xB8\x72\xF9\x2E\x64\xC8\x39",
+ .clen = 16 + 9 + 16,
+ }, {
+ // "enc 13 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x15\x2f\x8c\x9d\xc9\x85\x79\x6e\xb1\x94\xed\x14\xc5\x9e\xac\xdd"
+ "\x41\x8a\x33\x32\x36\xb7\x8f\xaf\xa7\xc7\x9b\x04\xe0\xac\xe7\xbf" // Ki
+ "\xcd\x8a\x10\xe2\x79\xda\xdd\xb6\x90\x1e\xc3\x0b\xdf\x98\x73\x25"
+ "\x0f\x6e\xfc\x6a\x77\x36\x7d\x74\xdc\x3e\xe7\xf7\x4b\xc7\x77\x4e", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\xCF\x9B\xCA\x6D\xF1\x14\x4E\x0C\x0A\xF9\xB8\xF3\x4C\x90\xD5\x14" // Confounder
+ "13 bytes byte",
+ .plen = 16 + 13,
+ .ctext =
+ "\xEE\xEC\x85\xA9\x81\x3C\xDC\x53\x67\x72\xAB\x9B\x42\xDE\xFC\x57"
+ "\x06\xF7\x26\xE9\x75\xDD\xE0\x5A\x87\xEB\x54\x06\xEA\x32\x4C\xA1"
+ "\x85\xC9\x98\x6B\x42\xAA\xBE\x79\x4B\x84\x82\x1B\xEE",
+ .clen = 16 + 13 + 16,
+ }, {
+ // "enc 30 plain",
+ .key =
+ AUTHENC_KEY_HEADER("\x00\x00\x00\x20")
+ "\x04\x8d\xeb\xf7\xb1\x2c\x09\x32\xe8\xb2\x96\x99\x6c\x23\xf8\xb7"
+ "\x9d\x59\xb9\x7e\xa1\x19\xfc\x0c\x15\x6b\xf7\x88\xdc\x8c\x85\xe8" // Ki
+ "\x1d\x51\x47\xf3\x4b\xb0\x01\xa0\x4a\x68\xa7\x13\x46\xe7\x65\x4e"
+ "\x02\x23\xa6\x0d\x90\xbc\x2b\x79\xb4\xd8\x79\x56\xd4\x7c\xd4\x2a", // Ke
+ .klen = 4 + 4 + 32 * 2,
+ .ptext =
+ "\x64\x4D\xEF\x38\xDA\x35\x00\x72\x75\x87\x8D\x21\x68\x55\xE2\x28" // Confounder
+ "30 bytes bytes bytes bytes byt", // Plain
+ .plen = 16 + 30,
+ .ctext =
+ "\x0E\x44\x68\x09\x85\x85\x5F\x2D\x1F\x18\x12\x52\x9C\xA8\x3B\xFD"
+ "\x8E\x34\x9D\xE6\xFD\x9A\xDA\x0B\xAA\xA0\x48\xD6\x8E\x26\x5F\xEB"
+ "\xF3\x4A\xD1\x25\x5A\x34\x49\x99\xAD\x37\x14\x68\x87\xA6\xC6\x84"
+ "\x57\x31\xAC\x7F\x46\x37\x6A\x05\x04\xCD\x06\x57\x14\x74",
+ .clen = 16 + 30 + 16,
+ },
+};
+
#endif /* _CRYPTO_TESTMGR_H */
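
[Editor's note] The AUTHENC_KEY_HEADER macro used by the krb5 vectors above hand-encodes the authenc()-style setkey blob: a native-endian rtattr of type CRYPTO_AUTHENC_KEYA_PARAM whose payload is the big-endian enc-key length, followed by the authentication key (Ki) and then the encryption key (Ke); that is why every .klen above is 4 + 4 + authkeylen + enckeylen. A sketch of packing such a key programmatically, assuming the rtattr helpers from <linux/rtnetlink.h> and struct crypto_authenc_key_param from <crypto/authenc.h>:

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static int pack_authenc_key(u8 *buf, unsigned int buflen,
                            const u8 *authkey, unsigned int authkeylen,
                            const u8 *enckey, unsigned int enckeylen)
{
        struct rtattr *rta = (struct rtattr *)buf;
        struct crypto_authenc_key_param *param;

        if (buflen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
                return -EINVAL;

        /* Native-endian rtattr header, big-endian enc-key length. */
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        param->enckeylen = cpu_to_be32(enckeylen);

        /* Auth key (Ki) first, then enc key (Ke), as in the vectors. */
        memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
        memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen,
               enckey, enckeylen);
        return 0;
}
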
diff --git a/crypto/xctr.c b/crypto/xctr.c
index 6ed9c85ededa..9c536ab6d2e5 100644
--- a/crypto/xctr.c
+++ b/crypto/xctr.c
@@ -78,7 +78,7 @@ static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk,
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *data = walk->src.virt.addr;
+ u8 *data = walk->dst.virt.addr;
u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);
diff --git a/crypto/xts.c b/crypto/xts.c
index 821060ede2cf..31529c9ef08f 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -99,7 +99,7 @@ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
while (w.nbytes) {
unsigned int avail = w.nbytes;
- le128 *wsrc;
+ const le128 *wsrc;
le128 *wdst;
wsrc = w.src.virt.addr;
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 154a969c83a8..90bb4f36f846 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -103,7 +103,7 @@ static int __zstd_init(void *ctx)
return ret;
}
-static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
+static void *zstd_alloc_ctx(void)
{
int ret;
struct zstd_ctx *ctx;
@@ -121,32 +121,18 @@ static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
return ctx;
}
-static int zstd_init(struct crypto_tfm *tfm)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __zstd_init(ctx);
-}
-
static void __zstd_exit(void *ctx)
{
zstd_comp_exit(ctx);
zstd_decomp_exit(ctx);
}
-static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx)
+static void zstd_free_ctx(void *ctx)
{
__zstd_exit(ctx);
kfree_sensitive(ctx);
}
-static void zstd_exit(struct crypto_tfm *tfm)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- __zstd_exit(ctx);
-}
-
static int __zstd_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
@@ -161,14 +147,6 @@ static int __zstd_compress(const u8 *src, unsigned int slen,
return 0;
}
-static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __zstd_compress(src, slen, dst, dlen, ctx);
-}
-
static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -189,14 +167,6 @@ static int __zstd_decompress(const u8 *src, unsigned int slen,
return 0;
}
-static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
-
- return __zstd_decompress(src, slen, dst, dlen, ctx);
-}
-
static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
@@ -204,19 +174,6 @@ static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
-static struct crypto_alg alg = {
- .cra_name = "zstd",
- .cra_driver_name = "zstd-generic",
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct zstd_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = zstd_init,
- .cra_exit = zstd_exit,
- .cra_u = { .compress = {
- .coa_compress = zstd_compress,
- .coa_decompress = zstd_decompress } }
-};
-
static struct scomp_alg scomp = {
.alloc_ctx = zstd_alloc_ctx,
.free_ctx = zstd_free_ctx,
@@ -231,22 +188,11 @@ static struct scomp_alg scomp = {
static int __init zstd_mod_init(void)
{
- int ret;
-
- ret = crypto_register_alg(&alg);
- if (ret)
- return ret;
-
- ret = crypto_register_scomp(&scomp);
- if (ret)
- crypto_unregister_alg(&alg);
-
- return ret;
+ return crypto_register_scomp(&scomp);
}
static void __exit zstd_mod_fini(void)
{
- crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}