author     Herbert Xu <herbert@gondor.apana.org.au>  2024-03-13 09:49:37 +0800
committer  Herbert Xu <herbert@gondor.apana.org.au>  2024-03-13 09:49:37 +0800
commit     6a8dbd71a70620c42d4fa82509204ba18231f28d (patch)
tree       4cf38267140e30c8bd1ae3ec9d02f29b28037020 /crypto
parent     77292bb8ca69c808741aadbd29207605296e24af (diff)
Revert "crypto: remove CONFIG_CRYPTO_STATS"
This reverts commit 2beb81fbf0c01a62515a1bcef326168494ee2bd0.

While removing CONFIG_CRYPTO_STATS is a worthy goal, this also removed
unrelated infrastructure such as crypto_comp_alg_common.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
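For context, crypto_comp_alg_common (struct comp_alg_common) is the base shared by acomp and scomp algorithms that this revert restores. A sketch of its definition, as it appeared in include/crypto/acompress.h in this era (reproduced for illustration, so treat the exact layout as approximate):

struct comp_alg_common {
#ifdef CONFIG_CRYPTO_STATS
	struct crypto_istat_compress stat;	/* compress/decompress counters */
#endif

	struct crypto_alg base;	/* embedded base; container_of() maps back to it */
};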
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/Kconfig                                                 |  20
-rw-r--r--  crypto/Makefile                                                |   2
-rw-r--r--  crypto/acompress.c                                             |  47
-rw-r--r--  crypto/aead.c                                                  |  84
-rw-r--r--  crypto/ahash.c                                                 |  63
-rw-r--r--  crypto/akcipher.c                                              |  31
-rw-r--r--  crypto/compress.h                                              |   5
-rw-r--r--  crypto/crypto_user_base.c (renamed from crypto/crypto_user.c)  |  10
-rw-r--r--  crypto/crypto_user_stat.c                                      | 176
-rw-r--r--  crypto/hash.h                                                  |  30
-rw-r--r--  crypto/kpp.c                                                   |  30
-rw-r--r--  crypto/lskcipher.c                                             |  73
-rw-r--r--  crypto/rng.c                                                   |  44
-rw-r--r--  crypto/scompress.c                                             |   8
-rw-r--r--  crypto/shash.c                                                 |  75
-rw-r--r--  crypto/sig.c                                                   |  13
-rw-r--r--  crypto/skcipher.c                                              |  86
-rw-r--r--  crypto/skcipher.h                                              |  10
18 files changed, 760 insertions(+), 47 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f937142aa94d..44661c2e30ca 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1456,6 +1456,26 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE
already been phased out from internal use by the kernel, and are
only useful for userspace clients that still rely on them.
+config CRYPTO_STATS
+ bool "Crypto usage statistics"
+ depends on CRYPTO_USER
+ help
+ Enable the gathering of crypto stats.
+
+ Enabling this option reduces the performance of the crypto API. It
+ should only be enabled when there is actually a use case for it.
+
+ This collects data sizes, numbers of requests, and numbers
+ of errors processed by:
+ - AEAD ciphers (encrypt, decrypt)
+ - asymmetric key ciphers (encrypt, decrypt, verify, sign)
+ - symmetric key ciphers (encrypt, decrypt)
+ - compression algorithms (compress, decompress)
+ - hash algorithms (hash)
+ - key-agreement protocol primitives (setsecret, generate
+ public key, compute shared secret)
+ - RNG (generate, seed)
+
endmenu
config CRYPTO_HASH_INFO
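Concretely, each algorithm type embeds a small block of atomic64_t counters when this option is enabled. The AEAD variant, sketched here from include/crypto/aead.h (the field names are confirmed by the atomic64_read() calls in the report functions later in this diff), is representative:

struct crypto_istat_aead {
	atomic64_t encrypt_cnt;		/* number of encrypt requests */
	atomic64_t encrypt_tlen;	/* total bytes encrypted */
	atomic64_t decrypt_cnt;		/* number of decrypt requests */
	atomic64_t decrypt_tlen;	/* total bytes decrypted */
	atomic64_t err_cnt;		/* requests that failed */
};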
diff --git a/crypto/Makefile b/crypto/Makefile
index de9a3312a2c8..408f0a1f9ab9 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -69,6 +69,8 @@ cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
+crypto_user-y := crypto_user_base.o
+crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 484a865b23cd..1c682810a484 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -25,7 +25,7 @@ static const struct crypto_type crypto_acomp_type;
static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
- return container_of(alg, struct acomp_alg, base);
+ return container_of(alg, struct acomp_alg, calg.base);
}
static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
@@ -93,6 +93,32 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
return extsize;
}
+static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg)
+{
+ struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
+ struct crypto_istat_compress *istat = comp_get_stat(calg);
+ struct crypto_stat_compress racomp;
+
+ memset(&racomp, 0, sizeof(racomp));
+
+ strscpy(racomp.type, "acomp", sizeof(racomp.type));
+ racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
+ racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
+ racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
+ racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
+ racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
+}
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return __crypto_acomp_report_stat(skb, alg);
+}
+#endif
+
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
@@ -102,6 +128,9 @@ static const struct crypto_type crypto_acomp_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_acomp_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_acomp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
@@ -153,12 +182,24 @@ void acomp_request_free(struct acomp_req *req)
}
EXPORT_SYMBOL_GPL(acomp_request_free);
-int crypto_register_acomp(struct acomp_alg *alg)
+void comp_prepare_alg(struct comp_alg_common *alg)
{
+ struct crypto_istat_compress *istat = comp_get_stat(alg);
struct crypto_alg *base = &alg->base;
- base->cra_type = &crypto_acomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+}
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
+
+ base->cra_type = &crypto_acomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
return crypto_register_alg(base);
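comp_prepare_alg() and __crypto_acomp_report_stat() above rely on two small helpers from crypto/compress.h that this revert also restores; a sketch, assuming they follow the same pattern as hash_get_stat() and skcipher_get_stat_common() shown later in this diff:

static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}

static inline struct crypto_istat_compress *comp_get_stat(
	struct comp_alg_common *alg)
{
#ifdef CONFIG_CRYPTO_STATS
	return &alg->stat;
#else
	return NULL;
#endif
}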
diff --git a/crypto/aead.c b/crypto/aead.c
index 0e75a69189df..54906633566a 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -20,6 +20,15 @@
#include "internal.h"
+static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -81,28 +90,62 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
+static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct crypto_istat_aead *istat;
+ int ret;
+
+ istat = aead_get_stat(alg);
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
+ ret = -ENOKEY;
+ else
+ ret = alg->encrypt(req);
- return crypto_aead_alg(aead)->encrypt(req);
+ return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct crypto_istat_aead *istat;
+ int ret;
- if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
+ istat = aead_get_stat(alg);
- if (req->cryptlen < crypto_aead_authsize(aead))
- return -EINVAL;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+ }
+
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ ret = -ENOKEY;
+ else if (req->cryptlen < crypto_aead_authsize(aead))
+ ret = -EINVAL;
+ else
+ ret = alg->decrypt(req);
- return crypto_aead_alg(aead)->decrypt(req);
+ return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
@@ -172,6 +215,26 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
aead->free(aead);
}
+static int __maybe_unused crypto_aead_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct aead_alg *aead = container_of(alg, struct aead_alg, base);
+ struct crypto_istat_aead *istat = aead_get_stat(aead);
+ struct crypto_stat_aead raead;
+
+ memset(&raead, 0, sizeof(raead));
+
+ strscpy(raead.type, "aead", sizeof(raead.type));
+
+ raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
+}
+
static const struct crypto_type crypto_aead_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_aead_init_tfm,
@@ -182,6 +245,9 @@ static const struct crypto_type crypto_aead_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_aead_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_aead_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
@@ -211,6 +277,7 @@ EXPORT_SYMBOL_GPL(crypto_has_aead);
static int aead_prepare_alg(struct aead_alg *alg)
{
+ struct crypto_istat_aead *istat = aead_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -224,6 +291,9 @@ static int aead_prepare_alg(struct aead_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
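A note on the idiom used throughout this series: aead_get_stat() returns NULL when CONFIG_CRYPTO_STATS is off, yet the fast paths dereference istat with no NULL check. This is safe because IS_ENABLED() folds to a compile-time constant, so the guarded block becomes dead code and is never emitted:

	/* With CONFIG_CRYPTO_STATS=n this block is discarded at compile
	 * time, so the NULL returned by aead_get_stat() is never
	 * dereferenced. */
	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}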
diff --git a/crypto/ahash.c b/crypto/ahash.c
index bcd9de009a91..0ac83f7f701d 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -27,6 +27,22 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
+static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
+{
+ return hash_get_stat(&alg->halg);
+}
+
+static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&ahash_get_stat(alg)->err_cnt);
+
+ return err;
+}
+
/*
* For an ahash tfm that is using an shash algorithm (instead of an ahash
* algorithm), this returns the underlying shash tfm.
@@ -328,47 +344,75 @@ static void ahash_restore_req(struct ahash_request *req, int err)
int crypto_ahash_update(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ahash_alg *alg;
if (likely(tfm->using_shash))
return shash_ahash_update(req, ahash_request_ctx(req));
- return crypto_ahash_alg(tfm)->update(req);
+ alg = crypto_ahash_alg(tfm);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
+ return crypto_ahash_errstat(alg, alg->update(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ahash_alg *alg;
if (likely(tfm->using_shash))
return crypto_shash_final(ahash_request_ctx(req), req->result);
- return crypto_ahash_alg(tfm)->final(req);
+ alg = crypto_ahash_alg(tfm);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
+ return crypto_ahash_errstat(alg, alg->final(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ahash_alg *alg;
if (likely(tfm->using_shash))
return shash_ahash_finup(req, ahash_request_ctx(req));
- return crypto_ahash_alg(tfm)->finup(req);
+ alg = crypto_ahash_alg(tfm);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = ahash_get_stat(alg);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(req->nbytes, &istat->hash_tlen);
+ }
+ return crypto_ahash_errstat(alg, alg->finup(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ahash_alg *alg;
+ int err;
if (likely(tfm->using_shash))
return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
+ alg = crypto_ahash_alg(tfm);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = ahash_get_stat(alg);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(req->nbytes, &istat->hash_tlen);
+ }
+
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
+ err = -ENOKEY;
+ else
+ err = alg->digest(req);
- return crypto_ahash_alg(tfm)->digest(req);
+ return crypto_ahash_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -527,6 +571,12 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__crypto_hash_alg_common(alg)->digestsize);
}
+static int __maybe_unused crypto_ahash_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return crypto_hash_report_stat(skb, alg, "ahash");
+}
+
static const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
@@ -537,6 +587,9 @@ static const struct crypto_type crypto_ahash_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_ahash_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_ahash_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
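Both ahash and shash funnel into hash_get_stat() on the shared hash_alg_common, whose counter block is smaller than the cipher ones. A sketch from include/crypto/hash.h (field names match crypto_hash_report_stat() in the hash.h diff below):

struct crypto_istat_hash {
	atomic64_t hash_cnt;	/* number of hash requests */
	atomic64_t hash_tlen;	/* total bytes hashed */
	atomic64_t err_cnt;	/* requests that failed */
};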
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index e0ff5f4dda6d..52813f0b19e4 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -70,6 +70,30 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
akcipher->free(akcipher);
}
+static int __maybe_unused crypto_akcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
+ struct crypto_istat_akcipher *istat;
+ struct crypto_stat_akcipher rakcipher;
+
+ istat = akcipher_get_stat(akcipher);
+
+ memset(&rakcipher, 0, sizeof(rakcipher));
+
+ strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
+ rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
+ rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+ sizeof(rakcipher), &rakcipher);
+}
+
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
@@ -80,6 +104,9 @@ static const struct crypto_type crypto_akcipher_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_akcipher_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_akcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
@@ -104,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
+ struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
}
static int akcipher_default_op(struct akcipher_request *req)
diff --git a/crypto/compress.h b/crypto/compress.h
index 23ea43810810..19f65516d699 100644
--- a/crypto/compress.h
+++ b/crypto/compress.h
@@ -12,10 +12,15 @@
#include "internal.h"
struct acomp_req;
+struct comp_alg_common;
struct sk_buff;
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg);
+
+void comp_prepare_alg(struct comp_alg_common *alg);
+
#endif /* _LOCAL_CRYPTO_COMPRESS_H */
diff --git a/crypto/crypto_user.c b/crypto/crypto_user_base.c
index 6c571834e86a..3fa20f12989f 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user_base.c
@@ -18,6 +18,7 @@
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
+#include <crypto/internal/cryptouser.h>
#include "internal.h"
@@ -32,7 +33,7 @@ struct crypto_dump_info {
u16 nlmsg_flags;
};
-static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
struct crypto_alg *q, *alg = NULL;
@@ -386,13 +387,6 @@ static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
return crypto_del_default_rng();
}
-static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
- struct nlattr **attrs)
-{
- /* No longer supported */
- return -ENOTSUPP;
-}
-
#define MSGSIZE(type) sizeof(struct type)
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
new file mode 100644
index 000000000000..d4f3d39b5137
--- /dev/null
+++ b/crypto/crypto_user_stat.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Crypto user configuration API.
+ *
+ * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
+ *
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/cryptouser.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+
+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
+
+struct crypto_dump_info {
+ struct sk_buff *in_skb;
+ struct sk_buff *out_skb;
+ u32 nlmsg_seq;
+ u16 nlmsg_flags;
+};
+
+static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat_cipher rcipher;
+
+ memset(&rcipher, 0, sizeof(rcipher));
+
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
+}
+
+static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat_compress rcomp;
+
+ memset(&rcomp, 0, sizeof(rcomp));
+
+ strscpy(rcomp.type, "compression", sizeof(rcomp.type));
+
+ return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
+}
+
+static int crypto_reportstat_one(struct crypto_alg *alg,
+ struct crypto_user_alg *ualg,
+ struct sk_buff *skb)
+{
+ memset(ualg, 0, sizeof(*ualg));
+
+ strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+ strscpy(ualg->cru_driver_name, alg->cra_driver_name,
+ sizeof(ualg->cru_driver_name));
+ strscpy(ualg->cru_module_name, module_name(alg->cra_module),
+ sizeof(ualg->cru_module_name));
+
+ ualg->cru_type = 0;
+ ualg->cru_mask = 0;
+ ualg->cru_flags = alg->cra_flags;
+ ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
+
+ if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+ goto nla_put_failure;
+ if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
+ struct crypto_stat_larval rl;
+
+ memset(&rl, 0, sizeof(rl));
+ strscpy(rl.type, "larval", sizeof(rl.type));
+ if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
+ goto nla_put_failure;
+ goto out;
+ }
+
+ if (alg->cra_type && alg->cra_type->report_stat) {
+ if (alg->cra_type->report_stat(skb, alg))
+ goto nla_put_failure;
+ goto out;
+ }
+
+ switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
+ case CRYPTO_ALG_TYPE_CIPHER:
+ if (crypto_report_cipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_COMPRESS:
+ if (crypto_report_comp(skb, alg))
+ goto nla_put_failure;
+ break;
+ default:
+ pr_err("ERROR: Unhandled alg %d in %s\n",
+ alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
+ __func__);
+ }
+
+out:
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_reportstat_alg(struct crypto_alg *alg,
+ struct crypto_dump_info *info)
+{
+ struct sk_buff *in_skb = info->in_skb;
+ struct sk_buff *skb = info->out_skb;
+ struct nlmsghdr *nlh;
+ struct crypto_user_alg *ualg;
+ int err = 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
+ CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto out;
+ }
+
+ ualg = nlmsg_data(nlh);
+
+ err = crypto_reportstat_one(alg, ualg, skb);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+out:
+ return err;
+}
+
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+ struct nlattr **attrs)
+{
+ struct net *net = sock_net(in_skb->sk);
+ struct crypto_user_alg *p = nlmsg_data(in_nlh);
+ struct crypto_alg *alg;
+ struct sk_buff *skb;
+ struct crypto_dump_info info;
+ int err;
+
+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+ return -EINVAL;
+
+ alg = crypto_alg_match(p, 0);
+ if (!alg)
+ return -ENOENT;
+
+ err = -ENOMEM;
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ goto drop_alg;
+
+ info.in_skb = in_skb;
+ info.out_skb = skb;
+ info.nlmsg_seq = in_nlh->nlmsg_seq;
+ info.nlmsg_flags = 0;
+
+ err = crypto_reportstat_alg(alg, &info);
+
+drop_alg:
+ crypto_mod_put(alg);
+
+ if (err) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
+}
+
+MODULE_LICENSE("GPL");
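crypto_reportstat() above serves CRYPTO_MSG_GETSTAT requests arriving on a NETLINK_CRYPTO socket. A minimal userspace query might look like the sketch below; it is illustrative only (not a supported tool), and reply parsing of the nested CRYPTOCFGA_STAT_* attribute plus most error handling is omitted:

#include <linux/cryptouser.h>
#include <linux/netlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int query_stat(const char *driver)
{
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct crypto_user_alg cru;
	} req;
	char reply[4096];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));	/* also NUL-terminates the names */
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
	req.nlh.nlmsg_type = CRYPTO_MSG_GETSTAT;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	strncpy(req.cru.cru_driver_name, driver,
		sizeof(req.cru.cru_driver_name) - 1);

	sendto(fd, &req, req.nlh.nlmsg_len, 0,
	       (struct sockaddr *)&nladdr, sizeof(nladdr));
	recv(fd, reply, sizeof(reply), 0);	/* crypto_user_alg + stat attr */
	close(fd);
	return 0;
}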
diff --git a/crypto/hash.h b/crypto/hash.h
index cf9aee07f77d..93f6ba0df263 100644
--- a/crypto/hash.h
+++ b/crypto/hash.h
@@ -8,9 +8,39 @@
#define _LOCAL_CRYPTO_HASH_H
#include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
#include "internal.h"
+static inline struct crypto_istat_hash *hash_get_stat(
+ struct hash_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
+static inline int crypto_hash_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg,
+ const char *type)
+{
+ struct hash_alg_common *halg = __crypto_hash_alg_common(alg);
+ struct crypto_istat_hash *istat = hash_get_stat(halg);
+ struct crypto_stat_hash rhash;
+
+ memset(&rhash, 0, sizeof(rhash));
+
+ strscpy(rhash.type, type, sizeof(rhash.type));
+
+ rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt);
+ rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen);
+ rhash.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
+}
+
extern const struct crypto_type crypto_shash_type;
int hash_prepare_alg(struct hash_alg_common *alg);
diff --git a/crypto/kpp.c b/crypto/kpp.c
index ecc63a1a948d..33d44e59387f 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -66,6 +66,29 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst)
kpp->free(kpp);
}
+static int __maybe_unused crypto_kpp_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct kpp_alg *kpp = __crypto_kpp_alg(alg);
+ struct crypto_istat_kpp *istat;
+ struct crypto_stat_kpp rkpp;
+
+ istat = kpp_get_stat(kpp);
+
+ memset(&rkpp, 0, sizeof(rkpp));
+
+ strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+ rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
+ rkpp.stat_generate_public_key_cnt =
+ atomic64_read(&istat->generate_public_key_cnt);
+ rkpp.stat_compute_shared_secret_cnt =
+ atomic64_read(&istat->compute_shared_secret_cnt);
+ rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
+}
+
static const struct crypto_type crypto_kpp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_kpp_init_tfm,
@@ -76,6 +99,9 @@ static const struct crypto_type crypto_kpp_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_kpp_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_kpp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
@@ -105,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp);
static void kpp_prepare_alg(struct kpp_alg *alg)
{
+ struct crypto_istat_kpp *istat = kpp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_kpp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
}
int crypto_register_kpp(struct kpp_alg *alg)
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 0a800292ca4e..0b6dd8aa21f2 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -29,6 +29,25 @@ static inline struct lskcipher_alg *__crypto_lskcipher_alg(
return container_of(alg, struct lskcipher_alg, co.base);
}
+static inline struct crypto_istat_cipher *lskcipher_get_stat(
+ struct lskcipher_alg *alg)
+{
+ return skcipher_get_stat_common(&alg->co);
+}
+
+static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
+{
+ struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -128,13 +147,20 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
u32 flags))
{
unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+ struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+ int ret;
if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
- alignmask)
- return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
- crypt);
+ alignmask) {
+ ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
+ crypt);
+ goto out;
+ }
- return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+ ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+
+out:
+ return crypto_lskcipher_errstat(alg, ret);
}
int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
@@ -142,6 +168,13 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
{
struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(len, &istat->encrypt_tlen);
+ }
+
return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
@@ -151,6 +184,13 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
{
struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(len, &istat->decrypt_tlen);
+ }
+
return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
@@ -282,6 +322,28 @@ static int __maybe_unused crypto_lskcipher_report(
sizeof(rblkcipher), &rblkcipher);
}
+static int __maybe_unused crypto_lskcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
+ struct crypto_istat_cipher *istat;
+ struct crypto_stat_cipher rcipher;
+
+ istat = lskcipher_get_stat(skcipher);
+
+ memset(&rcipher, 0, sizeof(rcipher));
+
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
+}
+
static const struct crypto_type crypto_lskcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_lskcipher_init_tfm,
@@ -292,6 +354,9 @@ static const struct crypto_type crypto_lskcipher_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_lskcipher_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_lskcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_LSKCIPHER,
diff --git a/crypto/rng.c b/crypto/rng.c
index 9d8804e46422..279dffdebf59 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -30,24 +30,30 @@ static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
+ struct rng_alg *alg = crypto_rng_alg(tfm);
u8 *buf = NULL;
int err;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&rng_get_stat(alg)->seed_cnt);
+
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
+ err = -ENOMEM;
if (!buf)
- return -ENOMEM;
+ goto out;
err = get_random_bytes_wait(buf, slen);
if (err)
- goto out;
+ goto free_buf;
seed = buf;
}
- err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
-out:
+ err = alg->seed(tfm, seed, slen);
+free_buf:
kfree_sensitive(buf);
- return err;
+out:
+ return crypto_rng_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_rng_reset);
@@ -85,6 +91,27 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "seedsize : %u\n", seedsize(alg));
}
+static int __maybe_unused crypto_rng_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct rng_alg *rng = __crypto_rng_alg(alg);
+ struct crypto_istat_rng *istat;
+ struct crypto_stat_rng rrng;
+
+ istat = rng_get_stat(rng);
+
+ memset(&rrng, 0, sizeof(rrng));
+
+ strscpy(rrng.type, "rng", sizeof(rrng.type));
+
+ rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
+ rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
+ rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
+ rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
+}
+
static const struct crypto_type crypto_rng_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_rng_init_tfm,
@@ -94,6 +121,9 @@ static const struct crypto_type crypto_rng_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_rng_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_rng_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
@@ -169,6 +199,7 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
int crypto_register_rng(struct rng_alg *alg)
{
+ struct crypto_istat_rng *istat = rng_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->seedsize > PAGE_SIZE / 8)
@@ -178,6 +209,9 @@ int crypto_register_rng(struct rng_alg *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_rng);
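rng_get_stat() and crypto_rng_errstat(), used above, do not appear in this file's diff because they live in include/crypto/rng.h, outside the 'crypto' directory this diffstat is limited to. A sketch, assuming they follow the same shape as the other per-type helpers in this series:

static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
	return &alg->stat;
#else
	return NULL;
#endif
}

static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
{
	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&rng_get_stat(alg)->err_cnt);

	return err;
}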
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 93daf3eb9842..60bbb7ea4060 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -271,6 +271,9 @@ static const struct crypto_type crypto_scomp_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_scomp_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_acomp_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
@@ -279,10 +282,11 @@ static const struct crypto_type crypto_scomp_type = {
int crypto_register_scomp(struct scomp_alg *alg)
{
- struct crypto_alg *base = &alg->base;
+ struct crypto_alg *base = &alg->calg.base;
+
+ comp_prepare_alg(&alg->calg);
base->cra_type = &crypto_scomp_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
return crypto_register_alg(base);
diff --git a/crypto/shash.c b/crypto/shash.c
index 0ffe671b519e..c3f7f6a25280 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -16,6 +16,18 @@
#include "hash.h"
+static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
+{
+ return hash_get_stat(&alg->halg);
+}
+
+static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS) && err)
+ atomic64_inc(&shash_get_stat(alg)->err_cnt);
+ return err;
+}
+
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -49,13 +61,29 @@ EXPORT_SYMBOL_GPL(crypto_shash_setkey);
int crypto_shash_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return crypto_shash_alg(desc->tfm)->update(desc, data, len);
+ struct shash_alg *shash = crypto_shash_alg(desc->tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
+
+ err = shash->update(desc, data, len);
+
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);
int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
- return crypto_shash_alg(desc->tfm)->final(desc, out);
+ struct shash_alg *shash = crypto_shash_alg(desc->tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ atomic64_inc(&shash_get_stat(shash)->hash_cnt);
+
+ err = shash->final(desc, out);
+
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);
@@ -71,7 +99,20 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data,
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out);
+ struct crypto_shash *tfm = desc->tfm;
+ struct shash_alg *shash = crypto_shash_alg(tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = shash_get_stat(shash);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(len, &istat->hash_tlen);
+ }
+
+ err = shash->finup(desc, data, len, out);
+
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
@@ -88,11 +129,22 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct crypto_shash *tfm = desc->tfm;
+ struct shash_alg *shash = crypto_shash_alg(tfm);
+ int err;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_hash *istat = shash_get_stat(shash);
+
+ atomic64_inc(&istat->hash_cnt);
+ atomic64_add(len, &istat->hash_tlen);
+ }
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
+ err = -ENOKEY;
+ else
+ err = shash->digest(desc, data, len, out);
- return crypto_shash_alg(desc->tfm)->digest(desc, data, len, out);
+ return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
@@ -213,6 +265,12 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "digestsize : %u\n", salg->digestsize);
}
+static int __maybe_unused crypto_shash_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return crypto_hash_report_stat(skb, alg, "shash");
+}
+
const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
@@ -223,6 +281,9 @@ const struct crypto_type crypto_shash_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_shash_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_shash_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
@@ -289,6 +350,7 @@ EXPORT_SYMBOL_GPL(crypto_clone_shash);
int hash_prepare_alg(struct hash_alg_common *alg)
{
+ struct crypto_istat_hash *istat = hash_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->digestsize > HASH_MAX_DIGESTSIZE)
@@ -300,6 +362,9 @@ int hash_prepare_alg(struct hash_alg_common *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
diff --git a/crypto/sig.c b/crypto/sig.c
index 7645bedf3a1f..224c47019297 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -45,6 +45,16 @@ static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig);
}
+static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb,
+ struct crypto_alg *alg)
+{
+ struct crypto_stat_akcipher rsig = {};
+
+ strscpy(rsig.type, "sig", sizeof(rsig.type));
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig);
+}
+
static const struct crypto_type crypto_sig_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_sig_init_tfm,
@@ -54,6 +64,9 @@ static const struct crypto_type crypto_sig_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_sig_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_sig_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_SIG_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index ceed7f33a67b..bc70e159d27d 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -89,6 +89,25 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
return container_of(alg, struct skcipher_alg, base);
}
+static inline struct crypto_istat_cipher *skcipher_get_stat(
+ struct skcipher_alg *alg)
+{
+ return skcipher_get_stat_common(&alg->co);
+}
+
+static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
+{
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
@@ -635,12 +654,23 @@ int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ int ret;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
- if (alg->co.base.cra_type != &crypto_skcipher_type)
- return crypto_lskcipher_encrypt_sg(req);
- return alg->encrypt(req);
+ ret = -ENOKEY;
+ else if (alg->co.base.cra_type != &crypto_skcipher_type)
+ ret = crypto_lskcipher_encrypt_sg(req);
+ else
+ ret = alg->encrypt(req);
+
+ return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
@@ -648,12 +678,23 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ int ret;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+ }
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
- if (alg->co.base.cra_type != &crypto_skcipher_type)
- return crypto_lskcipher_decrypt_sg(req);
- return alg->decrypt(req);
+ ret = -ENOKEY;
+ else if (alg->co.base.cra_type != &crypto_skcipher_type)
+ ret = crypto_lskcipher_decrypt_sg(req);
+ else
+ ret = alg->decrypt(req);
+
+ return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
@@ -805,6 +846,28 @@ static int __maybe_unused crypto_skcipher_report(
sizeof(rblkcipher), &rblkcipher);
}
+static int __maybe_unused crypto_skcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
+ struct crypto_istat_cipher *istat;
+ struct crypto_stat_cipher rcipher;
+
+ istat = skcipher_get_stat(skcipher);
+
+ memset(&rcipher, 0, sizeof(rcipher));
+
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
+}
+
static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_skcipher_extsize,
.init_tfm = crypto_skcipher_init_tfm,
@@ -815,6 +878,9 @@ static const struct crypto_type crypto_skcipher_type = {
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_skcipher_report,
#endif
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_skcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
@@ -869,6 +935,7 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
+ struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -881,6 +948,9 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
diff --git a/crypto/skcipher.h b/crypto/skcipher.h
index 703651367dd8..16c9484360da 100644
--- a/crypto/skcipher.h
+++ b/crypto/skcipher.h
@@ -10,6 +10,16 @@
#include <crypto/internal/skcipher.h>
#include "internal.h"
+static inline struct crypto_istat_cipher *skcipher_get_stat_common(
+ struct skcipher_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
int crypto_lskcipher_encrypt_sg(struct skcipher_request *req);
int crypto_lskcipher_decrypt_sg(struct skcipher_request *req);
int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);