author    Linus Torvalds <torvalds@linux-foundation.org>  2022-08-04 20:00:14 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-08-04 20:00:14 -0700
commit    fa9db655d0e112c108fe838809608caf759bdf5e (patch)
tree      899a983b333871688095fd14b413c199b9a38f73 /drivers/nvme
parent    e495274793ea602415d050452088a496abcd9e6c (diff)
parent    bc792884b76f0da2f5c9a8d720e430e2de9756f5 (diff)
Merge tag 'for-5.20/block-2022-08-04' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:

 - NVMe pull requests via Christoph:
     - add support for In-Band authentication (Hannes Reinecke)
     - handle the persistent internal error AER (Michael Kelley)
     - use in-capsule data for TCP I/O queue connect (Caleb Sander)
     - remove timeout for getting RDMA-CM established event (Israel Rukshin)
     - misc cleanups (Joel Granados, Sagi Grimberg, Chaitanya Kulkarni, Guixin Liu, Xiang wangx)
     - use command_id instead of req->tag in trace_nvme_complete_rq() (Bean Huo)
     - various fixes for the new authentication code (Lukas Bulwahn, Dan Carpenter, Colin Ian King, Chaitanya Kulkarni, Hannes Reinecke)
     - small cleanups (Liu Song, Christoph Hellwig)
     - restore compat_ioctl support (Nick Bowler)
     - make a nvmet-tcp workqueue lockdep-safe (Sagi Grimberg)
     - enable generic interface (/dev/ngXnY) for unknown command sets (Joel Granados, Christoph Hellwig)
     - don't always build constants.o (Christoph Hellwig)
     - print the command name of aborted commands (Christoph Hellwig)

 - MD pull requests via Song:
     - Improve raid5 lock contention, by Logan Gunthorpe.
     - Misc fixes to raid5, by Logan Gunthorpe.
     - Fix race condition with md_reap_sync_thread(), by Guoqing Jiang.
     - Fix potential deadlock with raid5_quiesce and raid5_get_active_stripe, by Logan Gunthorpe.
     - Refactoring md_alloc(), by Christoph.
     - Fix md disk_name lifetime problems, by Christoph Hellwig.
     - Convert prepare_to_wait() to wait_woken() api, by Logan Gunthorpe.
     - Fix sectors_to_do bitmap issue, by Logan Gunthorpe.

 - Work on unifying the null_blk module parameters and configfs API (Vincent)

 - drbd bitmap IO error fix (Lars)

 - Set of rnbd fixes (Guoqing, Md Haris)

 - Remove experimental marker on bcache async device registration (Coly)

 - Series cleaning up the bio splitting (Christoph)

 - Removal of the sx8 block driver. This hardware was never really widespread, and it didn't receive a lot of attention after the initial merge of it back in 2005 (Christoph)

 - A few fixes for s390 dasd (Eric, Jiang)

 - Followup set of fixes for ublk (Ming)

 - Support for UBLK_IO_NEED_GET_DATA for ublk (ZiyangZhang)

 - Fixes for the dio dma alignment (Keith)

 - Misc fixes and cleanups (Ming, Yu, Dan, Christophe)

* tag 'for-5.20/block-2022-08-04' of git://git.kernel.dk/linux-block: (136 commits)
  s390/dasd: Establish DMA alignment
  s390/dasd: drop unexpected word 'for' in comments
  ublk_drv: add support for UBLK_IO_NEED_GET_DATA
  ublk_cmd.h: add one new ublk command: UBLK_IO_NEED_GET_DATA
  ublk_drv: cleanup ublksrv_ctrl_dev_info
  ublk_drv: add SET_PARAMS/GET_PARAMS control command
  ublk_drv: fix ublk device leak in case that add_disk fails
  ublk_drv: cancel device even though disk isn't up
  block: fix leaking page ref on truncated direct io
  block: ensure bio_iov_add_page can't fail
  block: ensure iov_iter advances for added pages
  drivers:md:fix a potential use-after-free bug
  md/raid5: Ensure batch_last is released before sleeping for quiesce
  md/raid5: Move stripe_request_ctx up
  md/raid5: Drop unnecessary call to r5c_check_stripe_cache_usage()
  md/raid5: Make is_inactive_blocked() helper
  md/raid5: Refactor raid5_get_active_stripe()
  block: pass struct queue_limits to the bio splitting helpers
  block: move bio_allowed_max_sectors to blk-merge.c
  block: move the call to get_max_io_size out of blk_bio_segment_split
  ...
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/Kconfig                     |    1
-rw-r--r--  drivers/nvme/Makefile                    |    1
-rw-r--r--  drivers/nvme/common/Kconfig              |    4
-rw-r--r--  drivers/nvme/common/Makefile             |    7
-rw-r--r--  drivers/nvme/common/auth.c               |  483
-rw-r--r--  drivers/nvme/host/Kconfig                |   15
-rw-r--r--  drivers/nvme/host/Makefile               |    4
-rw-r--r--  drivers/nvme/host/apple.c                |   28
-rw-r--r--  drivers/nvme/host/auth.c                 | 1017
-rw-r--r--  drivers/nvme/host/constants.c            |    3
-rw-r--r--  drivers/nvme/host/core.c                 |  490
-rw-r--r--  drivers/nvme/host/fabrics.c              |   94
-rw-r--r--  drivers/nvme/host/fabrics.h              |    7
-rw-r--r--  drivers/nvme/host/multipath.c            |    9
-rw-r--r--  drivers/nvme/host/nvme.h                 |   39
-rw-r--r--  drivers/nvme/host/pci.c                  |  145
-rw-r--r--  drivers/nvme/host/rdma.c                 |  106
-rw-r--r--  drivers/nvme/host/tcp.c                  |   95
-rw-r--r--  drivers/nvme/host/trace.c                |   32
-rw-r--r--  drivers/nvme/host/trace.h                |    2
-rw-r--r--  drivers/nvme/target/Kconfig              |   15
-rw-r--r--  drivers/nvme/target/Makefile             |    1
-rw-r--r--  drivers/nvme/target/admin-cmd.c          |    4
-rw-r--r--  drivers/nvme/target/auth.c               |  525
-rw-r--r--  drivers/nvme/target/configfs.c           |  136
-rw-r--r--  drivers/nvme/target/core.c               |   15
-rw-r--r--  drivers/nvme/target/fabrics-cmd-auth.c   |  544
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c        |   55
-rw-r--r--  drivers/nvme/target/loop.c               |    8
-rw-r--r--  drivers/nvme/target/nvmet.h              |   75
-rw-r--r--  drivers/nvme/target/tcp.c                |    3
31 files changed, 3609 insertions(+), 354 deletions(-)
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index 87ae409a32b9..656e46d938da 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "NVME Support"
+source "drivers/nvme/common/Kconfig"
source "drivers/nvme/host/Kconfig"
source "drivers/nvme/target/Kconfig"
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index fb42c44609a8..eedca8c72098 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NVME_COMMON) += common/
obj-y += host/
obj-y += target/
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
new file mode 100644
index 000000000000..4514f44362dd
--- /dev/null
+++ b/drivers/nvme/common/Kconfig
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NVME_COMMON
+ tristate
diff --git a/drivers/nvme/common/Makefile b/drivers/nvme/common/Makefile
new file mode 100644
index 000000000000..720c625b8a52
--- /dev/null
+++ b/drivers/nvme/common/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_NVME_COMMON) += nvme-common.o
+
+nvme-common-y += auth.o
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
new file mode 100644
index 000000000000..04bd28f17dcc
--- /dev/null
+++ b/drivers/nvme/common/auth.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <linux/scatterlist.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include <linux/nvme.h>
+#include <linux/nvme-auth.h>
+
+static u32 nvme_dhchap_seqnum;
+static DEFINE_MUTEX(nvme_dhchap_mutex);
+
+u32 nvme_auth_get_seqnum(void)
+{
+ u32 seqnum;
+
+ mutex_lock(&nvme_dhchap_mutex);
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum = prandom_u32();
+ else {
+ nvme_dhchap_seqnum++;
+ if (!nvme_dhchap_seqnum)
+ nvme_dhchap_seqnum++;
+ }
+ seqnum = nvme_dhchap_seqnum;
+ mutex_unlock(&nvme_dhchap_mutex);
+ return seqnum;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
+
+static struct nvme_auth_dhgroup_map {
+ const char name[16];
+ const char kpp[16];
+} dhgroup_map[] = {
+ [NVME_AUTH_DHGROUP_NULL] = {
+ .name = "null", .kpp = "null" },
+ [NVME_AUTH_DHGROUP_2048] = {
+ .name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
+ [NVME_AUTH_DHGROUP_3072] = {
+ .name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
+ [NVME_AUTH_DHGROUP_4096] = {
+ .name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
+ [NVME_AUTH_DHGROUP_6144] = {
+ .name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
+ [NVME_AUTH_DHGROUP_8192] = {
+ .name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
+};
+
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].name;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
+
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
+{
+ if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
+ return NULL;
+ return dhgroup_map[dhgroup_id].kpp;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
+
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
+{
+ int i;
+
+ if (!dhgroup_name || !strlen(dhgroup_name))
+ return NVME_AUTH_DHGROUP_INVALID;
+ for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
+ if (!strlen(dhgroup_map[i].name))
+ continue;
+ if (!strncmp(dhgroup_map[i].name, dhgroup_name,
+ strlen(dhgroup_map[i].name)))
+ return i;
+ }
+ return NVME_AUTH_DHGROUP_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
+
+static struct nvme_dhchap_hash_map {
+ int len;
+ const char hmac[15];
+ const char digest[8];
+} hash_map[] = {
+ [NVME_AUTH_HASH_SHA256] = {
+ .len = 32,
+ .hmac = "hmac(sha256)",
+ .digest = "sha256",
+ },
+ [NVME_AUTH_HASH_SHA384] = {
+ .len = 48,
+ .hmac = "hmac(sha384)",
+ .digest = "sha384",
+ },
+ [NVME_AUTH_HASH_SHA512] = {
+ .len = 64,
+ .hmac = "hmac(sha512)",
+ .digest = "sha512",
+ },
+};
+
+const char *nvme_auth_hmac_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].hmac;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
+
+const char *nvme_auth_digest_name(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return NULL;
+ return hash_map[hmac_id].digest;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
+
+u8 nvme_auth_hmac_id(const char *hmac_name)
+{
+ int i;
+
+ if (!hmac_name || !strlen(hmac_name))
+ return NVME_AUTH_HASH_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
+ if (!strlen(hash_map[i].hmac))
+ continue;
+ if (!strncmp(hash_map[i].hmac, hmac_name,
+ strlen(hash_map[i].hmac)))
+ return i;
+ }
+ return NVME_AUTH_HASH_INVALID;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
+
+size_t nvme_auth_hmac_hash_len(u8 hmac_id)
+{
+ if (hmac_id >= ARRAY_SIZE(hash_map))
+ return 0;
+ return hash_map[hmac_id].len;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
+
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash)
+{
+ struct nvme_dhchap_key *key;
+ unsigned char *p;
+ u32 crc;
+ int ret, key_len;
+ size_t allocated_len = strlen(secret);
+
+ /* Secret might be affixed with a ':' */
+ p = strrchr(secret, ':');
+ if (p)
+ allocated_len = p - secret;
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return ERR_PTR(-ENOMEM);
+ key->key = kzalloc(allocated_len, GFP_KERNEL);
+ if (!key->key) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ key_len = base64_decode(secret, allocated_len, key->key);
+ if (key_len < 0) {
+ pr_debug("base64 key decoding error %d\n",
+ key_len);
+ ret = key_len;
+ goto out_free_secret;
+ }
+
+ if (key_len != 36 && key_len != 52 &&
+ key_len != 68) {
+ pr_err("Invalid key len %d\n", key_len);
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ if (key_hash > 0 &&
+ (key_len - 4) != nvme_auth_hmac_hash_len(key_hash)) {
+ pr_err("Mismatched key len %d for %s\n", key_len,
+ nvme_auth_hmac_name(key_hash));
+ ret = -EINVAL;
+ goto out_free_secret;
+ }
+
+ /* The last four bytes is the CRC in little-endian format */
+ key_len -= 4;
+ /*
+ * The linux implementation doesn't do pre- and post-increments,
+ * so we have to do it manually.
+ */
+ crc = ~crc32(~0, key->key, key_len);
+
+ if (get_unaligned_le32(key->key + key_len) != crc) {
+ pr_err("key crc mismatch (key %08x, crc %08x)\n",
+ get_unaligned_le32(key->key + key_len), crc);
+ ret = -EKEYREJECTED;
+ goto out_free_secret;
+ }
+ key->len = key_len;
+ key->hash = key_hash;
+ return key;
+out_free_secret:
+ kfree_sensitive(key->key);
+out_free_key:
+ kfree(key);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
+
+void nvme_auth_free_key(struct nvme_dhchap_key *key)
+{
+ if (!key)
+ return;
+ kfree_sensitive(key->key);
+ kfree(key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free_key);
+
+u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn)
+{
+ const char *hmac_name;
+ struct crypto_shash *key_tfm;
+ struct shash_desc *shash;
+ u8 *transformed_key;
+ int ret;
+
+ if (!key || !key->key) {
+ pr_warn("No key specified\n");
+ return ERR_PTR(-ENOKEY);
+ }
+ if (key->hash == 0) {
+ transformed_key = kmemdup(key->key, key->len, GFP_KERNEL);
+ return transformed_key ? transformed_key : ERR_PTR(-ENOMEM);
+ }
+ hmac_name = nvme_auth_hmac_name(key->hash);
+ if (!hmac_name) {
+ pr_warn("Invalid key hash id %d\n", key->hash);
+ return ERR_PTR(-EINVAL);
+ }
+
+ key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(key_tfm))
+ return (u8 *)key_tfm;
+
+ shash = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(key_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_key;
+ }
+
+ transformed_key = kzalloc(crypto_shash_digestsize(key_tfm), GFP_KERNEL);
+ if (!transformed_key) {
+ ret = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ shash->tfm = key_tfm;
+ ret = crypto_shash_setkey(key_tfm, key->key, key->len);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_init(shash);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, nqn, strlen(nqn));
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
+ if (ret < 0)
+ goto out_free_transformed_key;
+ ret = crypto_shash_final(shash, transformed_key);
+ if (ret < 0)
+ goto out_free_transformed_key;
+
+ kfree(shash);
+ crypto_free_shash(key_tfm);
+
+ return transformed_key;
+
+out_free_transformed_key:
+ kfree_sensitive(transformed_key);
+out_free_shash:
+ kfree(shash);
+out_free_key:
+ crypto_free_shash(key_tfm);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
+
+static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
+{
+ const char *digest_name;
+ struct crypto_shash *tfm;
+ int ret;
+
+ digest_name = nvme_auth_digest_name(hmac_id);
+ if (!digest_name) {
+ pr_debug("%s: failed to get digest for %d\n", __func__,
+ hmac_id);
+ return -EINVAL;
+ }
+ tfm = crypto_alloc_shash(digest_name, 0, 0);
+ if (IS_ERR(tfm))
+ return -ENOMEM;
+
+ ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
+ if (ret < 0)
+ pr_debug("%s: Failed to hash digest len %zu\n", __func__,
+ skey_len);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *desc;
+ u8 *hashed_key;
+ const char *hmac_name;
+ int ret;
+
+ hashed_key = kmalloc(hlen, GFP_KERNEL);
+ if (!hashed_key)
+ return -ENOMEM;
+
+ ret = nvme_auth_hash_skey(hmac_id, skey,
+ skey_len, hashed_key);
+ if (ret < 0)
+ goto out_free_key;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ ret = -EINVAL;
+ goto out_free_key;
+ }
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out_free_key;
+ }
+
+ desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out_free_hash;
+ }
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, hashed_key, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_update(desc, challenge, hlen);
+ if (ret)
+ goto out_free_desc;
+
+ ret = crypto_shash_final(desc, aug);
+out_free_desc:
+ kfree_sensitive(desc);
+out_free_hash:
+ crypto_free_shash(tfm);
+out_free_key:
+ kfree_sensitive(hashed_key);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);
+
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid)
+{
+ int ret;
+
+ ret = crypto_kpp_set_secret(dh_tfm, NULL, 0);
+ if (ret)
+ pr_debug("failed to set private key, error %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey);
+
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ kpp_request_set_input(req, NULL, 0);
+ sg_init_one(&dst, host_key, host_key_len);
+ kpp_request_set_output(req, &dst, host_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);
+
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len)
+{
+ struct kpp_request *req;
+ struct crypto_wait wait;
+ struct scatterlist src, dst;
+ int ret;
+
+ req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ sg_init_one(&src, ctrl_key, ctrl_key_len);
+ kpp_request_set_input(req, &src, ctrl_key_len);
+ sg_init_one(&dst, sess_key, sess_key_len);
+ kpp_request_set_output(req, &dst, sess_key_len);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
+
+ kpp_request_free(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);
+
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
+{
+ struct nvme_dhchap_key *key;
+ u8 key_hash;
+
+ if (!secret) {
+ *ret_key = NULL;
+ return 0;
+ }
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
+ return -EINVAL;
+
+ /* Pass in the secret without the 'DHHC-1:XX:' prefix */
+ key = nvme_auth_extract_key(secret + 10, key_hash);
+ if (IS_ERR(key)) {
+ *ret_key = NULL;
+ return PTR_ERR(key);
+ }
+
+ *ret_key = key;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 877d2ec4ea9f..2f6a7f8c94e8 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -92,6 +92,21 @@ config NVME_TCP
If unsure, say N.
+config NVME_AUTH
+ bool "NVM Express over Fabrics In-Band Authentication"
+ depends on NVME_CORE
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This provides support for NVMe over Fabrics In-Band Authentication.
+
+ If unsure, say N.
+
config NVME_APPLE
tristate "Apple ANS2 NVM Express host driver"
depends on OF && BLOCK
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a36ae1612059..e27202d22c7d 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -10,12 +10,14 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
obj-$(CONFIG_NVME_APPLE) += nvme-apple.o
-nvme-core-y := core.o ioctl.o constants.o
+nvme-core-y += core.o ioctl.o
+nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o
nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_BLK_DEV_ZONED) += zns.o
nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o
nvme-core-$(CONFIG_NVME_HWMON) += hwmon.o
+nvme-core-$(CONFIG_NVME_AUTH) += auth.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 5c352d5d8ee6..5fc5ea196b40 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -845,11 +845,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
apple_nvme_handle_cq(&anv->adminq, true);
spin_unlock_irqrestore(&anv->lock, flags);
- blk_mq_tagset_busy_iter(&anv->tagset, nvme_cancel_request, &anv->ctrl);
- blk_mq_tagset_busy_iter(&anv->admin_tagset, nvme_cancel_request,
- &anv->ctrl);
- blk_mq_tagset_wait_completed_request(&anv->tagset);
- blk_mq_tagset_wait_completed_request(&anv->admin_tagset);
+ nvme_cancel_tagset(&anv->ctrl);
+ nvme_cancel_admin_tagset(&anv->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -1222,6 +1219,11 @@ static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
nvme_put_ctrl(&anv->ctrl);
}
+static void devm_apple_nvme_put_tag_set(void *data)
+{
+ blk_mq_free_tag_set(data);
+}
+
static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
int ret;
@@ -1238,8 +1240,7 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
if (ret)
return ret;
- ret = devm_add_action_or_reset(anv->dev,
- (void (*)(void *))blk_mq_free_tag_set,
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
&anv->admin_tagset);
if (ret)
return ret;
@@ -1263,8 +1264,8 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
ret = blk_mq_alloc_tag_set(&anv->tagset);
if (ret)
return ret;
- ret = devm_add_action_or_reset(
- anv->dev, (void (*)(void *))blk_mq_free_tag_set, &anv->tagset);
+ ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
+ &anv->tagset);
if (ret)
return ret;
@@ -1365,6 +1366,11 @@ static int apple_nvme_attach_genpd(struct apple_nvme *anv)
return 0;
}
+static void devm_apple_nvme_mempool_destroy(void *data)
+{
+ mempool_destroy(data);
+}
+
static int apple_nvme_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1462,8 +1468,8 @@ static int apple_nvme_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto put_dev;
}
- ret = devm_add_action_or_reset(
- anv->dev, (void (*)(void *))mempool_destroy, anv->iod_mempool);
+ ret = devm_add_action_or_reset(anv->dev,
+ devm_apple_nvme_mempool_destroy, anv->iod_mempool);
if (ret)
goto put_dev;
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
new file mode 100644
index 000000000000..c8a6db7c4498
--- /dev/null
+++ b/drivers/nvme/host/auth.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
+ */
+
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/prandom.h>
+#include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/dh.h>
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-auth.h>
+
+struct nvme_dhchap_queue_context {
+ struct list_head entry;
+ struct work_struct auth_work;
+ struct nvme_ctrl *ctrl;
+ struct crypto_shash *shash_tfm;
+ struct crypto_kpp *dh_tfm;
+ void *buf;
+ size_t buf_size;
+ int qid;
+ int error;
+ u32 s1;
+ u32 s2;
+ u16 transaction;
+ u8 status;
+ u8 hash_id;
+ size_t hash_len;
+ u8 dhgroup_id;
+ u8 c1[64];
+ u8 c2[64];
+ u8 response[64];
+ u8 *host_response;
+ u8 *ctrl_key;
+ int ctrl_key_len;
+ u8 *host_key;
+ int host_key_len;
+ u8 *sess_key;
+ int sess_key_len;
+};
+
+#define nvme_auth_flags_from_qid(qid) \
+ (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
+#define nvme_auth_queue_from_qid(ctrl, qid) \
+ (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
+
+static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
+ void *data, size_t data_len, bool auth_send)
+{
+ struct nvme_command cmd = {};
+ blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
+ struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ int ret;
+
+ cmd.auth_common.opcode = nvme_fabrics_command;
+ cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
+ cmd.auth_common.spsp0 = 0x01;
+ cmd.auth_common.spsp1 = 0x01;
+ if (auth_send) {
+ cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
+ cmd.auth_send.tl = cpu_to_le32(data_len);
+ } else {
+ cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
+ cmd.auth_receive.al = cpu_to_le32(data_len);
+ }
+
+ ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
+ qid == 0 ? NVME_QID_ANY : qid,
+ 0, flags);
+ if (ret > 0)
+ dev_warn(ctrl->device,
+ "qid %d auth_send failed with status %d\n", qid, ret);
+ else if (ret < 0)
+ dev_err(ctrl->device,
+ "qid %d auth_send failed with error %d\n", qid, ret);
+ return ret;
+}
+
+static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
+ struct nvmf_auth_dhchap_failure_data *data,
+ u16 transaction, u8 expected_msg)
+{
+ dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
+ __func__, qid, data->auth_type, data->auth_id);
+
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ return data->rescode_exp;
+ }
+ if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
+ data->auth_id != expected_msg) {
+ dev_warn(ctrl->device,
+ "qid %d invalid message %02x/%02x\n",
+ qid, data->auth_type, data->auth_id);
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ if (le16_to_cpu(data->t_id) != transaction) {
+ dev_warn(ctrl->device,
+ "qid %d invalid transaction ID %d\n",
+ qid, le16_to_cpu(data->t_id));
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ }
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
+ size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+ memset((u8 *)chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->sc_c = 0; /* No secure channel concatenation */
+ data->napd = 1;
+ data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
+ data->auth_protocol[0].dhchap.halen = 3;
+ data->auth_protocol[0].dhchap.dhlen = 6;
+ data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
+ data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
+ data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
+ data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
+ data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
+ data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
+ data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
+ data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
+ data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ size_t size = sizeof(*data) + data->hl + dhvlen;
+ const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
+ const char *hmac_name, *kpp_name;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ hmac_name = nvme_auth_hmac_name(data->hashid);
+ if (!hmac_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid HASH ID %d\n",
+ chap->qid, data->hashid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (chap->hash_id == data->hashid && chap->shash_tfm &&
+ !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
+ crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing hash %s\n",
+ chap->qid, hmac_name);
+ goto select_kpp;
+ }
+
+ /* Reset if hash cannot be reused */
+ if (chap->shash_tfm) {
+ crypto_free_shash(chap->shash_tfm);
+ chap->hash_id = 0;
+ chap->hash_len = 0;
+ }
+ chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
+ CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(chap->shash_tfm)) {
+ dev_warn(ctrl->device,
+ "qid %d: failed to allocate hash %s, error %ld\n",
+ chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %d\n",
+ chap->qid, data->hl);
+ crypto_free_shash(chap->shash_tfm);
+ chap->shash_tfm = NULL;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Reset host response if the hash had been changed */
+ if (chap->hash_id != data->hashid) {
+ kfree(chap->host_response);
+ chap->host_response = NULL;
+ }
+
+ chap->hash_id = data->hashid;
+ chap->hash_len = data->hl;
+ dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
+ chap->qid, hmac_name);
+
+select_kpp:
+ kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
+ if (!kpp_name) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH group id %d\n",
+ chap->qid, data->dhgid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ /* Leave previous dh_tfm intact */
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Clear host and controller key to avoid accidental reuse */
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+
+ if (chap->dhgroup_id == data->dhgid &&
+ (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
+ dev_dbg(ctrl->device,
+ "qid %d: reuse existing DH group %s\n",
+ chap->qid, gid_name);
+ goto skip_kpp;
+ }
+
+ /* Reset dh_tfm if it can't be reused */
+ if (chap->dh_tfm) {
+ crypto_free_kpp(chap->dh_tfm);
+ chap->dh_tfm = NULL;
+ }
+
+ if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
+ if (dhvlen == 0) {
+ dev_warn(ctrl->device,
+ "qid %d: empty DH value\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
+ if (IS_ERR(chap->dh_tfm)) {
+ int ret = PTR_ERR(chap->dh_tfm);
+
+ dev_warn(ctrl->device,
+ "qid %d: error %d initializing DH group %s\n",
+ chap->qid, ret, gid_name);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ chap->dh_tfm = NULL;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
+ chap->qid, gid_name);
+ } else if (dhvlen != 0) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid DH value for NULL DH\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+ chap->dhgroup_id = data->dhgid;
+
+skip_kpp:
+ chap->s1 = le32_to_cpu(data->seqnum);
+ memcpy(chap->c1, data->cval, chap->hash_len);
+ if (dhvlen) {
+ chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
+ if (!chap->ctrl_key) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+ chap->ctrl_key_len = dhvlen;
+ memcpy(chap->ctrl_key, data->cval + chap->hash_len,
+ dhvlen);
+ dev_dbg(ctrl->device, "ctrl public key %*ph\n",
+ (int)chap->ctrl_key_len, chap->ctrl_key);
+ }
+
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_reply_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ size += 2 * chap->hash_len;
+
+ if (chap->host_key_len)
+ size += chap->host_key_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return -EINVAL;
+ }
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->hl = chap->hash_len;
+ data->dhvlen = cpu_to_le16(chap->host_key_len);
+ memcpy(data->rval, chap->response, chap->hash_len);
+ if (ctrl->ctrl_key) {
+ get_random_bytes(chap->c2, chap->hash_len);
+ data->cvalid = 1;
+ chap->s2 = nvme_auth_get_seqnum();
+ memcpy(data->rval + chap->hash_len, chap->c2,
+ chap->hash_len);
+ dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, chap->c2);
+ } else {
+ memset(chap->c2, 0, chap->hash_len);
+ chap->s2 = 0;
+ }
+ data->seqnum = cpu_to_le32(chap->s2);
+ if (chap->host_key_len) {
+ dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
+ __func__, chap->qid,
+ chap->host_key_len, chap->host_key);
+ memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
+ chap->host_key_len);
+ }
+
+ return size;
+}
+
+static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success1_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ if (ctrl->ctrl_key)
+ size += chap->hash_len;
+
+ if (chap->buf_size < size) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ if (data->hl != chap->hash_len) {
+ dev_warn(ctrl->device,
+ "qid %d: invalid hash length %u\n",
+ chap->qid, data->hl);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ return NVME_SC_INVALID_FIELD;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: authenticated with hash %s dhgroup %s\n",
+ nvme_auth_hmac_name(chap->hash_id),
+ nvme_auth_dhgroup_name(chap->dhgroup_id));
+
+ if (!data->rvalid)
+ return 0;
+
+ /* Validate controller response */
+ if (memcmp(chap->response, data->rval, data->hl)) {
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, data->rval);
+ dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len,
+ chap->response);
+ dev_warn(ctrl->device,
+ "qid %d: controller authentication failed\n",
+ chap->qid);
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return NVME_SC_AUTH_REQUIRED;
+ }
+
+ /* Just print out information for the admin queue */
+ if (chap->qid == 0)
+ dev_info(ctrl->device,
+ "qid 0: controller authenticated\n");
+ return 0;
+}
+
+static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_success2_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ data->t_id = cpu_to_le16(chap->transaction);
+
+ return size;
+}
+
+static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ struct nvmf_auth_dhchap_failure_data *data = chap->buf;
+ size_t size = sizeof(*data);
+
+ memset(chap->buf, 0, size);
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ data->t_id = cpu_to_le16(chap->transaction);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = chap->status;
+
+ return size;
+}
+
+static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 buf[4], *challenge = chap->c1;
+ int ret;
+
+ dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s1, chap->transaction);
+
+ if (!chap->host_response) {
+ chap->host_response = nvme_auth_transform_key(ctrl->host_key,
+ ctrl->opts->host->nqn);
+ if (IS_ERR(chap->host_response)) {
+ ret = PTR_ERR(chap->host_response);
+ chap->host_response = NULL;
+ return ret;
+ }
+ } else {
+ dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
+ __func__, chap->qid);
+ }
+
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ chap->host_response, ctrl->host_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c1, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, sizeof(buf));
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c1)
+ kfree(challenge);
+ return ret;
+}
+
+static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
+ u8 *ctrl_response;
+ u8 buf[4], *challenge = chap->c2;
+ int ret;
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->opts->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ return ret;
+ }
+ ret = crypto_shash_setkey(chap->shash_tfm,
+ ctrl_response, ctrl->ctrl_key->len);
+ if (ret) {
+ dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
+ chap->qid, ret);
+ goto out;
+ }
+
+ if (chap->dh_tfm) {
+ challenge = kmalloc(chap->hash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = nvme_auth_augmented_challenge(chap->hash_id,
+ chap->sess_key,
+ chap->sess_key_len,
+ chap->c2, challenge,
+ chap->hash_len);
+ if (ret)
+ goto out;
+ }
+ dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
+ __func__, chap->qid, chap->s2, chap->transaction);
+ dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
+ __func__, chap->qid, (int)chap->hash_len, challenge);
+ dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
+ __func__, chap->qid, ctrl->opts->subsysnqn);
+ dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
+ __func__, chap->qid, ctrl->opts->host->nqn);
+ shash->tfm = chap->shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, chap->hash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(chap->s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(chap->transaction, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
+ strlen(ctrl->opts->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
+ strlen(ctrl->opts->host->nqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, chap->response);
+out:
+ if (challenge != chap->c2)
+ kfree(challenge);
+ kfree(ctrl_response);
+ return ret;
+}
+
+static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ int ret;
+
+ if (chap->host_key && chap->host_key_len) {
+ dev_dbg(ctrl->device,
+ "qid %d: reusing host key\n", chap->qid);
+ goto gen_sesskey;
+ }
+ ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
+ if (ret < 0) {
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+ chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);
+
+ chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
+ if (!chap->host_key) {
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(chap->dh_tfm,
+ chap->host_key, chap->host_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate public key, error %d\n", ret);
+ kfree(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+
+gen_sesskey:
+ chap->sess_key_len = chap->host_key_len;
+ chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
+ if (!chap->sess_key) {
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ return -ENOMEM;
+ }
+
+ ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
+ chap->ctrl_key, chap->ctrl_key_len,
+ chap->sess_key, chap->sess_key_len);
+ if (ret) {
+ dev_dbg(ctrl->device,
+ "failed to generate shared secret, error %d\n", ret);
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ return ret;
+ }
+ dev_dbg(ctrl->device, "shared secret %*ph\n",
+ (int)chap->sess_key_len, chap->sess_key);
+ return 0;
+}
+
+static void __nvme_auth_reset(struct nvme_dhchap_queue_context *chap)
+{
+ kfree_sensitive(chap->host_response);
+ chap->host_response = NULL;
+ kfree_sensitive(chap->host_key);
+ chap->host_key = NULL;
+ chap->host_key_len = 0;
+ kfree_sensitive(chap->ctrl_key);
+ chap->ctrl_key = NULL;
+ chap->ctrl_key_len = 0;
+ kfree_sensitive(chap->sess_key);
+ chap->sess_key = NULL;
+ chap->sess_key_len = 0;
+ chap->status = 0;
+ chap->error = 0;
+ chap->s1 = 0;
+ chap->s2 = 0;
+ chap->transaction = 0;
+ memset(chap->c1, 0, sizeof(chap->c1));
+ memset(chap->c2, 0, sizeof(chap->c2));
+}
+
+static void __nvme_auth_free(struct nvme_dhchap_queue_context *chap)
+{
+ __nvme_auth_reset(chap);
+ if (chap->shash_tfm)
+ crypto_free_shash(chap->shash_tfm);
+ if (chap->dh_tfm)
+ crypto_free_kpp(chap->dh_tfm);
+ kfree_sensitive(chap->ctrl_key);
+ kfree_sensitive(chap->host_key);
+ kfree_sensitive(chap->sess_key);
+ kfree_sensitive(chap->host_response);
+ kfree(chap->buf);
+ kfree(chap);
+}
+
+static void __nvme_auth_work(struct work_struct *work)
+{
+ struct nvme_dhchap_queue_context *chap =
+ container_of(work, struct nvme_dhchap_queue_context, auth_work);
+ struct nvme_ctrl *ctrl = chap->ctrl;
+ size_t tl;
+ int ret = 0;
+
+ chap->transaction = ctrl->transaction++;
+
+ /* DH-HMAC-CHAP Step 1: send negotiate */
+ dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ return;
+ }
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ return;
+ }
+
+ /* DH-HMAC-CHAP Step 2: receive challenge */
+ dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive challenge, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
+ if (ret) {
+ /* Invalid challenge parameters */
+ chap->error = ret;
+ goto fail2;
+ }
+
+ if (chap->ctrl_key_len) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d DH exponential\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_exponential(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ dev_dbg(ctrl->device, "%s: qid %d host response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 3: send reply */
+ dev_dbg(ctrl->device, "%s: qid %d send reply\n",
+ __func__, chap->qid);
+ ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
+ if (ret < 0) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ tl = ret;
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+
+ /* DH-HMAC-CHAP Step 4: receive success1 */
+ dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
+ __func__, chap->qid);
+
+ memset(chap->buf, 0, chap->buf_size);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, chap->buf_size, false);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d failed to receive success1, %s %d\n",
+ chap->qid, ret < 0 ? "error" : "nvme status", ret);
+ chap->error = ret;
+ return;
+ }
+ ret = nvme_auth_receive_validate(ctrl, chap->qid,
+ chap->buf, chap->transaction,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
+ if (ret) {
+ chap->status = ret;
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ return;
+ }
+
+ if (ctrl->ctrl_key) {
+ dev_dbg(ctrl->device,
+ "%s: qid %d controller response\n",
+ __func__, chap->qid);
+ ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
+ if (ret) {
+ chap->error = ret;
+ goto fail2;
+ }
+ }
+
+ ret = nvme_auth_process_dhchap_success1(ctrl, chap);
+ if (ret) {
+ /* Controller authentication failed */
+ chap->error = NVME_SC_AUTH_REQUIRED;
+ goto fail2;
+ }
+
+ if (ctrl->ctrl_key) {
+ /* DH-HMAC-CHAP Step 5: send success2 */
+ dev_dbg(ctrl->device, "%s: qid %d send success2\n",
+ __func__, chap->qid);
+ tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ if (ret)
+ chap->error = ret;
+ }
+ if (!ret) {
+ chap->error = 0;
+ return;
+ }
+
+fail2:
+ dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
+ __func__, chap->qid, chap->status);
+ tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
+ ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
+ /*
+ * only update error if send failure2 failed and no other
+ * error had been set during authentication.
+ */
+ if (ret && !chap->error)
+ chap->error = ret;
+}
+
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ if (!ctrl->host_key) {
+ dev_warn(ctrl->device, "qid %d: no key\n", qid);
+ return -ENOKEY;
+ }
+
+ if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
+ dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
+ return -ENOKEY;
+ }
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ /* Check if the context is already queued */
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ WARN_ON(!chap->buf);
+ if (chap->qid == qid) {
+ dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+ }
+ }
+ chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+ if (!chap) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENOMEM;
+ }
+ chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
+ chap->ctrl = ctrl;
+
+ /*
+ * Allocate a large enough buffer for the entire negotiation:
+ * 4k should be enough to ffdhe8192.
+ */
+ chap->buf_size = 4096;
+ chap->buf = kzalloc(chap->buf_size, GFP_KERNEL);
+ if (!chap->buf) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ kfree(chap);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&chap->auth_work, __nvme_auth_work);
+ list_add(&chap->entry, &ctrl->dhchap_auth_list);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ queue_work(nvme_wq, &chap->auth_work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
+
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ struct nvme_dhchap_queue_context *chap;
+ int ret;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ if (chap->qid != qid)
+ continue;
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ ret = chap->error;
+ return ret;
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_wait);
+
+void nvme_auth_reset(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ flush_work(&chap->auth_work);
+ __nvme_auth_reset(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_reset);
+
+static void nvme_dhchap_auth_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, dhchap_auth_work);
+ int ret, q;
+
+ /* Authenticate admin queue first */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: error %d setting up authentication\n", ret);
+ return;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ return;
+ }
+
+ for (q = 1; q < ctrl->queue_count; q++) {
+ ret = nvme_auth_negotiate(ctrl, q);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: error %d setting up authentication\n",
+ q, ret);
+ break;
+ }
+ }
+
+ /*
+ * Failure is a soft-state; credentials remain valid until
+ * the controller terminates the connection.
+ */
+}
+
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
+{
+ INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+ INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
+ mutex_init(&ctrl->dhchap_auth_mutex);
+ if (!ctrl->opts)
+ return;
+ nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
+ nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, &ctrl->ctrl_key);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
+
+void nvme_auth_stop(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ cancel_work_sync(&ctrl->dhchap_auth_work);
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+ cancel_work_sync(&chap->auth_work);
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+}
+EXPORT_SYMBOL_GPL(nvme_auth_stop);
+
+void nvme_auth_free(struct nvme_ctrl *ctrl)
+{
+ struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+
+ mutex_lock(&ctrl->dhchap_auth_mutex);
+ list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
+ list_del_init(&chap->entry);
+ flush_work(&chap->auth_work);
+ __nvme_auth_free(chap);
+ }
+ mutex_unlock(&ctrl->dhchap_auth_mutex);
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_auth_free);
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 4910543f00ff..e958d5015585 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -6,7 +6,6 @@
#include "nvme.h"
-#ifdef CONFIG_NVME_VERBOSE_ERRORS
static const char * const nvme_ops[] = {
[nvme_cmd_flush] = "Flush",
[nvme_cmd_write] = "Write",
@@ -178,6 +177,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
return nvme_ops[opcode];
return "Unknown";
}
+EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
@@ -185,4 +185,3 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
return nvme_admin_ops[opcode];
return "Unknown";
}
-#endif /* CONFIG_NVME_VERBOSE_ERRORS */
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2533b88e66d5..2429b11eb9a8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -24,12 +24,22 @@
#include "nvme.h"
#include "fabrics.h"
+#include <linux/nvme-auth.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#define NVME_MINORS (1U << MINORBITS)
+struct nvme_ns_info {
+ struct nvme_ns_ids ids;
+ u32 nsid;
+ __le32 anagrpid;
+ bool is_shared;
+ bool is_readonly;
+ bool is_ready;
+};
+
unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
@@ -330,6 +340,7 @@ enum nvme_disposition {
COMPLETE,
RETRY,
FAILOVER,
+ AUTHENTICATE,
};
static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
@@ -337,6 +348,9 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
if (likely(nvme_req(req)->status == 0))
return COMPLETE;
+ if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED)
+ return AUTHENTICATE;
+
if (blk_noretry_request(req) ||
(nvme_req(req)->status & NVME_SC_DNR) ||
nvme_req(req)->retries >= nvme_max_retries)
@@ -375,11 +389,13 @@ static inline void nvme_end_req(struct request *req)
void nvme_complete_rq(struct request *req)
{
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
+
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- if (nvme_req(req)->ctrl->kas)
- nvme_req(req)->ctrl->comp_seen = true;
+ if (ctrl->kas)
+ ctrl->comp_seen = true;
switch (nvme_decide_disposition(req)) {
case COMPLETE:
@@ -391,6 +407,14 @@ void nvme_complete_rq(struct request *req)
case FAILOVER:
nvme_failover_req(req);
return;
+ case AUTHENTICATE:
+#ifdef CONFIG_NVME_AUTH
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+ nvme_retry_req(req);
+#else
+ nvme_end_req(req);
+#endif
+ return;
}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -702,7 +726,9 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
- req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
+ (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
+ req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
return true;
break;
default:
@@ -990,8 +1016,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags)
+ int qid, int at_head, blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -1000,15 +1025,12 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
else
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
- qid ? qid - 1 : 0);
+ qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
- if (timeout)
- req->timeout = timeout;
-
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
if (ret)
@@ -1028,7 +1050,7 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1329,8 +1351,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
}
}
-static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
struct nvme_command c = { };
bool csi_seen = false;
@@ -1343,7 +1365,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
return 0;
c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.nsid = cpu_to_le32(info->nsid);
c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
@@ -1355,7 +1377,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (status) {
dev_warn(ctrl->device,
"Identify Descriptors failed (nsid=%u, status=0x%x)\n",
- nsid, status);
+ info->nsid, status);
goto free_data;
}
@@ -1365,7 +1387,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (cur->nidl == 0)
break;
- len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
+ len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
if (len < 0)
break;
@@ -1374,7 +1396,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
if (nvme_multi_css(ctrl) && !csi_seen) {
dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
- nsid);
+ info->nsid);
status = -EINVAL;
}
@@ -1384,7 +1406,7 @@ free_data:
}
static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids, struct nvme_id_ns **id)
+ struct nvme_id_ns **id)
{
struct nvme_command c = { };
int error;
@@ -1407,51 +1429,66 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
error = NVME_SC_INVALID_NS | NVME_SC_DNR;
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
+ return 0;
+out_free_id:
+ kfree(*id);
+ return error;
+}
+static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
+{
+ struct nvme_ns_ids *ids = &info->ids;
+ struct nvme_id_ns *id;
+ int ret;
+
+ ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = true;
if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
dev_info(ctrl->device,
"Ignoring bogus Namespace Identifiers\n");
} else {
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0) &&
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
-
+ kfree(id);
return 0;
-
-out_free_id:
- kfree(*id);
- return error;
}
-static int nvme_identify_ns_cs_indep(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_id_ns_cs_indep **id)
+static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
+ struct nvme_ns_info *info)
{
+ struct nvme_id_ns_cs_indep *id;
struct nvme_command c = {
.identify.opcode = nvme_admin_identify,
- .identify.nsid = cpu_to_le32(nsid),
+ .identify.nsid = cpu_to_le32(info->nsid),
.identify.cns = NVME_ID_CNS_NS_CS_INDEP,
};
int ret;
- *id = kmalloc(sizeof(**id), GFP_KERNEL);
- if (!*id)
+ id = kmalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
return -ENOMEM;
- ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
- if (ret) {
- dev_warn(ctrl->device,
- "Identify namespace (CS independent) failed (%d)\n",
- ret);
- kfree(*id);
- return ret;
+ ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
+ if (!ret) {
+ info->anagrpid = id->anagrpid;
+ info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+ info->is_ready = id->nstat & NVME_NSTAT_NRDY;
}
-
- return 0;
+ kfree(id);
+ return ret;
}
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
@@ -1466,7 +1503,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1875,6 +1912,11 @@ static void nvme_update_disk_info(struct gendisk *disk,
ns->ctrl->max_zeroes_sectors);
}
+static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
+}
+
static inline bool nvme_first_scan(struct gendisk *disk)
{
/* nvme_alloc_ns() scans the disk prior to adding it */
@@ -1912,12 +1954,44 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
blk_queue_chunk_sectors(ns->queue, iob);
}
-static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
+static int nvme_update_ns_info_generic(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
{
- unsigned lbaf = nvme_lbaf_index(id->flbas);
+ blk_mq_freeze_queue(ns->disk->queue);
+ nvme_set_queue_limits(ns->ctrl, ns->queue);
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_mq_unfreeze_queue(ns->disk->queue);
+
+ if (nvme_ns_head_multipath(ns->head)) {
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+ blk_stack_limits(&ns->head->disk->queue->limits,
+ &ns->queue->limits, 0);
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ /* Hide the block-interface for these devices */
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+
+ return 0;
+}
+
+static int nvme_update_ns_info_block(struct nvme_ns *ns,
+ struct nvme_ns_info *info)
+{
+ struct nvme_id_ns *id;
+ unsigned lbaf;
int ret;
+ ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
+ if (ret)
+ return ret;
+
blk_mq_freeze_queue(ns->disk->queue);
+ lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
@@ -1927,36 +2001,35 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf);
- if (ret)
- goto out_unfreeze;
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
}
- set_disk_ro(ns->disk, (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
- return ret;
+ goto out;
}
if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id);
- set_disk_ro(ns->head->disk,
- (id->nsattr & NVME_NS_ATTR_RO) ||
- test_bit(NVME_NS_FORCE_RO, &ns->flags));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits,
&ns->queue->limits, 0);
disk_update_readahead(ns->head->disk);
blk_mq_unfreeze_queue(ns->head->disk->queue);
}
- return 0;
-out_unfreeze:
+ ret = 0;
+out:
/*
* If probing fails due an unsupported feature, hide the block device,
* but still allow other access.
@@ -1966,10 +2039,31 @@ out_unfreeze:
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
- blk_mq_unfreeze_queue(ns->disk->queue);
+ kfree(id);
return ret;
}
+static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
+{
+ switch (info->ids.csi) {
+ case NVME_CSI_ZNS:
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
+ info->nsid);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+ return nvme_update_ns_info_block(ns, info);
+ case NVME_CSI_NVM:
+ return nvme_update_ns_info_block(ns, info);
+ default:
+ dev_info(ns->ctrl->device,
+ "block device for nsid %u not supported (csi %u)\n",
+ info->nsid, info->ids.csi);
+ return nvme_update_ns_info_generic(ns, info);
+ }
+}
+
static char nvme_pr_type(enum pr_type type)
{
switch (type) {
@@ -2103,7 +2197,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
cmd.common.cdw11 = cpu_to_le32(len);
- return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
+ return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
@@ -2123,6 +2217,7 @@ static int nvme_report_zones(struct gendisk *disk, sector_t sector,
static const struct block_device_operations nvme_bdev_ops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -3613,6 +3708,108 @@ static ssize_t dctype_show(struct device *dev,
}
static DEVICE_ATTR_RO(dctype);
+#ifdef CONFIG_NVME_AUTH
+static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->host_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+
+ if (!opts->dhchap_ctrl_secret)
+ return sysfs_emit(buf, "none\n");
+ return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
+}
+
+static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ char *dhchap_secret;
+
+ if (!ctrl->opts->dhchap_ctrl_secret)
+ return -EINVAL;
+ if (count < 7)
+ return -EINVAL;
+ if (memcmp(buf, "DHHC-1:", 7))
+ return -EINVAL;
+
+ dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ memcpy(dhchap_secret, buf, count);
+ nvme_auth_stop(ctrl);
+ if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
+ int ret;
+
+ ret = nvme_auth_generate_key(dhchap_secret, &ctrl->ctrl_key);
+ if (ret)
+ return ret;
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = dhchap_secret;
+ /* Key has changed; re-authenticate with the new key */
+ nvme_auth_reset(ctrl);
+ }
+ /* Start re-authentication */
+ dev_info(ctrl->device, "re-authenticating controller\n");
+ queue_work(nvme_wq, &ctrl->dhchap_auth_work);
+
+ return count;
+}
+static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
+ nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
+#endif
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -3636,6 +3833,10 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_kato.attr,
&dev_attr_cntrltype.attr,
&dev_attr_dctype.attr,
+#ifdef CONFIG_NVME_AUTH
+ &dev_attr_dhchap_secret.attr,
+ &dev_attr_dhchap_ctrl_secret.attr,
+#endif
NULL
};
@@ -3659,6 +3860,12 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
return 0;
+#ifdef CONFIG_NVME_AUTH
+ if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
+ return 0;
+#endif
return a->mode;
}
@@ -3786,7 +3993,7 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
}
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
- unsigned nsid, struct nvme_ns_ids *ids, bool is_shared)
+ struct nvme_ns_info *info)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3808,9 +4015,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
if (ret)
goto out_ida_remove;
head->subsys = ctrl->subsys;
- head->ns_id = nsid;
- head->ids = *ids;
- head->shared = is_shared;
+ head->ns_id = info->nsid;
+ head->ids = info->ids;
+ head->shared = info->is_shared;
kref_init(&head->ref);
if (head->ids.csi) {
@@ -3867,54 +4074,54 @@ static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
return ret;
}
-static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
- struct nvme_ns_ids *ids, bool is_shared)
+static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_ns_head *head = NULL;
int ret;
- ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
- "globally duplicate IDs for nsid %d\n", nsid);
+ "globally duplicate IDs for nsid %d\n", info->nsid);
nvme_print_device_info(ctrl);
return ret;
}
mutex_lock(&ctrl->subsys->lock);
- head = nvme_find_ns_head(ctrl, nsid);
+ head = nvme_find_ns_head(ctrl, info->nsid);
if (!head) {
- ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, ids);
+ ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
"duplicate IDs in subsystem for nsid %d\n",
- nsid);
+ info->nsid);
goto out_unlock;
}
- head = nvme_alloc_ns_head(ctrl, nsid, ids, is_shared);
+ head = nvme_alloc_ns_head(ctrl, info);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
} else {
ret = -EINVAL;
- if (!is_shared || !head->shared) {
+ if (!info->is_shared || !head->shared) {
dev_err(ctrl->device,
- "Duplicate unshared namespace %d\n", nsid);
+ "Duplicate unshared namespace %d\n",
+ info->nsid);
goto out_put_ns_head;
}
- if (!nvme_ns_ids_equal(&head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
- nsid);
+ info->nsid);
goto out_put_ns_head;
}
if (!multipath && !list_empty(&head->list)) {
dev_warn(ctrl->device,
"Found shared namespace %d, but multipathing not supported.\n",
- nsid);
+ info->nsid);
dev_warn_once(ctrl->device,
"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
}
@@ -3968,20 +4175,15 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
list_add(&ns->list, &ns->ctrl->namespaces);
}
-static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
- struct nvme_ns_ids *ids)
+static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
{
struct nvme_ns *ns;
struct gendisk *disk;
- struct nvme_id_ns *id;
int node = ctrl->numa_node;
- if (nvme_identify_ns(ctrl, nsid, ids, &id))
- return;
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
- goto out_free_id;
+ return;
disk = blk_mq_alloc_disk(ctrl->tagset, ns);
if (IS_ERR(disk))
@@ -4002,7 +4204,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
ns->ctrl = ctrl;
kref_init(&ns->kref);
- if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
+ if (nvme_init_ns_head(ns, info))
goto out_cleanup_disk;
/*
@@ -4028,7 +4230,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
ns->head->instance);
}
- if (nvme_update_ns_info(ns, id))
+ if (nvme_update_ns_info(ns, info))
goto out_unlink_ns;
down_write(&ctrl->namespaces_rwsem);
@@ -4042,9 +4244,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (!nvme_ns_head_multipath(ns->head))
nvme_add_ns_cdev(ns);
- nvme_mpath_add_disk(ns, id);
+ nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
- kfree(id);
return;
@@ -4064,8 +4265,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
put_disk(disk);
out_free_ns:
kfree(ns);
- out_free_id:
- kfree(id);
}
static void nvme_ns_remove(struct nvme_ns *ns)
@@ -4123,29 +4322,21 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
}
}
-static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
+static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
{
- struct nvme_id_ns *id;
int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
if (test_bit(NVME_NS_DEAD, &ns->flags))
goto out;
- ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id);
- if (ret)
- goto out;
-
ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
- if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
+ if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
dev_err(ns->ctrl->device,
"identifiers changed for nsid %d\n", ns->head->ns_id);
- goto out_free_id;
+ goto out;
}
- ret = nvme_update_ns_info(ns, id);
-
-out_free_id:
- kfree(id);
+ ret = nvme_update_ns_info(ns, info);
out:
/*
* Only remove the namespace if we got a fatal error back from the
@@ -4157,59 +4348,47 @@ out:
nvme_ns_remove(ns);
}
-static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns_ids ids = { };
- struct nvme_id_ns_cs_indep *id;
+ struct nvme_ns_info info = { .nsid = nsid };
struct nvme_ns *ns;
- bool ready = true;
- if (nvme_identify_ns_descs(ctrl, nsid, &ids))
+ if (nvme_identify_ns_descs(ctrl, &info))
return;
+ if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
+ dev_warn(ctrl->device,
+ "command set not reported for nsid: %d\n", nsid);
+ return;
+ }
+
/*
- * Check if the namespace is ready. If not ignore it, we will get an
- * AEN once it becomes ready and restart the scan.
+ * If available, try to use the Command Set Independent Identify Namespace
+ * data structure to find all the generic information that is needed to
+ * set up a namespace. If not, fall back to the legacy version.
*/
- if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) &&
- !nvme_identify_ns_cs_indep(ctrl, nsid, &id)) {
- ready = id->nstat & NVME_NSTAT_NRDY;
- kfree(id);
+ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+ (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
+ if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
+ return;
+ } else {
+ if (nvme_ns_info_from_identify(ctrl, &info))
+ return;
}
- if (!ready)
+ /*
+ * Ignore the namespace if it is not ready. We will get an AEN once it
+ * becomes ready and restart the scan.
+ */
+ if (!info.is_ready)
return;
ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
- nvme_validate_ns(ns, &ids);
+ nvme_validate_ns(ns, &info);
nvme_put_ns(ns);
- return;
- }
-
- switch (ids.csi) {
- case NVME_CSI_NVM:
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- case NVME_CSI_ZNS:
- if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
- dev_warn(ctrl->device,
- "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
- nsid);
- break;
- }
- if (!nvme_multi_css(ctrl)) {
- dev_warn(ctrl->device,
- "command set not reported for nsid: %d\n",
- nsid);
- break;
- }
- nvme_alloc_ns(ctrl, nsid, &ids);
- break;
- default:
- dev_warn(ctrl->device, "unknown csi %u for nsid %u\n",
- ids.csi, nsid);
- break;
+ } else {
+ nvme_alloc_ns(ctrl, &info);
}
}
@@ -4265,7 +4444,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
if (!nsid) /* end of the list? */
goto out;
- nvme_validate_or_alloc_ns(ctrl, nsid);
+ nvme_scan_ns(ctrl, nsid);
while (++prev < nsid)
nvme_ns_remove_by_nsid(ctrl, prev);
}
@@ -4288,7 +4467,7 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
kfree(id);
for (i = 1; i <= nn; i++)
- nvme_validate_or_alloc_ns(ctrl, i);
+ nvme_scan_ns(ctrl, i);
nvme_remove_invalid_namespaces(ctrl, nn);
}
@@ -4525,9 +4704,19 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_get_fw_slot_info(ctrl);
}
+static u32 nvme_aer_type(u32 result)
+{
+ return result & 0x7;
+}
+
+static u32 nvme_aer_subtype(u32 result)
+{
+ return (result & 0xff00) >> 8;
+}
+
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
- u32 aer_notice_type = (result & 0xff00) >> 8;
+ u32 aer_notice_type = nvme_aer_subtype(result);
trace_nvme_async_event(ctrl, aer_notice_type);
@@ -4542,8 +4731,10 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
* recovery actions from interfering with the controller's
* firmware activation.
*/
- if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
+ nvme_auth_stop(ctrl);
queue_work(nvme_wq, &ctrl->fw_act_work);
+ }
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
@@ -4560,11 +4751,19 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
}
}
+static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+{
+ trace_nvme_async_event(ctrl, NVME_AER_ERROR);
+ dev_warn(ctrl->device, "resetting controller due to AER\n");
+ nvme_reset_ctrl(ctrl);
+}
+
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- u32 aer_type = result & 0x07;
+ u32 aer_type = nvme_aer_type(result);
+ u32 aer_subtype = nvme_aer_subtype(result);
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
@@ -4574,6 +4773,15 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
+ /*
+ * For a persistent internal error, don't run async_event_work
+ * to submit a new AER. The controller reset will do it.
+ */
+ if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
+ nvme_handle_aer_persistent_error(ctrl);
+ return;
+ }
+ fallthrough;
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
@@ -4590,6 +4798,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
nvme_stop_failfast_work(ctrl);
flush_work(&ctrl->async_event_work);
@@ -4649,6 +4858,8 @@ static void nvme_free_ctrl(struct device *dev)
nvme_free_cels(ctrl);
nvme_mpath_uninit(ctrl);
+ nvme_auth_stop(ctrl);
+ nvme_auth_free(ctrl);
__free_page(ctrl->discard_page);
if (subsys) {
@@ -4739,6 +4950,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
nvme_mpath_init_ctrl(ctrl);
+ nvme_auth_init_ctrl(ctrl);
return 0;
out_free_name:
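
For reference, the core.c hunks above fold the old (nsid, nvme_ns_ids, nvme_id_ns) parameter triples into a single per-namespace scan context that is filled either from the Command Set Independent Identify Namespace data or from the legacy Identify Namespace data. A minimal sketch of that context, inferred only from the info-> accesses visible in this diff; the definition actually introduced earlier in core.c by this series may differ in detail:

struct nvme_ns_info {
	struct nvme_ns_ids ids;	/* EUI-64/NGUID/UUID/CSI from the descriptor list */
	u32 nsid;
	__le32 anagrpid;	/* passed through to nvme_mpath_add_disk() unconverted */
	bool is_shared;		/* NMIC shared-namespace bit */
	bool is_readonly;	/* NSATTR read-only bit */
	bool is_ready;		/* NSTAT NRDY, or assumed true on the legacy Identify path */
};
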
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..5207a2348257 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -152,7 +152,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.fctype = nvme_fabrics_type_property_get;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -198,7 +198,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.attrib = 1;
cmd.prop_get.offset = cpu_to_le32(off);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
NVME_QID_ANY, 0, 0);
if (ret >= 0)
@@ -243,7 +243,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.offset = cpu_to_le32(off);
cmd.prop_set.value = cpu_to_le64(val);
- ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
+ ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
NVME_QID_ANY, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
@@ -331,6 +331,10 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
dev_err(ctrl->device,
"Connect command failed: host path error\n");
break;
+ case NVME_SC_AUTH_REQUIRED:
+ dev_err(ctrl->device,
+ "Connect command failed: authentication required\n");
+ break;
default:
dev_err(ctrl->device,
"Connect command failed, error wo/DNR bit: %d\n",
@@ -365,6 +369,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
union nvme_result res;
struct nvmf_connect_data *data;
int ret;
+ u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -389,7 +394,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), 0, NVME_QID_ANY, 1,
+ data, sizeof(*data), NVME_QID_ANY, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
@@ -397,8 +402,25 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
goto out_free_data;
}
- ctrl->cntlid = le16_to_cpu(res.u16);
-
+ result = le32_to_cpu(res.u32);
+ ctrl->cntlid = result & 0xFFFF;
+ if ((result >> 16) & 0x3) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, 0);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid 0: authentication setup failed\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
+ ret = nvme_auth_wait(ctrl, 0);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid 0: authentication failed\n");
+ else
+ dev_info(ctrl->device,
+ "qid 0: authenticated\n");
+ }
out_free_data:
kfree(data);
return ret;
@@ -431,6 +453,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
struct nvmf_connect_data *data;
union nvme_result res;
int ret;
+ u32 result;
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
@@ -450,12 +473,27 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), 0, qid, 1,
+ data, sizeof(*data), qid, 1,
BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
}
+ result = le32_to_cpu(res.u32);
+ if ((result >> 16) & 2) {
+ /* Authentication required */
+ ret = nvme_auth_negotiate(ctrl, qid);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "qid %d: authentication setup failed\n", qid);
+ ret = NVME_SC_AUTH_REQUIRED;
+ } else {
+ ret = nvme_auth_wait(ctrl, qid);
+ if (ret)
+ dev_warn(ctrl->device,
+ "qid %u: authentication failed\n", qid);
+ }
+ }
kfree(data);
return ret;
}
@@ -548,6 +586,8 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
+ { NVMF_OPT_DHCHAP_SECRET, "dhchap_secret=%s" },
+ { NVMF_OPT_DHCHAP_CTRL_SECRET, "dhchap_ctrl_secret=%s" },
{ NVMF_OPT_ERR, NULL }
};
@@ -829,6 +869,34 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
+ case NVMF_OPT_DHCHAP_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_secret);
+ opts->dhchap_secret = p;
+ break;
+ case NVMF_OPT_DHCHAP_CTRL_SECRET:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) < 11 || strncmp(p, "DHHC-1:", 7)) {
+ pr_err("Invalid DH-CHAP secret %s\n", p);
+ ret = -EINVAL;
+ goto out;
+ }
+ kfree(opts->dhchap_ctrl_secret);
+ opts->dhchap_ctrl_secret = p;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -947,6 +1015,8 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts)
kfree(opts->subsysnqn);
kfree(opts->host_traddr);
kfree(opts->host_iface);
+ kfree(opts->dhchap_secret);
+ kfree(opts->dhchap_ctrl_secret);
kfree(opts);
}
EXPORT_SYMBOL_GPL(nvmf_free_options);
@@ -956,7 +1026,8 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
- NVMF_OPT_FAIL_FAST_TMO)
+ NVMF_OPT_FAIL_FAST_TMO | NVMF_OPT_DHCHAP_SECRET |\
+ NVMF_OPT_DHCHAP_CTRL_SECRET)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf)
@@ -1192,7 +1263,14 @@ static void __exit nvmf_exit(void)
BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_send_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_receive_command) != 64);
BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_negotiate_data) != 8);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_challenge_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_reply_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success1_data) != 16);
+ BUILD_BUG_ON(sizeof(struct nvmf_auth_dhchap_success2_data) != 16);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..a6e22116e139 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,8 @@ enum {
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
NVMF_OPT_DISCOVERY = 1 << 22,
+ NVMF_OPT_DHCHAP_SECRET = 1 << 23,
+ NVMF_OPT_DHCHAP_CTRL_SECRET = 1 << 24,
};
/**
@@ -97,6 +99,9 @@ enum {
* @max_reconnects: maximum number of allowed reconnect attempts before removing
* the controller, (-1) means reconnect forever, zero means remove
* immediately;
+ * @dhchap_secret: DH-HMAC-CHAP secret
+ * @dhchap_ctrl_secret: DH-HMAC-CHAP controller secret for bi-directional
+ * authentication
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
@@ -121,6 +126,8 @@ struct nvmf_ctrl_options {
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
+ char *dhchap_secret;
+ char *dhchap_ctrl_secret;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
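
The two options documented above carry DH-HMAC-CHAP secrets in the textual "DHHC-1:<hash id>:<encoded key>" form; nvmf_parse_options() only checks the "DHHC-1:" prefix and a minimum length, and the target-side nvmet_auth_set_key() additionally validates the hash id. A hedged illustration of how the new options might be appended to a controller-creation string written to /dev/nvme-fabrics; the address, NQN and secret payloads below are placeholders, not working values:

/*
 * Illustrative only: connect options with the new dhchap_secret and
 * dhchap_ctrl_secret keys; the secrets here are dummy base64 payloads.
 */
static const char example_connect_opts[] =
	"transport=tcp,traddr=192.0.2.10,trsvcid=4420,"
	"nqn=nqn.2014-08.org.nvmexpress:example-subsys,"
	"dhchap_secret=DHHC-1:00:MTIzNDU2Nzg5MGFiY2RlZg==:,"
	"dhchap_ctrl_secret=DHHC-1:00:ZmVkY2JhMDk4NzY1NDMyMQ==:";
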
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f26640ccb955..6ef497c75a16 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -346,7 +346,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
* different queue via blk_steal_bios(), so we need to use the bio_split
* pool from the original queue to allocate the bvecs from.
*/
- blk_queue_split(&bio);
+ bio = bio_split_to_limits(bio);
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
@@ -408,6 +408,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ns_head_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
.getgeo = nvme_getgeo,
.report_zones = nvme_ns_head_report_zones,
.pr_ops = &nvme_pr_ops,
@@ -800,16 +801,16 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
return -ENXIO; /* just break out of the loop */
}
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
struct nvme_ana_group_desc desc = {
- .grpid = id->anagrpid,
+ .grpid = anagrpid,
.state = 0,
};
mutex_lock(&ns->ctrl->ana_lock);
- ns->ana_grpid = le32_to_cpu(id->anagrpid);
+ ns->ana_grpid = le32_to_cpu(anagrpid);
nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
mutex_unlock(&ns->ctrl->ana_lock);
if (desc.state) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7e0a925bf3be..bdc0ff7ed9ab 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -140,7 +140,7 @@ enum nvme_quirks {
NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
/*
- * The controller requires the command_id value be be limited, so skip
+ * The controller requires the command_id value be limited, so skip
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
@@ -328,6 +328,15 @@ struct nvme_ctrl {
struct work_struct ana_work;
#endif
+#ifdef CONFIG_NVME_AUTH
+ struct work_struct dhchap_auth_work;
+ struct list_head dhchap_auth_list;
+ struct mutex dhchap_auth_mutex;
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u16 transaction;
+#endif
+
/* Power saving configuration */
u64 ps_max_latency_us;
bool apst_enabled;
@@ -781,7 +790,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head,
+ int qid, int at_head,
blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
@@ -837,7 +846,7 @@ void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
-void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
@@ -879,8 +888,7 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
{
return 0;
}
-static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
- struct nvme_id_ns *id)
+static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -992,6 +1000,27 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
return ctrl->sgls & ((1 << 0) | (1 << 1));
}
+#ifdef CONFIG_NVME_AUTH
+void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
+void nvme_auth_stop(struct nvme_ctrl *ctrl);
+int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
+int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
+void nvme_auth_reset(struct nvme_ctrl *ctrl);
+void nvme_auth_free(struct nvme_ctrl *ctrl);
+#else
+static inline void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) {};
+static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
+static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
+{
+ return -EPROTONOSUPPORT;
+}
+static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
+{
+ return NVME_SC_AUTH_REQUIRED;
+}
+static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
+#endif
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7e7d4802ac6b..71a4f26ba476 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -670,7 +670,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->first_dma = dma_addr;
iod->npages = -1;
return BLK_STS_RESOURCE;
}
@@ -1435,8 +1434,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device,
- "I/O %d QID %d timeout, aborting\n",
- req->tag, nvmeq->qid);
+ "I/O %d (%s) QID %d timeout, aborting\n",
+ req->tag,
+ nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
+ nvmeq->qid);
abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
BLK_MQ_REQ_NOWAIT);
@@ -1765,37 +1766,35 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
}
}
-static int nvme_alloc_admin_tags(struct nvme_dev *dev)
+static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev)
{
- if (!dev->ctrl.admin_q) {
- dev->admin_tagset.ops = &nvme_mq_admin_ops;
- dev->admin_tagset.nr_hw_queues = 1;
+ struct blk_mq_tag_set *set = &dev->admin_tagset;
- dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev->ctrl.numa_node;
- dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
- dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
- dev->admin_tagset.driver_data = dev;
+ set->ops = &nvme_mq_admin_ops;
+ set->nr_hw_queues = 1;
- if (blk_mq_alloc_tag_set(&dev->admin_tagset))
- return -ENOMEM;
- dev->ctrl.admin_tagset = &dev->admin_tagset;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_NO_SCHED;
+ set->driver_data = dev;
- dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (IS_ERR(dev->ctrl.admin_q)) {
- blk_mq_free_tag_set(&dev->admin_tagset);
- dev->ctrl.admin_q = NULL;
- return -ENOMEM;
- }
- if (!blk_get_queue(dev->ctrl.admin_q)) {
- nvme_dev_remove_admin(dev);
- dev->ctrl.admin_q = NULL;
- return -ENODEV;
- }
- } else
- nvme_start_admin_queue(&dev->ctrl);
+ if (blk_mq_alloc_tag_set(set))
+ return -ENOMEM;
+ dev->ctrl.admin_tagset = set;
+ dev->ctrl.admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(dev->ctrl.admin_q)) {
+ blk_mq_free_tag_set(set);
+ dev->ctrl.admin_q = NULL;
+ return -ENOMEM;
+ }
+ if (!blk_get_queue(dev->ctrl.admin_q)) {
+ nvme_dev_remove_admin(dev);
+ dev->ctrl.admin_q = NULL;
+ return -ENODEV;
+ }
return 0;
}
@@ -2534,47 +2533,45 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
return true;
}
-static void nvme_dev_add(struct nvme_dev *dev)
+static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
{
+ struct blk_mq_tag_set *set = &dev->tagset;
int ret;
- if (!dev->ctrl.tagset) {
- dev->tagset.ops = &nvme_mq_ops;
- dev->tagset.nr_hw_queues = dev->online_queues - 1;
- dev->tagset.nr_maps = 2; /* default + read */
- if (dev->io_queues[HCTX_TYPE_POLL])
- dev->tagset.nr_maps++;
- dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev->ctrl.numa_node;
- dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
- BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = sizeof(struct nvme_iod);
- dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
- dev->tagset.driver_data = dev;
+ set->ops = &nvme_mq_ops;
+ set->nr_hw_queues = dev->online_queues - 1;
+ set->nr_maps = 2; /* default + read */
+ if (dev->io_queues[HCTX_TYPE_POLL])
+ set->nr_maps++;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->numa_node = dev->ctrl.numa_node;
+ set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
+ set->cmd_size = sizeof(struct nvme_iod);
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->driver_data = dev;
- /*
- * Some Apple controllers requires tags to be unique
- * across admin and IO queue, so reserve the first 32
- * tags of the IO queue.
- */
- if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
- dev->tagset.reserved_tags = NVME_AQ_DEPTH;
-
- ret = blk_mq_alloc_tag_set(&dev->tagset);
- if (ret) {
- dev_warn(dev->ctrl.device,
- "IO queues tagset allocation failed %d\n", ret);
- return;
- }
- dev->ctrl.tagset = &dev->tagset;
- } else {
- blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /*
+ * Some Apple controllers require tags to be unique
+ * across admin and IO queue, so reserve the first 32
+ * tags of the IO queue.
+ */
+ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
+ set->reserved_tags = NVME_AQ_DEPTH;
- /* Free previously allocated queues that are no longer usable */
- nvme_free_queues(dev, dev->online_queues);
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return;
}
+ dev->ctrl.tagset = set;
+}
- nvme_dbbuf_set(dev);
+static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+{
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
}
static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2725,10 +2722,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_pci_disable(dev);
nvme_reap_pending_cqes(dev);
- blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
- blk_mq_tagset_wait_completed_request(&dev->tagset);
- blk_mq_tagset_wait_completed_request(&dev->admin_tagset);
+ nvme_cancel_tagset(&dev->ctrl);
+ nvme_cancel_admin_tagset(&dev->ctrl);
/*
* The driver will not be starting up queues again if shutting down so
@@ -2842,9 +2837,13 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out_unlock;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto out_unlock;
+ if (!dev->ctrl.admin_q) {
+ result = nvme_pci_alloc_admin_tag_set(dev);
+ if (result)
+ goto out_unlock;
+ } else {
+ nvme_start_admin_queue(&dev->ctrl);
+ }
/*
* Limit the max command size to prevent iod->sg allocations going
@@ -2923,7 +2922,11 @@ static void nvme_reset_work(struct work_struct *work)
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ if (!dev->ctrl.tagset)
+ nvme_pci_alloc_tag_set(dev);
+ else
+ nvme_pci_update_nr_queues(dev);
+ nvme_dbbuf_set(dev);
nvme_unfreeze(&dev->ctrl);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4665aebd944d..3100643be299 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -29,7 +29,7 @@
#include "fabrics.h"
-#define NVME_RDMA_CONNECT_TIMEOUT_MS 3000 /* 3 second */
+#define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 seconds */
#define NVME_RDMA_MAX_SEGMENTS 256
@@ -248,12 +248,9 @@ static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
int ret;
- ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
- msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
- if (ret < 0)
+ ret = wait_for_completion_interruptible(&queue->cm_done);
+ if (ret)
return ret;
- if (ret == 0)
- return -ETIMEDOUT;
WARN_ON_ONCE(queue->cm_error > 0);
return queue->cm_error;
}
@@ -612,7 +609,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
queue->cm_error = -ETIMEDOUT;
ret = rdma_resolve_addr(queue->cm_id, src_addr,
(struct sockaddr *)&ctrl->addr,
- NVME_RDMA_CONNECT_TIMEOUT_MS);
+ NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_info(ctrl->ctrl.device,
"rdma_resolve_addr failed (%d).\n", ret);
@@ -790,50 +787,54 @@ out_free_queues:
return ret;
}
-static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set;
+ struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- set->flags = BLK_MQ_F_NO_SCHED;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- if (nctrl->max_integrity_segments)
- set->cmd_size += sizeof(struct nvme_rdma_sgl) +
- NVME_RDMA_METADATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_rdma_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ set->flags = BLK_MQ_F_NO_SCHED;
ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (!ret)
+ ctrl->ctrl.admin_tagset = set;
+ return ret;
+}
+
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int ret;
- return set;
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_rdma_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
+ if (nctrl->max_integrity_segments)
+ set->cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (!ret)
+ ctrl->ctrl.tagset = set;
+ return ret;
}
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
@@ -885,11 +886,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_queue;
if (new) {
- ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
- if (IS_ERR(ctrl->ctrl.admin_tagset)) {
- error = PTR_ERR(ctrl->ctrl.admin_tagset);
+ error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+ if (error)
goto out_free_async_qe;
- }
ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.fabrics_q)) {
@@ -972,11 +971,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (new) {
- ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
- if (IS_ERR(ctrl->ctrl.tagset)) {
- ret = PTR_ERR(ctrl->ctrl.tagset);
+ ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
+ if (ret)
goto out_free_io_queues;
- }
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret)
@@ -1205,6 +1202,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
+ nvme_auth_stop(&ctrl->ctrl);
nvme_stop_keep_alive(&ctrl->ctrl);
flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
@@ -1894,7 +1892,7 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
if (ctrl->opts->tos >= 0)
rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
- ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS);
if (ret) {
dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
queue->cm_error);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b95ee85053e3..e82dcfcda29b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -209,9 +209,11 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
-static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
{
- return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+ if (nvme_is_fabrics(req->req.cmd))
+ return NVME_TCP_ADMIN_CCSZ;
+ return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
@@ -229,7 +231,7 @@ static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
rq = blk_mq_rq_from_pdu(req);
return rq_data_dir(rq) == WRITE && req->data_len &&
- req->data_len <= nvme_tcp_inline_data_size(req->queue);
+ req->data_len <= nvme_tcp_inline_data_size(req);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -1685,45 +1687,49 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
return ret;
}
-static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
- bool admin)
+static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set;
+ struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret;
- if (admin) {
- set = &ctrl->admin_tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- } else {
- set = &ctrl->tag_set;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- }
-
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_BLOCKING;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
ret = blk_mq_alloc_tag_set(set);
- if (ret)
- return ERR_PTR(ret);
+ if (!ret)
+ nctrl->admin_tagset = set;
+ return ret;
+}
- return set;
+static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct blk_mq_tag_set *set = &ctrl->tag_set;
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = nctrl->numa_node;
+ set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (!ret)
+ nctrl->tagset = set;
+ return ret;
}
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
@@ -1899,11 +1905,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
return ret;
if (new) {
- ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
- if (IS_ERR(ctrl->tagset)) {
- ret = PTR_ERR(ctrl->tagset);
+ ret = nvme_tcp_alloc_tag_set(ctrl);
+ if (ret)
goto out_free_io_queues;
- }
ret = nvme_ctrl_init_connect_q(ctrl);
if (ret)
@@ -1968,11 +1972,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return error;
if (new) {
- ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
- if (IS_ERR(ctrl->admin_tagset)) {
- error = PTR_ERR(ctrl->admin_tagset);
+ error = nvme_tcp_alloc_admin_tag_set(ctrl);
+ if (error)
goto out_free_queue;
- }
ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
if (IS_ERR(ctrl->fabrics_q)) {
@@ -2173,6 +2175,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
@@ -2371,7 +2374,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
nvme_tcp_set_sg_null(c);
else if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
nvme_tcp_set_sg_inline(queue, c, req->data_len);
else
nvme_tcp_set_sg_host_data(c, req->data_len);
@@ -2406,7 +2409,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
nvme_tcp_init_iter(req, rq_data_dir(rq));
if (rq_data_dir(rq) == WRITE &&
- req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->data_len <= nvme_tcp_inline_data_size(req))
req->pdu_len = req->data_len;
pdu->hdr.type = nvme_tcp_cmd;
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 2a89c5aa0790..1c36fcedea20 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -287,6 +287,34 @@ static const char *nvme_trace_fabrics_property_get(struct trace_seq *p, u8 *spc)
return ret;
}
+static const char *nvme_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvme_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvme_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -306,6 +334,10 @@ const char *nvme_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvme_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvme_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvme_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvme_trace_fabrics_auth_receive(p, spc);
default:
return nvme_trace_fabrics_common(p, spc);
}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 37c7f4c89f92..6f0eaf6a1528 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -98,7 +98,7 @@ TRACE_EVENT(nvme_complete_rq,
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
__entry->qid = nvme_req_qid(req);
- __entry->cid = req->tag;
+ __entry->cid = nvme_req(req)->cmd->common.command_id;
__entry->result = le64_to_cpu(nvme_req(req)->result.u64);
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 973561c93888..79fc64035ee3 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -83,3 +83,18 @@ config NVME_TARGET_TCP
devices over TCP.
If unsure, say N.
+
+config NVME_TARGET_AUTH
+ bool "NVMe over Fabrics In-band Authentication support"
+ depends on NVME_TARGET
+ select NVME_COMMON
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_DH
+ select CRYPTO_DH_RFC7919_GROUPS
+ help
+ This enables support for NVMe over Fabrics In-band Authentication.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 9837e580fa7e..c66820102493 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -13,6 +13,7 @@ nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o
nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o
+nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 397daaf51f1b..fc8a957fad0a 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1017,7 +1017,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
u16 ret;
if (nvme_is_fabrics(cmd))
- return nvmet_parse_fabrics_cmd(req);
+ return nvmet_parse_fabrics_admin_cmd(req);
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
new file mode 100644
index 000000000000..cf690df34775
--- /dev/null
+++ b/drivers/nvme/target/auth.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <crypto/hash.h>
+#include <linux/crc32.h>
+#include <linux/base64.h>
+#include <linux/ctype.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <asm/unaligned.h>
+
+#include "nvmet.h"
+
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl)
+{
+ unsigned char key_hash;
+ char *dhchap_secret;
+
+ if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1)
+ return -EINVAL;
+ if (key_hash > 3) {
+ pr_warn("Invalid DH-HMAC-CHAP hash id %d\n",
+ key_hash);
+ return -EINVAL;
+ }
+ if (key_hash > 0) {
+ /* Validate selected hash algorithm */
+ const char *hmac = nvme_auth_hmac_name(key_hash);
+
+ if (!crypto_has_shash(hmac, 0, 0)) {
+ pr_err("DH-HMAC-CHAP hash %s unsupported\n", hmac);
+ return -ENOTSUPP;
+ }
+ }
+ dhchap_secret = kstrdup(secret, GFP_KERNEL);
+ if (!dhchap_secret)
+ return -ENOMEM;
+ if (set_ctrl) {
+ host->dhchap_ctrl_secret = strim(dhchap_secret);
+ host->dhchap_ctrl_key_hash = key_hash;
+ } else {
+ host->dhchap_secret = strim(dhchap_secret);
+ host->dhchap_key_hash = key_hash;
+ }
+ return 0;
+}
+
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
+{
+ const char *dhgroup_kpp;
+ int ret = 0;
+
+ pr_debug("%s: ctrl %d selecting dhgroup %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+
+ if (ctrl->dh_tfm) {
+ if (ctrl->dh_gid == dhgroup_id) {
+ pr_debug("%s: ctrl %d reuse existing DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return 0;
+ }
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+
+ if (dhgroup_id == NVME_AUTH_DHGROUP_NULL)
+ return 0;
+
+ dhgroup_kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+ if (!dhgroup_kpp) {
+ pr_debug("%s: ctrl %d invalid DH group %d\n",
+ __func__, ctrl->cntlid, dhgroup_id);
+ return -EINVAL;
+ }
+ ctrl->dh_tfm = crypto_alloc_kpp(dhgroup_kpp, 0, 0);
+ if (IS_ERR(ctrl->dh_tfm)) {
+ pr_debug("%s: ctrl %d failed to setup DH group %d, err %ld\n",
+ __func__, ctrl->cntlid, dhgroup_id,
+ PTR_ERR(ctrl->dh_tfm));
+ ret = PTR_ERR(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ } else {
+ ctrl->dh_gid = dhgroup_id;
+ pr_debug("%s: ctrl %d setup DH group %d\n",
+ __func__, ctrl->cntlid, ctrl->dh_gid);
+ ret = nvme_auth_gen_privkey(ctrl->dh_tfm, ctrl->dh_gid);
+ if (ret < 0) {
+ pr_debug("%s: ctrl %d failed to generate private key, err %d\n",
+ __func__, ctrl->cntlid, ret);
+ kfree_sensitive(ctrl->dh_key);
+ return ret;
+ }
+ ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm);
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = kzalloc(ctrl->dh_keysize, GFP_KERNEL);
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d failed to allocate public key\n",
+ ctrl->cntlid);
+ return -ENOMEM;
+ }
+ ret = nvme_auth_gen_pubkey(ctrl->dh_tfm, ctrl->dh_key,
+ ctrl->dh_keysize);
+ if (ret < 0) {
+ pr_warn("ctrl %d failed to generate public key\n",
+ ctrl->cntlid);
+ kfree(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+ }
+ }
+
+ return ret;
+}
+
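+/*
+ * Look up the host entry matching ctrl->hostnqn under nvmet_config_sem and
+ * derive the host (and, if configured, controller) DH-CHAP keys from the
+ * stored secrets. Discovery subsystems and subsystems with allow_any_host
+ * set skip in-band authentication entirely.
+ */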
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ int ret = 0;
+ struct nvmet_host_link *p;
+ struct nvmet_host *host = NULL;
+ const char *hash_name;
+
+ down_read(&nvmet_config_sem);
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ goto out_unlock;
+
+ if (ctrl->subsys->allow_any_host)
+ goto out_unlock;
+
+ list_for_each_entry(p, &ctrl->subsys->hosts, entry) {
+ pr_debug("check %s\n", nvmet_host_name(p->host));
+ if (strcmp(nvmet_host_name(p->host), ctrl->hostnqn))
+ continue;
+ host = p->host;
+ break;
+ }
+ if (!host) {
+ pr_debug("host %s not found\n", ctrl->hostnqn);
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
+ if (ret < 0)
+ pr_warn("Failed to setup DH group");
+
+ if (!host->dhchap_secret) {
+ pr_debug("No authentication provided\n");
+ goto out_unlock;
+ }
+
+ if (host->dhchap_hash_id == ctrl->shash_id) {
+ pr_debug("Re-use existing hash ID %d\n",
+ ctrl->shash_id);
+ } else {
+ hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", host->dhchap_hash_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ ctrl->shash_id = host->dhchap_hash_id;
+ }
+
+ /* Skip the 'DHHC-1:XX:' prefix */
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = nvme_auth_extract_key(host->dhchap_secret + 10,
+ host->dhchap_key_hash);
+ if (IS_ERR(ctrl->host_key)) {
+ ret = PTR_ERR(ctrl->host_key);
+ ctrl->host_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using hash %s key %*ph\n", __func__,
+ ctrl->host_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->host_key->hash) : "none",
+ (int)ctrl->host_key->len, ctrl->host_key->key);
+
+ nvme_auth_free_key(ctrl->ctrl_key);
+ if (!host->dhchap_ctrl_secret) {
+ ctrl->ctrl_key = NULL;
+ goto out_unlock;
+ }
+
+ ctrl->ctrl_key = nvme_auth_extract_key(host->dhchap_ctrl_secret + 10,
+ host->dhchap_ctrl_key_hash);
+ if (IS_ERR(ctrl->ctrl_key)) {
+ ret = PTR_ERR(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ goto out_free_hash;
+ }
+ pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
+ ctrl->ctrl_key->hash > 0 ?
+ nvme_auth_hmac_name(ctrl->ctrl_key->hash) : "none",
+ (int)ctrl->ctrl_key->len, ctrl->ctrl_key->key);
+
+out_free_hash:
+ if (ret) {
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ ctrl->shash_id = 0;
+ }
+out_unlock:
+ up_read(&nvmet_config_sem);
+
+ return ret;
+}
+
+void nvmet_auth_sq_free(struct nvmet_sq *sq)
+{
+ cancel_delayed_work(&sq->auth_expired_work);
+ kfree(sq->dhchap_c1);
+ sq->dhchap_c1 = NULL;
+ kfree(sq->dhchap_c2);
+ sq->dhchap_c2 = NULL;
+ kfree(sq->dhchap_skey);
+ sq->dhchap_skey = NULL;
+}
+
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
+{
+ ctrl->shash_id = 0;
+
+ if (ctrl->dh_tfm) {
+ crypto_free_kpp(ctrl->dh_tfm);
+ ctrl->dh_tfm = NULL;
+ ctrl->dh_gid = 0;
+ }
+ kfree_sensitive(ctrl->dh_key);
+ ctrl->dh_key = NULL;
+
+ if (ctrl->host_key) {
+ nvme_auth_free_key(ctrl->host_key);
+ ctrl->host_key = NULL;
+ }
+ if (ctrl->ctrl_key) {
+ nvme_auth_free_key(ctrl->ctrl_key);
+ ctrl->ctrl_key = NULL;
+ }
+}
+
+bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ if (req->sq->ctrl->host_key &&
+ !req->sq->authenticated)
+ return false;
+ return true;
+}
+
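+/*
+ * Calculate the expected host response: an HMAC keyed with the transformed
+ * host key over the challenge C1 (augmented with the DH shared secret if a
+ * DH group was negotiated), the sequence number S1 (le32), the transaction
+ * id (le16), a zero byte, "HostHost", the host NQN, a zero byte and the
+ * subsystem NQN.
+ */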
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c1, *host_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ host_response = nvme_auth_transform_key(ctrl->host_key, ctrl->hostnqn);
+ if (IS_ERR(host_response)) {
+ ret = PTR_ERR(host_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, host_response,
+ ctrl->host_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c1,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ pr_debug("ctrl %d qid %d host response seq %u transaction %d\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid);
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s1, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "HostHost", 8);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c1)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(host_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return ret;
+}
+
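+/*
+ * Calculate the controller response for bidirectional authentication: an
+ * HMAC keyed with the transformed controller key over the challenge C2
+ * (augmented if a DH group was negotiated), the sequence number S2 (le32),
+ * the transaction id (le16), a zero byte, "Controller", the subsystem NQN,
+ * a zero byte and the host NQN.
+ */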
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int shash_len)
+{
+ struct crypto_shash *shash_tfm;
+ struct shash_desc *shash;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ const char *hash_name;
+ u8 *challenge = req->sq->dhchap_c2, *ctrl_response;
+ u8 buf[4];
+ int ret;
+
+ hash_name = nvme_auth_hmac_name(ctrl->shash_id);
+ if (!hash_name) {
+ pr_warn("Hash ID %d invalid\n", ctrl->shash_id);
+ return -EINVAL;
+ }
+
+ shash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+ if (IS_ERR(shash_tfm)) {
+ pr_err("failed to allocate shash %s\n", hash_name);
+ return PTR_ERR(shash_tfm);
+ }
+
+ if (shash_len != crypto_shash_digestsize(shash_tfm)) {
+ pr_debug("%s: hash len mismatch (len %d digest %d)\n",
+ __func__, shash_len,
+ crypto_shash_digestsize(shash_tfm));
+ ret = -EINVAL;
+ goto out_free_tfm;
+ }
+
+ ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key,
+ ctrl->subsysnqn);
+ if (IS_ERR(ctrl_response)) {
+ ret = PTR_ERR(ctrl_response);
+ goto out_free_tfm;
+ }
+
+ ret = crypto_shash_setkey(shash_tfm, ctrl_response,
+ ctrl->ctrl_key->len);
+ if (ret)
+ goto out_free_response;
+
+ if (ctrl->dh_gid != NVME_AUTH_DHGROUP_NULL) {
+ challenge = kmalloc(shash_len, GFP_KERNEL);
+ if (!challenge) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ ret = nvme_auth_augmented_challenge(ctrl->shash_id,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len,
+ req->sq->dhchap_c2,
+ challenge, shash_len);
+ if (ret)
+ goto out_free_response;
+ }
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto out_free_response;
+ }
+ shash->tfm = shash_tfm;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, challenge, shash_len);
+ if (ret)
+ goto out;
+ put_unaligned_le32(req->sq->dhchap_s2, buf);
+ ret = crypto_shash_update(shash, buf, 4);
+ if (ret)
+ goto out;
+ put_unaligned_le16(req->sq->dhchap_tid, buf);
+ ret = crypto_shash_update(shash, buf, 2);
+ if (ret)
+ goto out;
+ memset(buf, 0, 4);
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, "Controller", 10);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->subsysnqn,
+ strlen(ctrl->subsysnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, buf, 1);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(shash, response);
+out:
+ if (challenge != req->sq->dhchap_c2)
+ kfree(challenge);
+ kfree(shash);
+out_free_response:
+ kfree_sensitive(ctrl_response);
+out_free_tfm:
+ crypto_free_shash(shash_tfm);
+ return ret;
+}
+
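+/*
+ * Copy the controller's cached DH public value into the challenge payload;
+ * the key must have been generated by nvmet_setup_dhgroup() and the buffer
+ * must match its size exactly.
+ */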
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+
+ if (!ctrl->dh_key) {
+ pr_warn("ctrl %d no DH public key!\n", ctrl->cntlid);
+ return -ENOKEY;
+ }
+ if (buf_size != ctrl->dh_keysize) {
+ pr_warn("ctrl %d DH public key size mismatch, need %zu is %d\n",
+ ctrl->cntlid, ctrl->dh_keysize, buf_size);
+ ret = -EINVAL;
+ } else {
+ memcpy(buf, ctrl->dh_key, buf_size);
+ pr_debug("%s: ctrl %d public key %*ph\n", __func__,
+ ctrl->cntlid, (int)buf_size, buf);
+ }
+
+ return ret;
+}
+
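+/*
+ * Derive the DH shared secret from the host's public value in @pkey and
+ * store it in the submission queue for augmenting the challenges.
+ */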
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *pkey, int pkey_size)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret;
+
+ req->sq->dhchap_skey_len = ctrl->dh_keysize;
+ req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL);
+ if (!req->sq->dhchap_skey)
+ return -ENOMEM;
+ ret = nvme_auth_gen_shared_secret(ctrl->dh_tfm,
+ pkey, pkey_size,
+ req->sq->dhchap_skey,
+ req->sq->dhchap_skey_len);
+ if (ret)
+ pr_debug("failed to compute shared secret, err %d\n", ret);
+ else
+ pr_debug("%s: shared secret %*ph\n", __func__,
+ (int)req->sq->dhchap_skey_len,
+ req->sq->dhchap_skey);
+
+ return ret;
+}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index ff77c3d2354f..2bcd60758919 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -11,6 +11,11 @@
#include <linux/ctype.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
+#ifdef CONFIG_NVME_TARGET_AUTH
+#include <linux/nvme-auth.h>
+#endif
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
#include "nvmet.h"
@@ -1680,10 +1685,133 @@ static const struct config_item_type nvmet_ports_type = {
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
+#ifdef CONFIG_NVME_TARGET_AUTH
+static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, false);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_key);
+
+static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
+ char *page)
+{
+ u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
+
+ if (!dhchap_secret)
+ return sprintf(page, "\n");
+ return sprintf(page, "%s\n", dhchap_secret);
+}
+
+static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int ret;
+
+ ret = nvmet_auth_set_key(host, page, true);
+ /*
+ * Re-authentication is a soft state, so keep the
+ * current authentication valid until the host
+ * requests re-authentication.
+ */
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
+
+static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
+
+ return sprintf(page, "%s\n", hash_name ? hash_name : "none");
+}
+
+static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ u8 hmac_id;
+
+ hmac_id = nvme_auth_hmac_id(page);
+ if (hmac_id == NVME_AUTH_HASH_INVALID)
+ return -EINVAL;
+ if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
+ return -ENOTSUPP;
+ host->dhchap_hash_id = hmac_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
+
+static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_host *host = to_host(item);
+ const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
+
+ return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
+}
+
+static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_host *host = to_host(item);
+ int dhgroup_id;
+
+ dhgroup_id = nvme_auth_dhgroup_id(page);
+ if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
+ return -EINVAL;
+ if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
+ const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
+
+ if (!crypto_has_kpp(kpp, 0, 0))
+ return -EINVAL;
+ }
+ host->dhchap_dhgroup_id = dhgroup_id;
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
+
+static struct configfs_attribute *nvmet_host_attrs[] = {
+ &nvmet_host_attr_dhchap_key,
+ &nvmet_host_attr_dhchap_ctrl_key,
+ &nvmet_host_attr_dhchap_hash,
+ &nvmet_host_attr_dhchap_dhgroup,
+ NULL,
+};
+#endif /* CONFIG_NVME_TARGET_AUTH */
+
static void nvmet_host_release(struct config_item *item)
{
struct nvmet_host *host = to_host(item);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ kfree(host->dhchap_secret);
+ kfree(host->dhchap_ctrl_secret);
+#endif
kfree(host);
}
@@ -1693,6 +1821,9 @@ static struct configfs_item_operations nvmet_host_item_ops = {
static const struct config_item_type nvmet_host_type = {
.ct_item_ops = &nvmet_host_item_ops,
+#ifdef CONFIG_NVME_TARGET_AUTH
+ .ct_attrs = nvmet_host_attrs,
+#endif
.ct_owner = THIS_MODULE,
};
@@ -1705,6 +1836,11 @@ static struct config_group *nvmet_hosts_make_group(struct config_group *group,
if (!host)
return ERR_PTR(-ENOMEM);
+#ifdef CONFIG_NVME_TARGET_AUTH
+ /* Default to SHA256 */
+ host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
+#endif
+
config_group_init_type_name(&host->group, name, &nvmet_host_type);
return &host->group;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index c27660a660d9..a1345790005f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -795,6 +795,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
+ nvmet_auth_sq_free(sq);
if (ctrl) {
/*
@@ -865,8 +866,15 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
+ struct nvme_command *cmd = req->cmd;
u16 ret;
+ if (nvme_is_fabrics(cmd))
+ return nvmet_parse_fabrics_io_cmd(req);
+
+ if (unlikely(!nvmet_check_auth_status(req)))
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+
ret = nvmet_check_ctrl_status(req);
if (unlikely(ret))
return ret;
@@ -1271,6 +1279,11 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req)
req->cmd->common.opcode, req->sq->qid);
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
+
+ if (unlikely(!nvmet_check_auth_status(req))) {
+ pr_warn("qid %d not authenticated\n", req->sq->qid);
+ return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+ }
return 0;
}
@@ -1467,6 +1480,8 @@ static void nvmet_ctrl_free(struct kref *ref)
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
+ nvmet_destroy_auth(ctrl);
+
ida_free(&cntlid_ida, ctrl->cntlid);
nvmet_async_events_free(ctrl);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
new file mode 100644
index 000000000000..c851814d6cb0
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
+ * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
+ * All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/nvme-auth.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include "nvmet.h"
+
+static void nvmet_auth_expired_work(struct work_struct *work)
+{
+ struct nvmet_sq *sq = container_of(to_delayed_work(work),
+ struct nvmet_sq, auth_expired_work);
+
+ pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
+ __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ sq->dhchap_tid = -1;
+}
+
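+/*
+ * Mark the queue as not yet authenticated and set the AUTHREQ_ATR bit in
+ * the Connect response so the host knows it has to run DH-HMAC-CHAP before
+ * issuing other commands.
+ */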
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+ u32 result = le32_to_cpu(req->cqe->result.u32);
+
+ /* Initialize in-band authentication */
+ INIT_DELAYED_WORK(&req->sq->auth_expired_work,
+ nvmet_auth_expired_work);
+ req->sq->authenticated = false;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
+ req->cqe->result.u32 = cpu_to_le32(result);
+}
+
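+/*
+ * Process the AUTH_Negotiate payload: keep the currently selected hash and
+ * DH group if the host offers them, otherwise fall back to the first
+ * offered algorithm the target supports.
+ */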
+static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_negotiate_data *data = d;
+ int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
+
+ pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
+ data->auth_protocol[0].dhchap.halen,
+ data->auth_protocol[0].dhchap.dhlen);
+ req->sq->dhchap_tid = le16_to_cpu(data->t_id);
+ if (data->sc_c)
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+
+ if (data->napd != 1)
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+
+ if (data->auth_protocol[0].dhchap.authid !=
+ NVME_AUTH_DHCHAP_AUTH_ID)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+
+ for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
+ u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
+
+ if (!fallback_hash_id &&
+ crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
+ fallback_hash_id = host_hmac_id;
+ if (ctrl->shash_id != host_hmac_id)
+ continue;
+ hash_id = ctrl->shash_id;
+ break;
+ }
+ if (hash_id == 0) {
+ if (fallback_hash_id == 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_hmac_name(fallback_hash_id));
+ ctrl->shash_id = fallback_hash_id;
+ }
+
+ dhgid = -1;
+ fallback_dhgid = -1;
+ for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
+ int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
+
+ if (tmp_dhgid == ctrl->dh_gid) {
+ dhgid = tmp_dhgid;
+ break;
+ }
+ if (fallback_dhgid < 0) {
+ const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
+
+ if (crypto_has_kpp(kpp, 0, 0))
+ fallback_dhgid = tmp_dhgid;
+ }
+ }
+ if (dhgid < 0) {
+ if (fallback_dhgid < 0) {
+ pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+ pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(fallback_dhgid));
+ ctrl->dh_gid = fallback_dhgid;
+ }
+ pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
+ return 0;
+}
+
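+/*
+ * Process the AUTH_Reply payload: derive the DH session key if the host
+ * sent a public value, verify the host response against the locally
+ * calculated hash, and stash C2/S2 when the host requested bidirectional
+ * authentication.
+ */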
+static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_reply_data *data = d;
+ u16 dhvlen = le16_to_cpu(data->dhvlen);
+ u8 *response;
+
+ pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->hl, data->cvalid, dhvlen);
+
+ if (dhvlen) {
+ if (!ctrl->dh_tfm)
+ return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
+ dhvlen) < 0)
+ return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
+ }
+
+ response = kmalloc(data->hl, GFP_KERNEL);
+ if (!response)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+
+ if (!ctrl->host_key) {
+ pr_warn("ctrl %d qid %d no host key\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
+ pr_debug("ctrl %d qid %d host hash failed\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+
+ if (memcmp(data->rval, response, data->hl)) {
+ pr_info("ctrl %d qid %d host response mismatch\n",
+ ctrl->cntlid, req->sq->qid);
+ kfree(response);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ kfree(response);
+ pr_debug("%s: ctrl %d qid %d host authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ if (data->cvalid) {
+ req->sq->dhchap_c2 = kmalloc(data->hl, GFP_KERNEL);
+ if (!req->sq->dhchap_c2)
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ memcpy(req->sq->dhchap_c2, data->rval + data->hl, data->hl);
+
+ pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
+ __func__, ctrl->cntlid, req->sq->qid, data->hl,
+ req->sq->dhchap_c2);
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+ } else {
+ req->sq->authenticated = true;
+ req->sq->dhchap_c2 = NULL;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ return data->rescode_exp;
+}
+
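+/*
+ * Handle an AUTH_Send fabrics command: validate the security protocol
+ * fields, copy in the payload and advance the per-queue DH-CHAP state
+ * machine. Unless a final state is reached, the expiry work is re-armed so
+ * a stalled exchange resets the transaction after the keep-alive timeout
+ * (120 seconds if no KATO was set).
+ */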
+void nvmet_execute_auth_send(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_auth_dhchap_success2_data *data;
+ void *d;
+ u32 tl;
+ u16 status = 0;
+
+ if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_send.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, spsp1);
+ goto done;
+ }
+ tl = le32_to_cpu(req->cmd->auth_send.tl);
+ if (!tl) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_send_command, tl);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, tl)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
+ return;
+ }
+
+ d = kmalloc(tl, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+
+ status = nvmet_copy_from_sgl(req, 0, d, tl);
+ if (status) {
+ kfree(d);
+ goto done;
+ }
+
+ data = d;
+ pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
+ req->sq->dhchap_step);
+ if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
+ data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
+ goto done_failure1;
+ if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
+ if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
+ /* Restart negotiation */
+ pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__,
+ ctrl->cntlid, req->sq->qid);
+ if (!req->sq->qid) {
+ if (nvmet_setup_auth(ctrl) < 0) {
+ status = NVME_SC_INTERNAL;
+ pr_err("ctrl %d qid 0 failed to setup"
+ "re-authentication",
+ ctrl->cntlid);
+ goto done_failure1;
+ }
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
+ } else if (data->auth_id != req->sq->dhchap_step)
+ goto done_failure1;
+ /* Validate negotiation parameters */
+ status = nvmet_auth_negotiate(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ }
+ if (data->auth_id != req->sq->dhchap_step) {
+ pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ data->auth_id, req->sq->dhchap_step);
+ goto done_failure1;
+ }
+ if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
+ pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ le16_to_cpu(data->t_id),
+ req->sq->dhchap_tid);
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
+ goto done_kfree;
+ }
+
+ switch (data->auth_id) {
+ case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
+ status = nvmet_auth_reply(req, d);
+ if (status == 0)
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ else {
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = status;
+ status = 0;
+ }
+ goto done_kfree;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+ req->sq->authenticated = true;
+ pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ goto done_kfree;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
+ status = nvmet_auth_failure2(req, d);
+ if (status) {
+ pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ status = 0;
+ }
+ goto done_kfree;
+ break;
+ default:
+ req->sq->dhchap_status =
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step =
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+ req->sq->authenticated = false;
+ goto done_kfree;
+ break;
+ }
+done_failure1:
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
+
+done_kfree:
+ kfree(d);
+done:
+ pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status, req->sq->dhchap_step);
+ if (status)
+ pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
+ __func__, ctrl->cntlid, req->sq->qid,
+ status, req->error_loc);
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
+ req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
+ unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
+
+ mod_delayed_work(system_wq, &req->sq->auth_expired_work,
+ auth_expire_secs * HZ);
+ return;
+ }
+ /* Final states, clear up variables */
+ nvmet_auth_sq_free(req->sq);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
+ nvmet_ctrl_fatal_error(ctrl);
+}
+
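+/*
+ * Build the DH-CHAP_Challenge message: a fresh random challenge C1, a new
+ * sequence number S1 and, if a DH group was negotiated, the controller's
+ * public value.
+ */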
+static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_challenge_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int ret = 0;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+ int data_size = sizeof(*d) + hash_len;
+
+ if (ctrl->dh_tfm)
+ data_size += ctrl->dh_keysize;
+ if (al < data_size) {
+ pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
+ al, data_size);
+ return -EINVAL;
+ }
+ memset(data, 0, data_size);
+ req->sq->dhchap_s1 = nvme_auth_get_seqnum();
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hashid = ctrl->shash_id;
+ data->hl = hash_len;
+ data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
+ req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
+ if (!req->sq->dhchap_c1)
+ return -ENOMEM;
+ get_random_bytes(req->sq->dhchap_c1, data->hl);
+ memcpy(data->cval, req->sq->dhchap_c1, data->hl);
+ if (ctrl->dh_tfm) {
+ data->dhgid = ctrl->dh_gid;
+ data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
+ ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
+ ctrl->dh_keysize);
+ }
+ pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
+ __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
+ req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
+ return ret;
+}
+
+static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_success1_data *data = d;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
+
+ WARN_ON(al < sizeof(*data));
+ memset(data, 0, sizeof(*data));
+ data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->hl = hash_len;
+ if (req->sq->dhchap_c2) {
+ if (!ctrl->ctrl_key) {
+ pr_warn("ctrl %d qid %d no ctrl key\n",
+ ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
+ return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
+ data->rvalid = 1;
+ pr_debug("ctrl %d qid %d response %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+ }
+ return 0;
+}
+
+static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
+{
+ struct nvmf_auth_dhchap_failure_data *data = d;
+
+ WARN_ON(al < sizeof(*data));
+ data->auth_type = NVME_AUTH_COMMON_MESSAGES;
+ data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ data->t_id = cpu_to_le16(req->sq->dhchap_tid);
+ data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
+ data->rescode_exp = req->sq->dhchap_status;
+}
+
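+/*
+ * Handle an AUTH_Receive fabrics command: generate the challenge, success1
+ * or failure1 message matching the current DH-CHAP step and copy it back to
+ * the host.
+ */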
+void nvmet_execute_auth_receive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ void *d;
+ u32 al;
+ u16 status = 0;
+
+ if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, secp);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp0 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp0);
+ goto done;
+ }
+ if (req->cmd->auth_receive.spsp1 != 0x01) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, spsp1);
+ goto done;
+ }
+ al = le32_to_cpu(req->cmd->auth_receive.al);
+ if (!al) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ req->error_loc =
+ offsetof(struct nvmf_auth_receive_command, al);
+ goto done;
+ }
+ if (!nvmet_check_transfer_len(req, al)) {
+ pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
+ return;
+ }
+
+ d = kmalloc(al, GFP_KERNEL);
+ if (!d) {
+ status = NVME_SC_INTERNAL;
+ goto done;
+ }
+ pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ switch (req->sq->dhchap_step) {
+ case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
+ if (nvmet_auth_challenge(req, d, al) < 0) {
+ pr_warn("ctrl %d qid %d: challenge error (%d)\n",
+ ctrl->cntlid, req->sq->qid, status);
+ status = NVME_SC_INTERNAL;
+ break;
+ }
+ if (status) {
+ req->sq->dhchap_status = status;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: challenge status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ status = 0;
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
+ status = nvmet_auth_success1(req, d, al);
+ if (status) {
+ req->sq->dhchap_status = status;
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d: success1 status (%x)\n",
+ ctrl->cntlid, req->sq->qid,
+ req->sq->dhchap_status);
+ break;
+ }
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
+ break;
+ case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
+ req->sq->authenticated = false;
+ nvmet_auth_failure1(req, d, al);
+ pr_warn("ctrl %d qid %d failure1 (%x)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
+ break;
+ default:
+ pr_warn("ctrl %d qid %d unhandled step (%d)\n",
+ ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
+ req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
+ req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ nvmet_auth_failure1(req, d, al);
+ status = 0;
+ break;
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, d, al);
+ kfree(d);
+done:
+ req->cqe->result.u64 = 0;
+ nvmet_req_complete(req, status);
+ if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
+ nvmet_auth_sq_free(req->sq);
+ else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
+ nvmet_auth_sq_free(req->sq);
+ nvmet_ctrl_fatal_error(ctrl);
+ }
+}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 70fb587e9413..f91a56180d3d 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -82,7 +82,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -93,6 +93,37 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
case nvme_fabrics_type_property_get:
req->execute = nvmet_execute_prop_get;
break;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
+ default:
+ pr_debug("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ switch (cmd->fabrics.fctype) {
+#ifdef CONFIG_NVME_TARGET_AUTH
+ case nvme_fabrics_type_auth_send:
+ req->execute = nvmet_execute_auth_send;
+ break;
+ case nvme_fabrics_type_auth_receive:
+ req->execute = nvmet_execute_auth_receive;
+ break;
+#endif
default:
pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
@@ -173,6 +204,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
u16 status = 0;
+ int ret;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
@@ -215,18 +247,32 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
uuid_copy(&ctrl->hostid, &d->hostid);
+ ret = nvmet_setup_auth(ctrl);
+ if (ret < 0) {
+ pr_err("Failed to setup authentication, error %d\n", ret);
+ nvmet_ctrl_put(ctrl);
+ if (ret == -EPERM)
+ status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+ else
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
status = nvmet_install_queue(ctrl, req);
if (status) {
nvmet_ctrl_put(ctrl);
goto out;
}
- pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n",
nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
- ctrl->pi_support ? " T10-PI is enabled" : "");
+ ctrl->pi_support ? " T10-PI is enabled" : "",
+ nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (nvmet_has_auth(ctrl))
+ nvmet_init_auth(ctrl, req);
out:
kfree(d);
complete:
@@ -286,6 +332,9 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (nvmet_has_auth(ctrl))
+ nvmet_init_auth(ctrl, req);
out:
kfree(d);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 0f5c77e22a0a..9750a7fca268 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -424,9 +424,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
+ nvme_cancel_tagset(&ctrl->ctrl);
nvme_loop_destroy_io_queues(ctrl);
}
@@ -434,9 +432,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
- blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_loop_destroy_admin_queue(ctrl);
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 2b3e5719f24e..6ffeeb0a1c49 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -108,6 +108,19 @@ struct nvmet_sq {
u16 size;
u32 sqhd;
bool sqhd_disabled;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct delayed_work auth_expired_work;
+ bool authenticated;
+ u16 dhchap_tid;
+ u16 dhchap_status;
+ int dhchap_step;
+ u8 *dhchap_c1;
+ u8 *dhchap_c2;
+ u32 dhchap_s1;
+ u32 dhchap_s2;
+ u8 *dhchap_skey;
+ int dhchap_skey_len;
+#endif
struct completion free_done;
struct completion confirm_done;
};
@@ -209,6 +222,15 @@ struct nvmet_ctrl {
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
bool pi_support;
+#ifdef CONFIG_NVME_TARGET_AUTH
+ struct nvme_dhchap_key *host_key;
+ struct nvme_dhchap_key *ctrl_key;
+ u8 shash_id;
+ struct crypto_kpp *dh_tfm;
+ u8 dh_gid;
+ u8 *dh_key;
+ size_t dh_keysize;
+#endif
};
struct nvmet_subsys {
@@ -271,6 +293,12 @@ static inline struct nvmet_subsys *namespaces_to_subsys(
struct nvmet_host {
struct config_group group;
+ u8 *dhchap_secret;
+ u8 *dhchap_ctrl_secret;
+ u8 dhchap_key_hash;
+ u8 dhchap_ctrl_key_hash;
+ u8 dhchap_hash_id;
+ u8 dhchap_dhgroup_id;
};
static inline struct nvmet_host *to_host(struct config_item *item)
@@ -420,7 +448,8 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
-u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
+u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
@@ -668,4 +697,48 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_NVME_TARGET_AUTH
+void nvmet_execute_auth_send(struct nvmet_req *req);
+void nvmet_execute_auth_receive(struct nvmet_req *req);
+int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
+ bool set_ctrl);
+int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
+int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
+void nvmet_auth_sq_free(struct nvmet_sq *sq);
+int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
+bool nvmet_check_auth_status(struct nvmet_req *req);
+int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
+ unsigned int hash_len);
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return ctrl->host_key != NULL;
+}
+int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
+ u8 *buf, int buf_size);
+#else
+static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+{
+ return 0;
+}
+static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req) {};
+static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
+static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
+static inline bool nvmet_check_auth_status(struct nvmet_req *req)
+{
+ return true;
+}
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+{
+ return false;
+}
+static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+#endif
+
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 0a9542599ad1..dc3b4dc8fe08 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1839,7 +1839,8 @@ static int __init nvmet_tcp_init(void)
{
int ret;
- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!nvmet_tcp_wq)
return -ENOMEM;