Diffstat (limited to 'drivers/fwctl')
 drivers/fwctl/Kconfig       |  33 ++
 drivers/fwctl/Makefile      |   6 +
 drivers/fwctl/main.c        | 421 ++++++++++++++
 drivers/fwctl/mlx5/Makefile |   4 +
 drivers/fwctl/mlx5/main.c   | 411 +++++++++++++
 drivers/fwctl/pds/Makefile  |   4 +
 drivers/fwctl/pds/main.c    | 543 ++++++++++++++++++
 7 files changed, 1422 insertions(+), 0 deletions(-)
diff --git a/drivers/fwctl/Kconfig b/drivers/fwctl/Kconfig
new file mode 100644
index 000000000000..b5583b12a011
--- /dev/null
+++ b/drivers/fwctl/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig FWCTL
+	tristate "fwctl device firmware access framework"
+	help
+	  fwctl provides a userspace API for restricted access to communicate
+	  with on-device firmware. The communication channel is intended to
+	  support a wide range of lockdown compatible device behaviors including
+	  manipulating device FLASH, debugging, and other activities that don't
+	  fit neatly into an existing subsystem.
+
+if FWCTL
+config FWCTL_MLX5
+	tristate "mlx5 ConnectX control fwctl driver"
+	depends on MLX5_CORE
+	help
+	  The mlx5 fwctl driver provides an interface for a user process to
+	  access the debug and configuration registers of the ConnectX
+	  hardware family (NICs, PCI switches and SmartNIC SoCs).
+	  This allows configuration and debug tools to work out of the box on
+	  a mainstream kernel.
+
+	  If you don't know what to do here, say N.
+
+config FWCTL_PDS
+	tristate "AMD/Pensando pds fwctl driver"
+	depends on PDS_CORE
+	help
+	  The pds_fwctl driver provides an fwctl interface for a user process
+	  to access the debug and configuration information of the AMD/Pensando
+	  DSC hardware family.
+
+	  If you don't know what to do here, say N.
+endif
diff --git a/drivers/fwctl/Makefile b/drivers/fwctl/Makefile
new file mode 100644
index 000000000000..c093b5f661d6
--- /dev/null
+++ b/drivers/fwctl/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL) += fwctl.o
+obj-$(CONFIG_FWCTL_MLX5) += mlx5/
+obj-$(CONFIG_FWCTL_PDS) += pds/
+
+fwctl-y += main.o
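As the help text above says, fwctl exposes a character-device API to userspace. For orientation, a hypothetical userspace sketch of the basic handshake follows, assuming the uapi definitions this series adds in include/uapi/fwctl/fwctl.h (installed as <fwctl/fwctl.h>); the /dev/fwctl/fwctlN node name comes from fwctl_devnode() in main.c below:

/* Hypothetical userspace sketch: open the first fwctl device and query
 * its type with FWCTL_INFO. Error handling is minimal on purpose. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <fwctl/fwctl.h>	/* uapi header added by this series */

int main(void)
{
	struct fwctl_info info = { .size = sizeof(info) };
	int fd = open("/dev/fwctl/fwctl0", O_RDWR);

	if (fd < 0)
		return 1;
	/* No out_device_data buffer supplied: just learn the device type */
	if (ioctl(fd, FWCTL_INFO, &info))
		return 1;
	printf("fwctl device type: %u\n", info.out_device_type);
	return 0;
}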
diff --git a/drivers/fwctl/main.c b/drivers/fwctl/main.c
new file mode 100644
index 000000000000..bc6378506296
--- /dev/null
+++ b/drivers/fwctl/main.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#define pr_fmt(fmt) "fwctl: " fmt
+#include <linux/fwctl.h>
+
+#include <linux/container_of.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include <uapi/fwctl/fwctl.h>
+
+enum {
+	FWCTL_MAX_DEVICES = 4096,
+	MAX_RPC_LEN = SZ_2M,
+};
+static_assert(FWCTL_MAX_DEVICES < (1U << MINORBITS));
+
+static dev_t fwctl_dev;
+static DEFINE_IDA(fwctl_ida);
+static unsigned long fwctl_tainted;
+
+struct fwctl_ucmd {
+	struct fwctl_uctx *uctx;
+	void __user *ubuffer;
+	void *cmd;
+	u32 user_size;
+};
+
+static int ucmd_respond(struct fwctl_ucmd *ucmd, size_t cmd_len)
+{
+	if (copy_to_user(ucmd->ubuffer, ucmd->cmd,
+			 min_t(size_t, ucmd->user_size, cmd_len)))
+		return -EFAULT;
+	return 0;
+}
+
+static int copy_to_user_zero_pad(void __user *to, const void *from,
+				 size_t from_len, size_t user_len)
+{
+	size_t copy_len;
+
+	copy_len = min(from_len, user_len);
+	if (copy_to_user(to, from, copy_len))
+		return -EFAULT;
+	if (copy_len < user_len) {
+		if (clear_user(to + copy_len, user_len - copy_len))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int fwctl_cmd_info(struct fwctl_ucmd *ucmd)
+{
+	struct fwctl_device *fwctl = ucmd->uctx->fwctl;
+	struct fwctl_info *cmd = ucmd->cmd;
+	size_t driver_info_len = 0;
+
+	if (cmd->flags)
+		return -EOPNOTSUPP;
+
+	if (!fwctl->ops->info && cmd->device_data_len) {
+		if (clear_user(u64_to_user_ptr(cmd->out_device_data),
+			       cmd->device_data_len))
+			return -EFAULT;
+	} else if (cmd->device_data_len) {
+		void *driver_info __free(kfree) =
+			fwctl->ops->info(ucmd->uctx, &driver_info_len);
+		if (IS_ERR(driver_info))
+			return PTR_ERR(driver_info);
+
+		if (copy_to_user_zero_pad(u64_to_user_ptr(cmd->out_device_data),
+					  driver_info, driver_info_len,
+					  cmd->device_data_len))
+			return -EFAULT;
+	}
+
+	cmd->out_device_type = fwctl->ops->device_type;
+	cmd->device_data_len = driver_info_len;
+	return ucmd_respond(ucmd, sizeof(*cmd));
+}
+
+static int fwctl_cmd_rpc(struct fwctl_ucmd *ucmd)
+{
+	struct fwctl_device *fwctl = ucmd->uctx->fwctl;
+	struct fwctl_rpc *cmd = ucmd->cmd;
+	size_t out_len;
+
+	if (cmd->in_len > MAX_RPC_LEN || cmd->out_len > MAX_RPC_LEN)
+		return -EMSGSIZE;
+
+	switch (cmd->scope) {
+	case FWCTL_RPC_CONFIGURATION:
+	case FWCTL_RPC_DEBUG_READ_ONLY:
+		break;
+
+	case FWCTL_RPC_DEBUG_WRITE_FULL:
+		if (!capable(CAP_SYS_RAWIO))
+			return -EPERM;
+		fallthrough;
+	case FWCTL_RPC_DEBUG_WRITE:
+		if (!test_and_set_bit(0, &fwctl_tainted)) {
+			dev_warn(
+				&fwctl->dev,
+				"%s(%d): has requested full access to the physical device",
+				current->comm, task_pid_nr(current));
+			add_taint(TAINT_FWCTL, LOCKDEP_STILL_OK);
+		}
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	void *inbuf __free(kvfree) = kvzalloc(cmd->in_len, GFP_KERNEL_ACCOUNT);
+	if (!inbuf)
+		return -ENOMEM;
+	if (copy_from_user(inbuf, u64_to_user_ptr(cmd->in), cmd->in_len))
+		return -EFAULT;
+
+	out_len = cmd->out_len;
+	void *outbuf __free(kvfree) = fwctl->ops->fw_rpc(
+		ucmd->uctx, cmd->scope, inbuf, cmd->in_len, &out_len);
+	if (IS_ERR(outbuf))
+		return PTR_ERR(outbuf);
+	if (outbuf == inbuf) {
+		/* The driver can re-use inbuf as outbuf */
+		inbuf = NULL;
+	}
+
+	if (copy_to_user(u64_to_user_ptr(cmd->out), outbuf,
+			 min(cmd->out_len, out_len)))
+		return -EFAULT;
+
+	cmd->out_len = out_len;
+	return ucmd_respond(ucmd, sizeof(*cmd));
+}
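The scope switch above is the security pivot of the ioctl: CONFIGURATION and DEBUG_READ_ONLY pass freely, DEBUG_WRITE taints the kernel, and DEBUG_WRITE_FULL additionally requires CAP_SYS_RAWIO. A hedged userspace sketch of issuing an RPC (same uapi assumptions as the earlier sketch; the payload format is defined by the device driver, not by fwctl):

/* Sketch: issue a read-only debug RPC; buf holds the device-specific
 * command on entry and the reply on return (in-place reply is fine,
 * since the kernel copies in before it copies out). */
static int fwctl_debug_read(int fd, void *buf, __u32 in_len, __u32 out_len)
{
	struct fwctl_rpc rpc = {
		.size = sizeof(rpc),
		.scope = FWCTL_RPC_DEBUG_READ_ONLY,
		.in_len = in_len,
		.out_len = out_len,
		.in = (uintptr_t)buf,
		.out = (uintptr_t)buf,
	};

	if (ioctl(fd, FWCTL_RPC, &rpc))
		return -1;
	/* rpc.out_len now holds the length the driver actually produced */
	return 0;
}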
+
+/* On stack memory for the ioctl structs */
+union fwctl_ucmd_buffer {
+	struct fwctl_info info;
+	struct fwctl_rpc rpc;
+};
+
+struct fwctl_ioctl_op {
+	unsigned int size;
+	unsigned int min_size;
+	unsigned int ioctl_num;
+	int (*execute)(struct fwctl_ucmd *ucmd);
+};
+
+#define IOCTL_OP(_ioctl, _fn, _struct, _last)                               \
+	[_IOC_NR(_ioctl) - FWCTL_CMD_BASE] = {                              \
+		.size = sizeof(_struct) +                                   \
+			BUILD_BUG_ON_ZERO(sizeof(union fwctl_ucmd_buffer) < \
+					  sizeof(_struct)),                 \
+		.min_size = offsetofend(_struct, _last),                    \
+		.ioctl_num = _ioctl,                                        \
+		.execute = _fn,                                             \
+	}
+static const struct fwctl_ioctl_op fwctl_ioctl_ops[] = {
+	IOCTL_OP(FWCTL_INFO, fwctl_cmd_info, struct fwctl_info, out_device_data),
+	IOCTL_OP(FWCTL_RPC, fwctl_cmd_rpc, struct fwctl_rpc, out),
+};
+
+static long fwctl_fops_ioctl(struct file *filp, unsigned int cmd,
+			     unsigned long arg)
+{
+	struct fwctl_uctx *uctx = filp->private_data;
+	const struct fwctl_ioctl_op *op;
+	struct fwctl_ucmd ucmd = {};
+	union fwctl_ucmd_buffer buf;
+	unsigned int nr;
+	int ret;
+
+	nr = _IOC_NR(cmd);
+	if ((nr - FWCTL_CMD_BASE) >= ARRAY_SIZE(fwctl_ioctl_ops))
+		return -ENOIOCTLCMD;
+
+	op = &fwctl_ioctl_ops[nr - FWCTL_CMD_BASE];
+	if (op->ioctl_num != cmd)
+		return -ENOIOCTLCMD;
+
+	ucmd.uctx = uctx;
+	ucmd.cmd = &buf;
+	ucmd.ubuffer = (void __user *)arg;
+	ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
+	if (ret)
+		return ret;
+
+	if (ucmd.user_size < op->min_size)
+		return -EINVAL;
+
+	ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer,
+				    ucmd.user_size);
+	if (ret)
+		return ret;
+
+	guard(rwsem_read)(&uctx->fwctl->registration_lock);
+	if (!uctx->fwctl->ops)
+		return -ENODEV;
+	return op->execute(&ucmd);
+}
+
+static int fwctl_fops_open(struct inode *inode, struct file *filp)
+{
+	struct fwctl_device *fwctl =
+		container_of(inode->i_cdev, struct fwctl_device, cdev);
+	int ret;
+
+	guard(rwsem_read)(&fwctl->registration_lock);
+	if (!fwctl->ops)
+		return -ENODEV;
+
+	struct fwctl_uctx *uctx __free(kfree) =
+		kzalloc(fwctl->ops->uctx_size, GFP_KERNEL_ACCOUNT);
+	if (!uctx)
+		return -ENOMEM;
+
+	uctx->fwctl = fwctl;
+	ret = fwctl->ops->open_uctx(uctx);
+	if (ret)
+		return ret;
+
+	scoped_guard(mutex, &fwctl->uctx_list_lock) {
+		list_add_tail(&uctx->uctx_list_entry, &fwctl->uctx_list);
+	}
+
+	get_device(&fwctl->dev);
+	filp->private_data = no_free_ptr(uctx);
+	return 0;
+}
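fwctl_fops_open() above is written in the <linux/cleanup.h> style used throughout this file. For readers new to the idiom, a compressed, hypothetical illustration (foo/bar and foo_setup() are invented names):

/* __free(kfree) arms an automatic kfree() when the pointer goes out of
 * scope; no_free_ptr() hands ownership off and disarms that cleanup. */
static int foo_attach(struct bar *owner)
{
	struct foo *obj __free(kfree) = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;
	if (foo_setup(obj))
		return -EIO;		/* obj is kfree()d automatically */

	owner->obj = no_free_ptr(obj);	/* ownership moved; no auto-free */
	return 0;
}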
+
+static void fwctl_destroy_uctx(struct fwctl_uctx *uctx)
+{
+	lockdep_assert_held(&uctx->fwctl->uctx_list_lock);
+	list_del(&uctx->uctx_list_entry);
+	uctx->fwctl->ops->close_uctx(uctx);
+}
+
+static int fwctl_fops_release(struct inode *inode, struct file *filp)
+{
+	struct fwctl_uctx *uctx = filp->private_data;
+	struct fwctl_device *fwctl = uctx->fwctl;
+
+	scoped_guard(rwsem_read, &fwctl->registration_lock) {
+		/*
+		 * NULL ops means fwctl_unregister() has already removed the
+		 * driver and destroyed the uctx.
+		 */
+		if (fwctl->ops) {
+			guard(mutex)(&fwctl->uctx_list_lock);
+			fwctl_destroy_uctx(uctx);
+		}
+	}
+
+	kfree(uctx);
+	fwctl_put(fwctl);
+	return 0;
+}
+
+static const struct file_operations fwctl_fops = {
+	.owner = THIS_MODULE,
+	.open = fwctl_fops_open,
+	.release = fwctl_fops_release,
+	.unlocked_ioctl = fwctl_fops_ioctl,
+};
+
+static void fwctl_device_release(struct device *device)
+{
+	struct fwctl_device *fwctl =
+		container_of(device, struct fwctl_device, dev);
+
+	ida_free(&fwctl_ida, fwctl->dev.devt - fwctl_dev);
+	mutex_destroy(&fwctl->uctx_list_lock);
+	kfree(fwctl);
+}
+
+static char *fwctl_devnode(const struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "fwctl/%s", dev_name(dev));
+}
+
+static struct class fwctl_class = {
+	.name = "fwctl",
+	.dev_release = fwctl_device_release,
+	.devnode = fwctl_devnode,
+};
+
+static struct fwctl_device *
+_alloc_device(struct device *parent, const struct fwctl_ops *ops, size_t size)
+{
+	struct fwctl_device *fwctl __free(kfree) = kzalloc(size, GFP_KERNEL);
+	int devnum;
+
+	if (!fwctl)
+		return NULL;
+
+	devnum = ida_alloc_max(&fwctl_ida, FWCTL_MAX_DEVICES - 1, GFP_KERNEL);
+	if (devnum < 0)
+		return NULL;
+
+	fwctl->dev.devt = fwctl_dev + devnum;
+	fwctl->dev.class = &fwctl_class;
+	fwctl->dev.parent = parent;
+
+	init_rwsem(&fwctl->registration_lock);
+	mutex_init(&fwctl->uctx_list_lock);
+	INIT_LIST_HEAD(&fwctl->uctx_list);
+
+	device_initialize(&fwctl->dev);
+	return_ptr(fwctl);
+}
+
+/* Drivers use the fwctl_alloc_device() wrapper */
+struct fwctl_device *_fwctl_alloc_device(struct device *parent,
+					 const struct fwctl_ops *ops,
+					 size_t size)
+{
+	struct fwctl_device *fwctl __free(fwctl) =
+		_alloc_device(parent, ops, size);
+
+	if (!fwctl)
+		return NULL;
+
+	cdev_init(&fwctl->cdev, &fwctl_fops);
+	/*
+	 * The driver module is protected by fwctl_register/unregister(),
+	 * unregister won't complete until we are done with the driver's module.
+	 */
+	fwctl->cdev.owner = THIS_MODULE;
+
+	if (dev_set_name(&fwctl->dev, "fwctl%d", fwctl->dev.devt - fwctl_dev))
+		return NULL;
+
+	fwctl->ops = ops;
+	return_ptr(fwctl);
+}
+EXPORT_SYMBOL_NS_GPL(_fwctl_alloc_device, "FWCTL");
+
+/**
+ * fwctl_register - Register a new device to the subsystem
+ * @fwctl: Previously allocated fwctl_device
+ *
+ * On return the device is visible through sysfs and /dev, and driver ops may
+ * be called.
+ */
+int fwctl_register(struct fwctl_device *fwctl)
+{
+	return cdev_device_add(&fwctl->cdev, &fwctl->dev);
+}
+EXPORT_SYMBOL_NS_GPL(fwctl_register, "FWCTL");
+
+/**
+ * fwctl_unregister - Unregister a device from the subsystem
+ * @fwctl: Previously allocated and registered fwctl_device
+ *
+ * Undoes fwctl_register(). On return no driver ops will be called. The
+ * caller must still call fwctl_put() to free the fwctl.
+ *
+ * Unregister will return even if userspace still has file descriptors open.
+ * This will call ops->close_uctx() on any open FDs and after return no driver
+ * op will be called. The FDs remain open but all fops will return -ENODEV.
+ *
+ * The design of fwctl allows this sort of disassociation of the driver from the
+ * subsystem primarily by keeping memory allocations owned by the core subsystem.
+ * The fwctl_device and fwctl_uctx can both be freed without requiring a driver
+ * callback. This allows the module to remain unlocked while FDs are open.
+ */
+void fwctl_unregister(struct fwctl_device *fwctl)
+{
+	struct fwctl_uctx *uctx;
+
+	cdev_device_del(&fwctl->cdev, &fwctl->dev);
+
+	/* Disable and free the driver's resources for any still open FDs. */
+	guard(rwsem_write)(&fwctl->registration_lock);
+	guard(mutex)(&fwctl->uctx_list_lock);
+	while ((uctx = list_first_entry_or_null(&fwctl->uctx_list,
+						struct fwctl_uctx,
+						uctx_list_entry)))
+		fwctl_destroy_uctx(uctx);
+
+	/*
+	 * The driver module may unload after this returns, the op pointer will
+	 * not be valid.
+	 */
+	fwctl->ops = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(fwctl_unregister, "FWCTL");
+
+static int __init fwctl_init(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&fwctl_dev, 0, FWCTL_MAX_DEVICES, "fwctl");
+	if (ret)
+		return ret;
+
+	ret = class_register(&fwctl_class);
+	if (ret)
+		goto err_chrdev;
+	return 0;
+
+err_chrdev:
+	unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES);
+	return ret;
+}
+
+static void __exit fwctl_exit(void)
+{
+	class_unregister(&fwctl_class);
+	unregister_chrdev_region(fwctl_dev, FWCTL_MAX_DEVICES);
+}
+
+module_init(fwctl_init);
+module_exit(fwctl_exit);
+MODULE_DESCRIPTION("fwctl device firmware access framework");
+MODULE_LICENSE("GPL");
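That completes the core. Distilling the register/unregister contract documented above into a hypothetical driver skeleton (names invented; not part of the patch — the two real users follow below):

/* Hypothetical minimal fwctl driver skeleton. */
struct foofc_dev {
	struct fwctl_device fwctl;	/* driver struct embeds fwctl_device */
};

static const struct fwctl_ops foofc_ops = {
	.device_type = FWCTL_DEVICE_TYPE_ERROR,	/* stand-in type value */
	.uctx_size = sizeof(struct fwctl_uctx),	/* or a wrapper struct */
	.open_uctx = foofc_open_uctx,		/* per-FD state setup */
	.close_uctx = foofc_close_uctx,
	.info = foofc_info,	/* kmalloc'd blob returned via FWCTL_INFO */
	.fw_rpc = foofc_fw_rpc,	/* may return inbuf to reuse the buffer */
};

static int foofc_probe(struct device *parent)
{
	struct foofc_dev *fc = fwctl_alloc_device(parent, &foofc_ops,
						  struct foofc_dev, fwctl);
	int ret;

	if (!fc)
		return -ENOMEM;
	ret = fwctl_register(&fc->fwctl);	/* visible in /dev from here */
	if (ret)
		fwctl_put(&fc->fwctl);
	return ret;
}

static void foofc_remove(struct foofc_dev *fc)
{
	fwctl_unregister(&fc->fwctl);	/* no driver ops after this */
	fwctl_put(&fc->fwctl);		/* memory freed once all FDs close */
}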
diff --git a/drivers/fwctl/mlx5/Makefile b/drivers/fwctl/mlx5/Makefile
new file mode 100644
index 000000000000..139a23e3c7c5
--- /dev/null
+++ b/drivers/fwctl/mlx5/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL_MLX5) += mlx5_fwctl.o
+
+mlx5_fwctl-y += main.o
diff --git a/drivers/fwctl/mlx5/main.c b/drivers/fwctl/mlx5/main.c
new file mode 100644
index 000000000000..f93aa0cecdb9
--- /dev/null
+++ b/drivers/fwctl/mlx5/main.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#include <linux/fwctl.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/driver.h>
+#include <uapi/fwctl/mlx5.h>
+
+#define mlx5ctl_err(mcdev, format, ...) \
+	dev_err(&mcdev->fwctl.dev, format, ##__VA_ARGS__)
+
+#define mlx5ctl_dbg(mcdev, format, ...) \
+	dev_dbg(&mcdev->fwctl.dev, "PID %u: " format, current->pid, \
+		##__VA_ARGS__)
+
+struct mlx5ctl_uctx {
+	struct fwctl_uctx uctx;
+	u32 uctx_caps;
+	u32 uctx_uid;
+};
+
+struct mlx5ctl_dev {
+	struct fwctl_device fwctl;
+	struct mlx5_core_dev *mdev;
+};
+DEFINE_FREE(mlx5ctl, struct mlx5ctl_dev *, if (_T) fwctl_put(&_T->fwctl));
+
+struct mlx5_ifc_mbox_in_hdr_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+
+	u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_out_hdr_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+
+	u8 syndrome[0x20];
+
+	u8 reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES = 0x4,
+};
+
+enum {
+	MLX5_CMD_OP_QUERY_DRIVER_VERSION = 0x10c,
+	MLX5_CMD_OP_QUERY_OTHER_HCA_CAP = 0x10e,
+	MLX5_CMD_OP_QUERY_RDB = 0x512,
+	MLX5_CMD_OP_QUERY_PSV = 0x602,
+	MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
+	MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
+	MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
+	MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
+	MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
+	MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
+	MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS = 0x821,
+	MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS = 0x911,
+	MLX5_CMD_OP_QUERY_AFU = 0x971,
+	MLX5_CMD_OP_QUERY_CAPI_PEC = 0x981,
+	MLX5_CMD_OP_QUERY_UCTX = 0xa05,
+	MLX5_CMD_OP_QUERY_UMEM = 0xa09,
+	MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE = 0xb02,
+	MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO = 0xb03,
+	MLX5_CMD_OP_QUERY_REGEXP_PARAMS = 0xb05,
+	MLX5_CMD_OP_QUERY_REGEXP_REGISTER = 0xb07,
+	MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY = 0xb08,
+	MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS = 0xb0a,
+	MLX5_CMD_OP_ACCESS_REGISTER_USER = 0xb0c,
+	MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING = 0xb0f,
+	MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO = 0xb13,
+	MLX5_CMD_OP_QUERY_CRYPTO_STATE = 0xb14,
+	MLX5_CMD_OP_QUERY_VUID = 0xb22,
+	MLX5_CMD_OP_QUERY_DPA_PARTITION = 0xb28,
+	MLX5_CMD_OP_QUERY_DPA_PARTITIONS = 0xb2a,
+	MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT = 0xb2e,
+	MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO = 0xb2f,
+	MLX5_CMD_OP_QUERY_RSV_RESOURCES = 0x8000,
+	MLX5_CMD_OP_QUERY_MTT = 0x8001,
+	MLX5_CMD_OP_QUERY_SCHED_QUEUE = 0x8006,
+};
+
+static int mlx5ctl_alloc_uid(struct mlx5ctl_dev *mcdev, u32 cap)
+{
+	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
+	void *uctx;
+	int ret;
+	u16 uid;
+
+	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
+
+	mlx5ctl_dbg(mcdev, "%s: caps 0x%x\n", __func__, cap);
+	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
+	MLX5_SET(uctx, uctx, cap, cap);
+
+	ret = mlx5_cmd_exec(mcdev->mdev, in, sizeof(in), out, sizeof(out));
+	if (ret)
+		return ret;
+
+	uid = MLX5_GET(create_uctx_out, out, uid);
+	mlx5ctl_dbg(mcdev, "allocated uid %u with caps 0x%x\n", uid, cap);
+	return uid;
+}
+
+static void mlx5ctl_release_uid(struct mlx5ctl_dev *mcdev, u16 uid)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+	struct mlx5_core_dev *mdev = mcdev->mdev;
+	int ret;
+
+	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
+	MLX5_SET(destroy_uctx_in, in, uid, uid);
+
+	ret = mlx5_cmd_exec_in(mdev, destroy_uctx, in);
+	mlx5ctl_dbg(mcdev, "released uid %u %pe\n", uid, ERR_PTR(ret));
+}
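A note on the accessors used above: the *_bits structs are mlx5_ifc-style layouts in which each u8 array member declares a field's width in bits, and MLX5_SET()/MLX5_GET() derive the big-endian offsets from them. A minimal kernel-side sketch (hypothetical helper) using the locally defined mbox_in_hdr:

/* Build and read back a mailbox header via the mlx5_ifc accessors. */
static u16 example_build_hdr(u32 *in /* MLX5_ST_SZ_DW(mbox_in_hdr) dwords */)
{
	MLX5_SET(mbox_in_hdr, in, opcode, MLX5_CMD_OP_QUERY_UCTX);
	MLX5_SET(mbox_in_hdr, in, uid, 17);	/* example uid */

	/* MLX5_GET() is the mirror accessor; returns 0xa05 here */
	return MLX5_GET(mbox_in_hdr, in, opcode);
}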
+
+static int mlx5ctl_open_uctx(struct fwctl_uctx *uctx)
+{
+	struct mlx5ctl_uctx *mfd =
+		container_of(uctx, struct mlx5ctl_uctx, uctx);
+	struct mlx5ctl_dev *mcdev =
+		container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+	int uid;
+
+	/*
+	 * New FW supports the TOOLS_RESOURCES uid security label
+	 * which allows commands to manipulate the global device state.
+	 * Otherwise only basic existing RDMA devx privileges are allowed.
+	 */
+	if (MLX5_CAP_GEN(mcdev->mdev, uctx_cap) &
+	    MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES)
+		mfd->uctx_caps |= MLX5_UCTX_OBJECT_CAP_TOOLS_RESOURCES;
+
+	uid = mlx5ctl_alloc_uid(mcdev, mfd->uctx_caps);
+	if (uid < 0)
+		return uid;
+
+	mfd->uctx_uid = uid;
+	return 0;
+}
+
+static void mlx5ctl_close_uctx(struct fwctl_uctx *uctx)
+{
+	struct mlx5ctl_dev *mcdev =
+		container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+	struct mlx5ctl_uctx *mfd =
+		container_of(uctx, struct mlx5ctl_uctx, uctx);
+
+	mlx5ctl_release_uid(mcdev, mfd->uctx_uid);
+}
+
+static void *mlx5ctl_info(struct fwctl_uctx *uctx, size_t *length)
+{
+	struct mlx5ctl_uctx *mfd =
+		container_of(uctx, struct mlx5ctl_uctx, uctx);
+	struct fwctl_info_mlx5 *info;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	info->uid = mfd->uctx_uid;
+	info->uctx_caps = mfd->uctx_caps;
+	*length = sizeof(*info);
+	return info;
+}
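mlx5ctl_info() backs the device_data half of FWCTL_INFO. Its hedged userspace counterpart, assuming struct fwctl_info_mlx5 in include/uapi/fwctl/mlx5.h carries exactly the two fields set above:

/* Sketch: read the mlx5-specific info blob (uid + capability bits). */
static int mlx5_query_caps(int fd)
{
	struct fwctl_info_mlx5 mlx5_info = {};
	struct fwctl_info info = {
		.size = sizeof(info),
		.device_data_len = sizeof(mlx5_info),
		.out_device_data = (uintptr_t)&mlx5_info,
	};

	if (ioctl(fd, FWCTL_INFO, &info) ||
	    info.out_device_type != FWCTL_DEVICE_TYPE_MLX5)
		return -1;
	printf("uid %u caps 0x%x\n", mlx5_info.uid, mlx5_info.uctx_caps);
	return 0;
}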
+
+static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
+{
+	u16 opcode = MLX5_GET(mbox_in_hdr, in, opcode);
+	u16 op_mod = MLX5_GET(mbox_in_hdr, in, op_mod);
+
+	/*
+	 * Currently the driver can't keep track of commands that allocate
+	 * objects in the FW, these commands are safe from a security
+	 * perspective but nothing will free the memory when the FD is closed.
+	 * For now permit only query commands and set commands that don't alter
+	 * objects. Also the caps for the scope have not been defined yet,
+	 * filter commands manually for now.
+	 */
+	switch (opcode) {
+	case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
+	case MLX5_CMD_OP_QUERY_ADAPTER:
+	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+	case MLX5_CMD_OP_QUERY_HCA_CAP:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
+	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+	case MLX5_CMD_OPCODE_QUERY_VUID:
+	/*
+	 * FW limits SET_HCA_CAP on the tools UID to only the other function
+	 * mode which is used for function pre-configuration
+	 */
+	case MLX5_CMD_OP_SET_HCA_CAP:
+		return true; /* scope >= FWCTL_RPC_CONFIGURATION; */
+
+	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
+	case MLX5_CMD_OP_FPGA_QUERY_QP:
+	case MLX5_CMD_OP_NOP:
+	case MLX5_CMD_OP_QUERY_AFU:
+	case MLX5_CMD_OP_QUERY_BURST_SIZE:
+	case MLX5_CMD_OP_QUERY_CAPI_PEC:
+	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+	case MLX5_CMD_OP_QUERY_CONG_STATUS:
+	case MLX5_CMD_OP_QUERY_CQ:
+	case MLX5_CMD_OP_QUERY_CRYPTO_STATE:
+	case MLX5_CMD_OP_QUERY_DC_CNAK_TRACE:
+	case MLX5_CMD_OP_QUERY_DCT:
+	case MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS:
+	case MLX5_CMD_OP_QUERY_DIAGNOSTIC_COUNTERS:
+	case MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS:
+	case MLX5_CMD_OP_QUERY_DPA_PARTITION:
+	case MLX5_CMD_OP_QUERY_DPA_PARTITIONS:
+	case MLX5_CMD_OP_QUERY_DRIVER_VERSION:
+	case MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO:
+	case MLX5_CMD_OP_QUERY_EMULATED_RESOURCES_INFO:
+	case MLX5_CMD_OP_QUERY_EMULATION_DEVICE_EQ_MSIX_MAPPING:
+	case MLX5_CMD_OP_QUERY_EQ:
+	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+	case MLX5_CMD_OP_QUERY_ISSI:
+	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_QUERY_LAG:
+	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+	case MLX5_CMD_OP_QUERY_MATCH_SAMPLE_INFO:
+	case MLX5_CMD_OP_QUERY_MKEY:
+	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+	case MLX5_CMD_OP_QUERY_MTT:
+	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER:
+	case MLX5_CMD_OP_QUERY_NVMF_CC_RESPONSE:
+	case MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT:
+	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_PAGES:
+	case MLX5_CMD_OP_QUERY_PSV:
+	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_QUERY_QP:
+	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
+	case MLX5_CMD_OP_QUERY_RDB:
+	case MLX5_CMD_OP_QUERY_REGEXP_PARAMS:
+	case MLX5_CMD_OP_QUERY_REGEXP_REGISTER:
+	case MLX5_CMD_OP_QUERY_RMP:
+	case MLX5_CMD_OP_QUERY_RQ:
+	case MLX5_CMD_OP_QUERY_RQT:
+	case MLX5_CMD_OP_QUERY_RSV_RESOURCES:
+	case MLX5_CMD_OP_QUERY_SCHED_QUEUE:
+	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_QUERY_SF_PARTITION:
+	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+	case MLX5_CMD_OP_QUERY_SQ:
+	case MLX5_CMD_OP_QUERY_SRQ:
+	case MLX5_CMD_OP_QUERY_TIR:
+	case MLX5_CMD_OP_QUERY_TIS:
+	case MLX5_CMD_OP_QUERY_UCTX:
+	case MLX5_CMD_OP_QUERY_UMEM:
+	case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
+	case MLX5_CMD_OP_QUERY_VHCA_STATE:
+	case MLX5_CMD_OP_QUERY_VNIC_ENV:
+	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+	case MLX5_CMD_OP_QUERY_VPORT_STATE:
+	case MLX5_CMD_OP_QUERY_WOL_ROL:
+	case MLX5_CMD_OP_QUERY_XRC_SRQ:
+	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
+	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
+	case MLX5_CMD_OP_QUERY_XRQ:
+	case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
+	case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
+		return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
+
+	case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
+		return scope >= FWCTL_RPC_DEBUG_WRITE;
+
+	case MLX5_CMD_OP_ACCESS_REG:
+	case MLX5_CMD_OP_ACCESS_REGISTER_USER:
+		if (op_mod == 0) /* write */
+			return true; /* scope >= FWCTL_RPC_CONFIGURATION; */
+		return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
+	default:
+		return false;
+	}
+}
+
+static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+			    void *rpc_in, size_t in_len, size_t *out_len)
+{
+	struct mlx5ctl_dev *mcdev =
+		container_of(uctx->fwctl, struct mlx5ctl_dev, fwctl);
+	struct mlx5ctl_uctx *mfd =
+		container_of(uctx, struct mlx5ctl_uctx, uctx);
+	void *rpc_out;
+	int ret;
+
+	if (in_len < MLX5_ST_SZ_BYTES(mbox_in_hdr) ||
+	    *out_len < MLX5_ST_SZ_BYTES(mbox_out_hdr))
+		return ERR_PTR(-EMSGSIZE);
+
+	mlx5ctl_dbg(mcdev, "[UID %d] cmdif: opcode 0x%x inlen %zu outlen %zu\n",
+		    mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
+		    in_len, *out_len);
+
+	if (!mlx5ctl_validate_rpc(rpc_in, scope))
+		return ERR_PTR(-EBADMSG);
+
+	/*
+	 * mlx5_cmd_do() copies the input message to its own buffer before
+	 * executing it, so we can reuse the allocation for the output.
+	 */
+	if (*out_len <= in_len) {
+		rpc_out = rpc_in;
+	} else {
+		rpc_out = kvzalloc(*out_len, GFP_KERNEL);
+		if (!rpc_out)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	/* Enforce the user context for the command */
+	MLX5_SET(mbox_in_hdr, rpc_in, uid, mfd->uctx_uid);
+	ret = mlx5_cmd_do(mcdev->mdev, rpc_in, in_len, rpc_out, *out_len);
+
+	mlx5ctl_dbg(mcdev,
+		    "[UID %d] cmdif: opcode 0x%x status 0x%x retval %pe\n",
+		    mfd->uctx_uid, MLX5_GET(mbox_in_hdr, rpc_in, opcode),
+		    MLX5_GET(mbox_out_hdr, rpc_out, status), ERR_PTR(ret));
+
+	/*
+	 * -EREMOTEIO means execution succeeded and the out buffer is valid,
+	 * but an error code was returned inside out. Everything else
+	 * means the RPC did not make it to the device.
+	 */
+	if (ret && ret != -EREMOTEIO) {
+		if (rpc_out != rpc_in)
+			kvfree(rpc_out);
+		return ERR_PTR(ret);
+	}
+	return rpc_out;
+}
+
+static const struct fwctl_ops mlx5ctl_ops = {
+	.device_type = FWCTL_DEVICE_TYPE_MLX5,
+	.uctx_size = sizeof(struct mlx5ctl_uctx),
+	.open_uctx = mlx5ctl_open_uctx,
+	.close_uctx = mlx5ctl_close_uctx,
+	.info = mlx5ctl_info,
+	.fw_rpc = mlx5ctl_fw_rpc,
+};
+
+static int mlx5ctl_probe(struct auxiliary_device *adev,
+			 const struct auxiliary_device_id *id)
+{
+	struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
+	struct mlx5_core_dev *mdev = madev->mdev;
+	struct mlx5ctl_dev *mcdev __free(mlx5ctl) = fwctl_alloc_device(
+		&mdev->pdev->dev, &mlx5ctl_ops, struct mlx5ctl_dev, fwctl);
+	int ret;
+
+	if (!mcdev)
+		return -ENOMEM;
+
+	mcdev->mdev = mdev;
+
+	ret = fwctl_register(&mcdev->fwctl);
+	if (ret)
+		return ret;
+	auxiliary_set_drvdata(adev, no_free_ptr(mcdev));
+	return 0;
+}
+
+static void mlx5ctl_remove(struct auxiliary_device *adev)
+{
+	struct mlx5ctl_dev *mcdev = auxiliary_get_drvdata(adev);
+
+	fwctl_unregister(&mcdev->fwctl);
+	fwctl_put(&mcdev->fwctl);
+}
+
+static const struct auxiliary_device_id mlx5ctl_id_table[] = {
+	{.name = MLX5_ADEV_NAME ".fwctl",},
+	{}
+};
+MODULE_DEVICE_TABLE(auxiliary, mlx5ctl_id_table);
+
+static struct auxiliary_driver mlx5ctl_driver = {
+	.name = "mlx5_fwctl",
+	.probe = mlx5ctl_probe,
+	.remove = mlx5ctl_remove,
+	.id_table = mlx5ctl_id_table,
+};
+
+module_auxiliary_driver(mlx5ctl_driver);
+
+MODULE_IMPORT_NS("FWCTL");
+MODULE_DESCRIPTION("mlx5 ConnectX fwctl driver");
+MODULE_AUTHOR("Saeed Mahameed <saeedm@nvidia.com>");
+MODULE_LICENSE("Dual BSD/GPL");
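Tying the mlx5 pieces together from userspace: the FWCTL_RPC in buffer is a raw command mailbox, the kernel force-overwrites the uid field with the FD's uid, and in-mailbox failures (-EREMOTEIO above) still return a valid out buffer whose status/syndrome fields describe the error. A heavily hedged sketch — the 16-byte header and field order are read off the _bits structs above, and real commands need their full command-specific layout:

/* Hand-build a bare mailbox header and ship it via FWCTL_RPC. */
#include <endian.h>
#include <stdint.h>

struct mbox_hdr {
	uint16_t opcode;	/* big-endian on the wire */
	uint16_t uid;		/* ignored: kernel forces the FD's uid */
	uint16_t rsvd;
	uint16_t op_mod;	/* big-endian on the wire */
	uint8_t  rsvd2[8];
};

static int mlx5_rpc_hdr_only(int fd, uint16_t opcode, void *out, size_t out_len)
{
	struct mbox_hdr in = { .opcode = htobe16(opcode) };
	struct fwctl_rpc rpc = {
		.size = sizeof(rpc),
		.scope = FWCTL_RPC_DEBUG_READ_ONLY,
		.in_len = sizeof(in),
		.out_len = out_len,
		.in = (uintptr_t)&in,
		.out = (uintptr_t)out,
	};

	/* On in-mailbox failure the first out byte holds the FW status */
	return ioctl(fd, FWCTL_RPC, &rpc);
}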
diff --git a/drivers/fwctl/pds/Makefile b/drivers/fwctl/pds/Makefile
new file mode 100644
index 000000000000..cc2317c07be1
--- /dev/null
+++ b/drivers/fwctl/pds/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_FWCTL_PDS) += pds_fwctl.o
+
+pds_fwctl-y += main.o
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
new file mode 100644
index 000000000000..9b9d1f6b5556
--- /dev/null
+++ b/drivers/fwctl/pds/main.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) Advanced Micro Devices, Inc */
+
+#include <linux/module.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+
+#include <uapi/fwctl/fwctl.h>
+#include <uapi/fwctl/pds.h>
+#include <linux/fwctl.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_auxbus.h>
+
+struct pdsfc_uctx {
+	struct fwctl_uctx uctx;
+	u32 uctx_caps;
+};
+
+struct pdsfc_rpc_endpoint_info {
+	u32 endpoint;
+	dma_addr_t operations_pa;
+	struct pds_fwctl_query_data *operations;
+	struct mutex lock;	/* lock for endpoint info management */
+};
+
+struct pdsfc_dev {
+	struct fwctl_device fwctl;
+	struct pds_auxiliary_dev *padev;
+	u32 caps;
+	struct pds_fwctl_ident ident;
+	dma_addr_t endpoints_pa;
+	struct pds_fwctl_query_data *endpoints;
+	struct pdsfc_rpc_endpoint_info *endpoint_info;
+};
+
+static int pdsfc_open_uctx(struct fwctl_uctx *uctx)
+{
+	struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
+	struct pdsfc_uctx *pdsfc_uctx = container_of(uctx, struct pdsfc_uctx, uctx);
+
+	pdsfc_uctx->uctx_caps = pdsfc->caps;
+
+	return 0;
+}
+
+static void pdsfc_close_uctx(struct fwctl_uctx *uctx)
+{
+}
+
+static void *pdsfc_info(struct fwctl_uctx *uctx, size_t *length)
+{
+	struct pdsfc_uctx *pdsfc_uctx =
+		container_of(uctx, struct pdsfc_uctx, uctx);
+	struct fwctl_info_pds *info;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	info->uctx_caps = pdsfc_uctx->uctx_caps;
+	*length = sizeof(*info);
+
+	return info;
+}
+
+static int pdsfc_identify(struct pdsfc_dev *pdsfc)
+{
+	struct device *dev = &pdsfc->fwctl.dev;
+	union pds_core_adminq_comp comp = {0};
+	union pds_core_adminq_cmd cmd;
+	struct pds_fwctl_ident *ident;
+	dma_addr_t ident_pa;
+	int err;
+
+	ident = dma_alloc_coherent(dev->parent, sizeof(*ident), &ident_pa, GFP_KERNEL);
+	if (!ident) {
+		dev_err(dev, "Failed to map ident buffer\n");
+		return -ENOMEM;
+	}
+
+	cmd = (union pds_core_adminq_cmd) {
+		.fwctl_ident = {
+			.opcode = PDS_FWCTL_CMD_IDENT,
+			.version = 0,
+			.len = cpu_to_le32(sizeof(*ident)),
+			.ident_pa = cpu_to_le64(ident_pa),
+		}
+	};
+
+	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+	if (err)
+		dev_err(dev, "Failed to send adminq cmd opcode: %u err: %d\n",
+			cmd.fwctl_ident.opcode, err);
+	else
+		pdsfc->ident = *ident;
+
+	dma_free_coherent(dev->parent, sizeof(*ident), ident, ident_pa);
+
+	return err;
+}
+
+static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc)
+{
+	struct device *dev = &pdsfc->fwctl.dev;
+	u32 num_endpoints;
+	int i;
+
+	if (!pdsfc->endpoints)
+		return;
+
+	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+	for (i = 0; pdsfc->endpoint_info && i < num_endpoints; i++)
+		mutex_destroy(&pdsfc->endpoint_info[i].lock);
+	vfree(pdsfc->endpoint_info);
+	pdsfc->endpoint_info = NULL;
+	dma_free_coherent(dev->parent, PAGE_SIZE,
+			  pdsfc->endpoints, pdsfc->endpoints_pa);
+	pdsfc->endpoints = NULL;
+	pdsfc->endpoints_pa = DMA_MAPPING_ERROR;
+}
+
+static void pdsfc_free_operations(struct pdsfc_dev *pdsfc)
+{
+	struct device *dev = &pdsfc->fwctl.dev;
+	u32 num_endpoints;
+	int i;
+
+	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+	for (i = 0; i < num_endpoints; i++) {
+		struct pdsfc_rpc_endpoint_info *ei = &pdsfc->endpoint_info[i];
+
+		if (ei->operations) {
+			dma_free_coherent(dev->parent, PAGE_SIZE,
+					  ei->operations, ei->operations_pa);
+			ei->operations = NULL;
+			ei->operations_pa = DMA_MAPPING_ERROR;
+		}
+	}
+}
+
+static struct pds_fwctl_query_data *pdsfc_get_endpoints(struct pdsfc_dev *pdsfc,
+							dma_addr_t *pa)
+{
+	struct device *dev = &pdsfc->fwctl.dev;
+	union pds_core_adminq_comp comp = {0};
+	struct pds_fwctl_query_data *data;
+	union pds_core_adminq_cmd cmd;
+	dma_addr_t data_pa;
+	int err;
+
+	data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
+	if (!data) {
+		dev_err(dev, "Failed to map endpoint list\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cmd = (union pds_core_adminq_cmd) {
+		.fwctl_query = {
+			.opcode = PDS_FWCTL_CMD_QUERY,
+			.entity = PDS_FWCTL_RPC_ROOT,
+			.version = 0,
+			.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
+			.query_data_buf_pa = cpu_to_le64(data_pa),
+		}
+	};
+
+	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+	if (err) {
+		dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
+			cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
+		dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
+		return ERR_PTR(err);
+	}
+
+	*pa = data_pa;
+
+	return data;
+}
+
+static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc)
+{
+	struct pds_fwctl_query_data_endpoint *ep_entry;
+	u32 num_endpoints;
+	int i;
+
+	pdsfc->endpoints = pdsfc_get_endpoints(pdsfc, &pdsfc->endpoints_pa);
+	if (IS_ERR(pdsfc->endpoints))
+		return PTR_ERR(pdsfc->endpoints);
+
+	num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries);
+	pdsfc->endpoint_info = vcalloc(num_endpoints,
+				       sizeof(*pdsfc->endpoint_info));
+	if (!pdsfc->endpoint_info) {
+		pdsfc_free_endpoints(pdsfc);
+		return -ENOMEM;
+	}
+
+	ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries;
+	for (i = 0; i < num_endpoints; i++) {
+		mutex_init(&pdsfc->endpoint_info[i].lock);
+		pdsfc->endpoint_info[i].endpoint = le32_to_cpu(ep_entry[i].id);
+	}
+
+	return 0;
+}
+
+static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc,
+							 dma_addr_t *pa, u32 ep)
+{
+	struct pds_fwctl_query_data_operation *entries;
+	struct device *dev = &pdsfc->fwctl.dev;
+	union pds_core_adminq_comp comp = {0};
+	struct pds_fwctl_query_data *data;
+	union pds_core_adminq_cmd cmd;
+	dma_addr_t data_pa;
+	u32 num_entries;
+	int err;
+	int i;
+
+	/* Query the operations list for the given endpoint */
+	data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL);
+	if (!data) {
+		dev_err(dev, "Failed to map operations list\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cmd = (union pds_core_adminq_cmd) {
+		.fwctl_query = {
+			.opcode = PDS_FWCTL_CMD_QUERY,
+			.entity = PDS_FWCTL_RPC_ENDPOINT,
+			.version = 0,
+			.query_data_buf_len = cpu_to_le32(PAGE_SIZE),
+			.query_data_buf_pa = cpu_to_le64(data_pa),
+			.ep = cpu_to_le32(ep),
+		}
+	};
+
+	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+	if (err) {
+		dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n",
+			cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err);
+		dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa);
+		return ERR_PTR(err);
+	}
+
+	*pa = data_pa;
+
+	entries = (struct pds_fwctl_query_data_operation *)data->entries;
+	num_entries = le32_to_cpu(data->num_entries);
+	dev_dbg(dev, "num_entries %d\n", num_entries);
+	for (i = 0; i < num_entries; i++) {
+		/* Translate FW command attribute to fwctl scope */
+		switch (entries[i].scope) {
+		case PDSFC_FW_CMD_ATTR_READ:
+		case PDSFC_FW_CMD_ATTR_WRITE:
+		case PDSFC_FW_CMD_ATTR_SYNC:
+			entries[i].scope = FWCTL_RPC_CONFIGURATION;
+			break;
+		case PDSFC_FW_CMD_ATTR_DEBUG_READ:
+			entries[i].scope = FWCTL_RPC_DEBUG_READ_ONLY;
+			break;
+		case PDSFC_FW_CMD_ATTR_DEBUG_WRITE:
+			entries[i].scope = FWCTL_RPC_DEBUG_WRITE;
+			break;
+		default:
+			entries[i].scope = FWCTL_RPC_DEBUG_WRITE_FULL;
+			break;
+		}
+		dev_dbg(dev, "endpoint %d operation: id %x scope %d\n",
+			ep, le32_to_cpu(entries[i].id), entries[i].scope);
+	}
+
+	return data;
+}
+
+static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc,
+			      struct fwctl_rpc_pds *rpc,
+			      enum fwctl_rpc_scope scope)
+{
+	struct pds_fwctl_query_data_operation *op_entry;
+	struct pdsfc_rpc_endpoint_info *ep_info = NULL;
+	struct device *dev = &pdsfc->fwctl.dev;
+	u32 num_entries;
+	int i;
+
+	/* validate rpc in_len & out_len based
+	 * on ident.max_req_sz & max_resp_sz
+	 */
+	if (rpc->in.len > le32_to_cpu(pdsfc->ident.max_req_sz)) {
+		dev_dbg(dev, "Invalid request size %u, max %u\n",
+			rpc->in.len, le32_to_cpu(pdsfc->ident.max_req_sz));
+		return -EINVAL;
+	}
+
+	if (rpc->out.len > le32_to_cpu(pdsfc->ident.max_resp_sz)) {
+		dev_dbg(dev, "Invalid response size %u, max %u\n",
+			rpc->out.len, le32_to_cpu(pdsfc->ident.max_resp_sz));
+		return -EINVAL;
+	}
+
+	num_entries = le32_to_cpu(pdsfc->endpoints->num_entries);
+	for (i = 0; i < num_entries; i++) {
+		if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) {
+			ep_info = &pdsfc->endpoint_info[i];
+			break;
+		}
+	}
+	if (!ep_info) {
+		dev_dbg(dev, "Invalid endpoint %d\n", rpc->in.ep);
+		return -EINVAL;
+	}
+
+	/* query and cache this endpoint's operations */
+	mutex_lock(&ep_info->lock);
+	if (!ep_info->operations) {
+		struct pds_fwctl_query_data *operations;
+
+		operations = pdsfc_get_operations(pdsfc,
+						  &ep_info->operations_pa,
+						  rpc->in.ep);
+		if (IS_ERR(operations)) {
+			mutex_unlock(&ep_info->lock);
+			return PTR_ERR(operations);
+		}
+		ep_info->operations = operations;
+	}
+	mutex_unlock(&ep_info->lock);
+
+	/* reject unsupported and/or out of scope commands */
+	op_entry = (struct pds_fwctl_query_data_operation *)ep_info->operations->entries;
+	num_entries = le32_to_cpu(ep_info->operations->num_entries);
+	for (i = 0; i < num_entries; i++) {
+		if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, le32_to_cpu(op_entry[i].id))) {
+			if (scope < op_entry[i].scope)
+				return -EPERM;
+			return 0;
+		}
+	}
+
+	dev_dbg(dev, "Invalid operation %d for endpoint %d\n", rpc->in.op, rpc->in.ep);
+
+	return -EINVAL;
+}
+
+static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+			  void *in, size_t in_len, size_t *out_len)
+{
+	struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl);
+	struct device *dev = &uctx->fwctl->dev;
+	union pds_core_adminq_comp comp = {0};
+	dma_addr_t out_payload_dma_addr = 0;
+	dma_addr_t in_payload_dma_addr = 0;
+	struct fwctl_rpc_pds *rpc = in;
+	union pds_core_adminq_cmd cmd;
+	void *out_payload = NULL;
+	void *in_payload = NULL;
+	void *out = NULL;
+	int err;
+
+	err = pdsfc_validate_rpc(pdsfc, rpc, scope);
+	if (err)
+		return ERR_PTR(err);
+
+	if (rpc->in.len > 0) {
+		in_payload = kzalloc(rpc->in.len, GFP_KERNEL);
+		if (!in_payload) {
+			dev_err(dev, "Failed to allocate in_payload\n");
+			err = -ENOMEM;
+			goto err_out;
+		}
+
+		if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload),
+				   rpc->in.len)) {
+			dev_dbg(dev, "Failed to copy in_payload from user\n");
+			err = -EFAULT;
+			goto err_in_payload;
+		}
+
+		in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
+						     rpc->in.len, DMA_TO_DEVICE);
+		err = dma_mapping_error(dev->parent, in_payload_dma_addr);
+		if (err) {
+			dev_dbg(dev, "Failed to map in_payload\n");
+			goto err_in_payload;
+		}
+	}
+
+	if (rpc->out.len > 0) {
+		out_payload = kzalloc(rpc->out.len, GFP_KERNEL);
+		if (!out_payload) {
+			dev_dbg(dev, "Failed to allocate out_payload\n");
+			err = -ENOMEM;
+			goto err_out_payload;
+		}
+
+		out_payload_dma_addr = dma_map_single(dev->parent, out_payload,
+						      rpc->out.len, DMA_FROM_DEVICE);
+		err = dma_mapping_error(dev->parent, out_payload_dma_addr);
+		if (err) {
+			dev_dbg(dev, "Failed to map out_payload\n");
+			goto err_out_payload;
+		}
+	}
+
+	cmd = (union pds_core_adminq_cmd) {
+		.fwctl_rpc = {
+			.opcode = PDS_FWCTL_CMD_RPC,
+			.flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP),
+			.ep = cpu_to_le32(rpc->in.ep),
+			.op = cpu_to_le32(rpc->in.op),
+			.req_pa = cpu_to_le64(in_payload_dma_addr),
+			.req_sz = cpu_to_le32(rpc->in.len),
+			.resp_pa = cpu_to_le64(out_payload_dma_addr),
+			.resp_sz = cpu_to_le32(rpc->out.len),
+		}
+	};
+
+	err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0);
+	if (err) {
+		dev_dbg(dev, "%s: ep %d op %x req_pa %llx req_sz %d req_sg %d resp_pa %llx resp_sz %d resp_sg %d err %d\n",
+			__func__, rpc->in.ep, rpc->in.op,
+			cmd.fwctl_rpc.req_pa, cmd.fwctl_rpc.req_sz, cmd.fwctl_rpc.req_sg_elems,
+			cmd.fwctl_rpc.resp_pa, cmd.fwctl_rpc.resp_sz, cmd.fwctl_rpc.resp_sg_elems,
+			err);
+		goto done;
+	}
+
+	dynamic_hex_dump("out ", DUMP_PREFIX_OFFSET, 16, 1, out_payload, rpc->out.len, true);
+
+	if (copy_to_user(u64_to_user_ptr(rpc->out.payload),
+			 out_payload, rpc->out.len)) {
+		dev_dbg(dev, "Failed to copy out_payload to user\n");
+		out = ERR_PTR(-EFAULT);
+		goto done;
+	}
+
+	rpc->out.retval = le32_to_cpu(comp.fwctl_rpc.err);
+	*out_len = in_len;
+	out = in;
+
+done:
+	if (out_payload_dma_addr)
+		dma_unmap_single(dev->parent, out_payload_dma_addr,
+				 rpc->out.len, DMA_FROM_DEVICE);
+err_out_payload:
+	kfree(out_payload);
+
+	if (in_payload_dma_addr)
+		dma_unmap_single(dev->parent, in_payload_dma_addr,
+				 rpc->in.len, DMA_TO_DEVICE);
+err_in_payload:
+	kfree(in_payload);
+err_out:
+	if (err)
+		return ERR_PTR(err);
+
+	return out;
+}
+
+static const struct fwctl_ops pdsfc_ops = {
+	.device_type = FWCTL_DEVICE_TYPE_PDS,
+	.uctx_size = sizeof(struct pdsfc_uctx),
+	.open_uctx = pdsfc_open_uctx,
+	.close_uctx = pdsfc_close_uctx,
+	.info = pdsfc_info,
+	.fw_rpc = pdsfc_fw_rpc,
+};
+
+static int pdsfc_probe(struct auxiliary_device *adev,
+		       const struct auxiliary_device_id *id)
+{
+	struct pds_auxiliary_dev *padev =
+		container_of(adev, struct pds_auxiliary_dev, aux_dev);
+	struct device *dev = &adev->dev;
+	struct pdsfc_dev *pdsfc;
+	int err;
+
+	pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
+				   struct pdsfc_dev, fwctl);
+	if (!pdsfc)
+		return dev_err_probe(dev, -ENOMEM, "Failed to allocate fwctl device struct\n");
+	pdsfc->padev = padev;
+
+	err = pdsfc_identify(pdsfc);
+	if (err) {
+		fwctl_put(&pdsfc->fwctl);
+		return dev_err_probe(dev, err, "Failed to identify device\n");
+	}
+
+	err = pdsfc_init_endpoints(pdsfc);
+	if (err) {
+		fwctl_put(&pdsfc->fwctl);
+		return dev_err_probe(dev, err, "Failed to init endpoints\n");
+	}
+
+	pdsfc->caps = PDS_FWCTL_QUERY_CAP | PDS_FWCTL_SEND_CAP;
+
+	err = fwctl_register(&pdsfc->fwctl);
+	if (err) {
+		pdsfc_free_endpoints(pdsfc);
+		fwctl_put(&pdsfc->fwctl);
+		return dev_err_probe(dev, err, "Failed to register device\n");
+	}
+
+	auxiliary_set_drvdata(adev, pdsfc);
+
+	return 0;
+}
+
+static void pdsfc_remove(struct auxiliary_device *adev)
+{
+	struct pdsfc_dev *pdsfc = auxiliary_get_drvdata(adev);
+
+	fwctl_unregister(&pdsfc->fwctl);
+	pdsfc_free_operations(pdsfc);
+	pdsfc_free_endpoints(pdsfc);
+
+	fwctl_put(&pdsfc->fwctl);
+}
+
+static const struct auxiliary_device_id pdsfc_id_table[] = {
+	{.name = PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_FWCTL_STR },
+	{}
+};
+MODULE_DEVICE_TABLE(auxiliary, pdsfc_id_table);
+
+static struct auxiliary_driver pdsfc_driver = {
+	.name = "pds_fwctl",
+	.probe = pdsfc_probe,
+	.remove = pdsfc_remove,
+	.id_table = pdsfc_id_table,
+};
+
+module_auxiliary_driver(pdsfc_driver);
+
+MODULE_IMPORT_NS("FWCTL");
+MODULE_DESCRIPTION("pds fwctl driver");
+MODULE_AUTHOR("Shannon Nelson <shannon.nelson@amd.com>");
+MODULE_AUTHOR("Brett Creeley <brett.creeley@amd.com>");
+MODULE_LICENSE("GPL");
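Finally, the pds RPC path seen from userspace: the FWCTL_RPC in/out buffers carry a struct fwctl_rpc_pds whose payload pointers the driver bounces through its own DMA buffers, and which comes back with out.retval filled in. A hedged sketch — the field names are taken from the driver code above, and the exact uapi layout lives in include/uapi/fwctl/pds.h:

/* Sketch: wrap one endpoint operation in fwctl_rpc_pds and submit it. */
static int pds_rpc(int fd, __u32 ep, __u32 op,
		   void *req, __u32 req_len, void *resp, __u32 resp_len)
{
	struct fwctl_rpc_pds pds = {
		.in.ep = ep,
		.in.op = op,
		.in.len = req_len,
		.in.payload = (uintptr_t)req,
		.out.len = resp_len,
		.out.payload = (uintptr_t)resp,
	};
	struct fwctl_rpc rpc = {
		.size = sizeof(rpc),
		.scope = FWCTL_RPC_CONFIGURATION,
		.in_len = sizeof(pds),
		.out_len = sizeof(pds),
		.in = (uintptr_t)&pds,
		.out = (uintptr_t)&pds,	/* driver hands the same struct back */
	};

	if (ioctl(fd, FWCTL_RPC, &rpc))
		return -1;
	return pds.out.retval;	/* firmware completion status */
}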