Diffstat (limited to 'drivers/misc/cxl/api.c')
-rw-r--r--	drivers/misc/cxl/api.c	132
1 file changed, 0 insertions(+), 132 deletions(-)
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 8fd5ec4d6042..750470ef2049 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -11,7 +11,6 @@
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <misc/cxl.h>
-#include <linux/msi.h>
 #include <linux/module.h>
 #include <linux/mount.h>
 #include <linux/sched/mm.h>
@@ -168,21 +167,6 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
 	return 0;
 }
-int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
-{
-	if (*ctx == NULL || *afu_irq == 0) {
-		*afu_irq = 1;
-		*ctx = cxl_get_context(pdev);
-	} else {
-		(*afu_irq)++;
-		if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
-			*ctx = list_next_entry(*ctx, extra_irq_contexts);
-			*afu_irq = 1;
-		}
-	}
-	return cxl_find_afu_irq(*ctx, *afu_irq);
-}
-/* Exported via cxl_base */
 int cxl_set_priv(struct cxl_context *ctx, void *priv)
 {
@@ -310,7 +294,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
 	if (task) {
 		ctx->pid = get_task_pid(task, PIDTYPE_PID);
 		kernel = false;
-		ctx->real_mode = false;
 		/* acquire a reference to the task's mm */
 		ctx->mm = get_task_mm(current);
@@ -374,24 +357,6 @@ void cxl_set_master(struct cxl_context *ctx)
 }
 EXPORT_SYMBOL_GPL(cxl_set_master);
-int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
-{
-	if (ctx->status == STARTED) {
-		/*
-		 * We could potentially update the PE and issue an update LLCMD
-		 * to support this, but it doesn't seem to have a good use case
-		 * since it's trivial to just create a second kernel context
-		 * with different translation modes, so until someone convinces
-		 * me otherwise:
-		 */
-		return -EBUSY;
-	}
-
-	ctx->real_mode = real_mode;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_set_translation_mode);
-
 /* wrappers around afu_* file ops which are EXPORTED */
 int cxl_fd_open(struct inode *inode, struct file *file)
 {
@@ -573,100 +538,3 @@ ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
 	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
 }
 EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
-
-int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
-{
-	struct cxl_afu *afu = cxl_pci_to_afu(dev);
-	if (IS_ERR(afu))
-		return -ENODEV;
-
-	if (irqs > afu->adapter->user_irqs)
-		return -EINVAL;
-
-	/* Limit user_irqs to prevent the user increasing this via sysfs */
-	afu->adapter->user_irqs = irqs;
-	afu->irqs_max = irqs;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);
-
-int cxl_get_max_irqs_per_process(struct pci_dev *dev)
-{
-	struct cxl_afu *afu = cxl_pci_to_afu(dev);
-	if (IS_ERR(afu))
-		return -ENODEV;
-
-	return afu->irqs_max;
-}
-EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);
-
-/*
- * This is a special interrupt allocation routine called from the PHB's MSI
- * setup function. When capi interrupts are allocated in this manner they must
- * still be associated with a running context, but since the MSI APIs have no
- * way to specify this we use the default context associated with the device.
- *
- * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
- * interrupt number, so in order to overcome this their driver informs us of
- * the restriction by setting the maximum interrupts per context, and we
- * allocate additional contexts as necessary so that we can keep the AFU
- * interrupt number within the supported range.
- */
-int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
-	struct cxl_context *ctx, *new_ctx, *default_ctx;
-	int remaining;
-	int rc;
-
-	ctx = default_ctx = cxl_get_context(pdev);
-	if (WARN_ON(!default_ctx))
-		return -ENODEV;
-
-	remaining = nvec;
-	while (remaining > 0) {
-		rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
-		if (rc) {
-			pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
-			return rc;
-		}
-		remaining -= ctx->afu->irqs_max;
-
-		if (ctx != default_ctx && default_ctx->status == STARTED) {
-			WARN_ON(cxl_start_context(ctx,
-				be64_to_cpu(default_ctx->elem->common.wed),
-				NULL));
-		}
-
-		if (remaining > 0) {
-			new_ctx = cxl_dev_context_init(pdev);
-			if (IS_ERR(new_ctx)) {
-				pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
-				return -ENOSPC;
-			}
-			list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
-			ctx = new_ctx;
-		}
-	}
-
-	return 0;
-}
-/* Exported via cxl_base */
-
-void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
-{
-	struct cxl_context *ctx, *pos, *tmp;
-
-	ctx = cxl_get_context(pdev);
-	if (WARN_ON(!ctx))
-		return;
-
-	cxl_free_afu_irqs(ctx);
-	list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
-		cxl_stop_context(pos);
-		cxl_free_afu_irqs(pos);
-		list_del(&pos->extra_irq_contexts);
-		cxl_release_context(pos);
-	}
-}
-/* Exported via cxl_base */
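
For readers following the removal, a rough sketch of how the deleted helpers fitted together may help (the caller below is hypothetical and is not part of this patch or the kernel tree): _cxl_cx4_setup_msi_irqs() reserved nvec AFU interrupts, spilling into extra contexts chained on extra_irq_contexts whenever a context hit its irqs_max limit, and _cxl_next_msi_hwirq() then walked that chain, yielding one hardware IRQ number per MSI vector and hopping to the next context once cxl_get_max_irqs_per_process() was exceeded.

#include <linux/pci.h>
#include <misc/cxl.h>	/* assumes the removed prototypes were still declared somewhere visible */

/*
 * Hypothetical illustration only -- not from this patch. Shows how a
 * PHB-side MSI setup path could have consumed the removed helpers.
 */
static int example_cx4_map_msis(struct pci_dev *pdev, int nvec)
{
	struct cxl_context *ctx = NULL;	/* walker state; NULL means "start at the default context" */
	int afu_irq = 0;		/* 0 requests the first AFU IRQ of the first context */
	irq_hw_number_t hwirq;
	int i, rc;

	/* Reserve nvec AFU interrupts, allocating extra contexts as needed */
	rc = _cxl_cx4_setup_msi_irqs(pdev, nvec, 0);
	if (rc)
		return rc;

	for (i = 0; i < nvec; i++) {
		/*
		 * Advances afu_irq, and moves to the next context on the
		 * extra_irq_contexts list once the per-process limit from
		 * cxl_get_max_irqs_per_process() is exceeded.
		 */
		hwirq = _cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (!hwirq)
			return -ENOSPC;
		/* ... wire hwirq up to the i-th MSI entry here ... */
	}

	return 0;
}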