author     Dave Jiang <dave.jiang@intel.com>        2023-12-21 15:04:11 -0700
committer  Dan Williams <dan.j.williams@intel.com>  2023-12-22 15:31:53 -0800
commit     86557b7edf77d2a3835136c325c8baa6fe803234
tree       133420f397d07418788cb829c3efb7a23f14807e
parent     7a4f148dd8d518bc1e012aa738b0ed6244959293
cxl: Store QTG IDs and related info to the CXL memory device context
Once the QTG ID _DSM has executed successfully, the QTG ID is retrieved from
the return package. Create a list of entries in the cxl_memdev context and
store the QTG ID as the qos_class token along with the associated DPA range.
This information can be exposed to user space via sysfs in order to help
region setup for hot-plugged CXL memory devices.
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/170319625109.2212653.11872111896220384056.stgit@djiang5-mobl3
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
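
The matching step the commit message describes is small enough to model outside
the kernel. Below is a minimal user-space C sketch of that flow, not the kernel
code itself: each DSMAS-derived DPA range is tested against the device's RAM and
PMEM partitions and, on a hit, an entry carrying the qos_class is appended to the
matching list. The partition ranges, DSMAS entries, and qos_class values are
made-up illustration data, and a plain singly linked list stands in for the
kernel's list_head.

/*
 * User-space model of the partition-matching logic added by this patch.
 * Illustration only; values and list type do not come from the kernel.
 */
#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long long start, end; };

struct dpa_perf {
	struct dpa_perf *next;
	struct range dpa_range;
	int qos_class;
};

/* Is @r2 fully contained in @r1? (analogue of the kernel's range_contains()) */
static int range_contains(const struct range *r1, const struct range *r2)
{
	return r1->start <= r2->start && r1->end >= r2->end;
}

/* Append a perf entry carrying the qos_class to the given partition list. */
static void add_perf_entry(struct dpa_perf **list, struct range r, int qos_class)
{
	struct dpa_perf *p = calloc(1, sizeof(*p));

	if (!p)
		return;
	p->dpa_range = r;
	p->qos_class = qos_class;
	p->next = *list;
	*list = p;
}

int main(void)
{
	/* Hypothetical partitions: 1G of RAM followed by 1G of PMEM. */
	struct range ram_range  = { 0x0,        0x3fffffff };
	struct range pmem_range = { 0x40000000, 0x7fffffff };
	struct dpa_perf *ram_perf_list = NULL, *pmem_perf_list = NULL;

	/* Two hypothetical DSMAS entries with their QTG-derived qos_class. */
	struct { struct range r; int qos_class; } dsmas[] = {
		{ { 0x0,        0x3fffffff }, 0 },
		{ { 0x40000000, 0x7fffffff }, 1 },
	};

	for (unsigned int i = 0; i < sizeof(dsmas) / sizeof(dsmas[0]); i++) {
		if (range_contains(&ram_range, &dsmas[i].r))
			add_perf_entry(&ram_perf_list, dsmas[i].r, dsmas[i].qos_class);
		else if (range_contains(&pmem_range, &dsmas[i].r))
			add_perf_entry(&pmem_perf_list, dsmas[i].r, dsmas[i].qos_class);
		else
			printf("no partition for dsmas dpa: %#llx\n", dsmas[i].r.start);
	}

	for (struct dpa_perf *p = ram_perf_list; p; p = p->next)
		printf("ram:  dpa %#llx qos_class %d\n", p->dpa_range.start, p->qos_class);
	for (struct dpa_perf *p = pmem_perf_list; p; p = p->next)
		printf("pmem: dpa %#llx qos_class %d\n", p->dpa_range.start, p->qos_class);
	return 0;
}

In the patch itself the two lists live in struct cxl_memdev_state, are initialized
in cxl_memdev_state_create(), and are torn down by the free_perf_ents() devm action
when the memdev is unregistered.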
 drivers/cxl/core/cdat.c | 69
 drivers/cxl/core/mbox.c |  2
 drivers/cxl/cxlmem.h    | 21
 3 files changed, 92 insertions(+), 0 deletions(-)
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 43dfef80fb84..6189d967f399 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -6,6 +6,7 @@
 #include <linux/node.h>
 #include <linux/overflow.h>
 #include "cxlpci.h"
+#include "cxlmem.h"
 #include "cxl.h"
 
 struct dsmas_entry {
@@ -206,6 +207,71 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
 	return 0;
 }
 
+static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
+			   struct list_head *list)
+{
+	struct cxl_dpa_perf *dpa_perf;
+
+	dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
+	if (!dpa_perf)
+		return;
+
+	dpa_perf->dpa_range = dent->dpa_range;
+	dpa_perf->coord = dent->coord;
+	dpa_perf->qos_class = dent->qos_class;
+	list_add_tail(&dpa_perf->list, list);
+	dev_dbg(dev,
+		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
+		dent->dpa_range.start, dpa_perf->qos_class,
+		dent->coord.read_bandwidth, dent->coord.write_bandwidth,
+		dent->coord.read_latency, dent->coord.write_latency);
+}
+
+static void free_perf_ents(void *data)
+{
+	struct cxl_memdev_state *mds = data;
+	struct cxl_dpa_perf *dpa_perf, *n;
+	LIST_HEAD(discard);
+
+	list_splice_tail_init(&mds->ram_perf_list, &discard);
+	list_splice_tail_init(&mds->pmem_perf_list, &discard);
+	list_for_each_entry_safe(dpa_perf, n, &discard, list) {
+		list_del(&dpa_perf->list);
+		kfree(dpa_perf);
+	}
+}
+
+static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
+				     struct xarray *dsmas_xa)
+{
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+	struct device *dev = cxlds->dev;
+	struct range pmem_range = {
+		.start = cxlds->pmem_res.start,
+		.end = cxlds->pmem_res.end,
+	};
+	struct range ram_range = {
+		.start = cxlds->ram_res.start,
+		.end = cxlds->ram_res.end,
+	};
+	struct dsmas_entry *dent;
+	unsigned long index;
+
+	xa_for_each(dsmas_xa, index, dent) {
+		if (resource_size(&cxlds->ram_res) &&
+		    range_contains(&ram_range, &dent->dpa_range))
+			add_perf_entry(dev, dent, &mds->ram_perf_list);
+		else if (resource_size(&cxlds->pmem_res) &&
+			 range_contains(&pmem_range, &dent->dpa_range))
+			add_perf_entry(dev, dent, &mds->pmem_perf_list);
+		else
+			dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
+				dent->dpa_range.start);
+	}
+
+	devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
+}
+
 static void discard_dsmas(struct xarray *xa)
 {
 	unsigned long index;
@@ -221,6 +287,8 @@ DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))
 
 void cxl_endpoint_parse_cdat(struct cxl_port *port)
 {
+	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct xarray __dsmas_xa;
 	struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
 	int rc;
@@ -241,6 +309,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
 		return;
 	}
 
+	cxl_memdev_set_qos_class(cxlds, dsmas_xa);
 }
 EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 36270dcfb42e..fbaa508ab245 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1404,6 +1404,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 	mds->cxlds.reg_map.host = dev;
 	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
 	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
+	INIT_LIST_HEAD(&mds->ram_perf_list);
+	INIT_LIST_HEAD(&mds->pmem_perf_list);
 
 	return mds;
 }
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index a2fcbca253f3..205bc2a016b2 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -6,6 +6,7 @@
 #include <linux/cdev.h>
 #include <linux/uuid.h>
 #include <linux/rcuwait.h>
+#include <linux/node.h>
 #include "cxl.h"
 
 /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -392,6 +393,20 @@ enum cxl_devtype {
 };
 
 /**
+ * struct cxl_dpa_perf - DPA performance property entry
+ * @list - list entry
+ * @dpa_range - range for DPA address
+ * @coord - QoS performance data (i.e. latency, bandwidth)
+ * @qos_class - QoS Class cookies
+ */
+struct cxl_dpa_perf {
+	struct list_head list;
+	struct range dpa_range;
+	struct access_coordinate coord;
+	int qos_class;
+};
+
+/**
  * struct cxl_dev_state - The driver device state
 *
 * cxl_dev_state represents the CXL driver/device state. It provides an
@@ -455,6 +470,8 @@ struct cxl_dev_state {
 * @security: security driver state info
 * @fw: firmware upload / activation state
 * @mbox_send: @dev specific transport for transmitting mailbox commands
+ * @ram_perf_list: performance data entries matched to RAM
+ * @pmem_perf_list: performance data entries matched to PMEM
 *
 * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
 * details on capacity parameters.
@@ -475,6 +492,10 @@ struct cxl_memdev_state {
 	u64 active_persistent_bytes;
 	u64 next_volatile_bytes;
 	u64 next_persistent_bytes;
+
+	struct list_head ram_perf_list;
+	struct list_head pmem_perf_list;
+
 	struct cxl_event_state event;
 	struct cxl_poison_state poison;
 	struct cxl_security_state security;
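
As the commit message notes, the stored qos_class values are intended to be surfaced
to user space through sysfs so that tooling can group devices with matching QoS
classes into a region; the sysfs attributes themselves are added by a later change,
not by this patch. The sketch below is a hypothetical consumer written under that
assumption: the attribute path and the mem0 device name are illustrative guesses,
not interfaces defined by this commit.

/*
 * Hypothetical user-space consumer: read a memdev's RAM qos_class from a
 * sysfs attribute (path is an assumption for illustration, not something
 * this commit creates) so devices with matching values can be grouped
 * into the same region.
 */
#include <stdio.h>

static int read_qos_class(const char *path)
{
	FILE *f = fopen(path, "r");
	int qos_class = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &qos_class) != 1)
		qos_class = -1;
	fclose(f);
	return qos_class;
}

int main(void)
{
	int qos = read_qos_class("/sys/bus/cxl/devices/mem0/ram/qos_class");

	printf("mem0 ram qos_class: %d\n", qos);
	return 0;
}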