author     Dan Williams <dan.j.williams@intel.com>  2022-08-01 12:55:30 -0700
committer  Dan Williams <dan.j.williams@intel.com>  2022-08-05 08:41:02 -0700
commit     e29a8995d63f6f861b2cc446c58cef430885f469 (patch)
tree       984e4105a2d6429ea051ec551e20f86b38ac5192 /drivers/cxl
parent     69c9961387f244077101de3ce4e272717617dc87 (diff)
download   linux-stable-e29a8995d63f6f861b2cc446c58cef430885f469.tar.gz
           linux-stable-e29a8995d63f6f861b2cc446c58cef430885f469.tar.bz2
           linux-stable-e29a8995d63f6f861b2cc446c58cef430885f469.zip
cxl/region: Fix region reference target accounting
Dan reports:

    The error handling in cxl_port_attach_region() looks like it might
    have a similar bug. The cxl_rr->nr_targets++; might want a --.
    That function is more complicated.

Indeed, cxl_rr->nr_targets leaks when cxl_rr_ep_add() fails, but that
flow is not clear. Fix the bug and the clarity by separating the 'new'
region-reference case from the 'extend' region-reference case.

This also moves the host-physical-address (HPA) validation, which checks
that the HPA of a new region being accounted to the port is greater than
the HPA of all other regions associated with the port, into
alloc_region_ref().

Introduce @nr_targets_inc to track when the error exit path needs to
clean up cxl_rr->nr_targets.

Fixes: 384e624bb211 ("cxl/region: Attach endpoint decoders")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Link: http://lore.kernel.org/r/165939482134.252363.1915691883146696327.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
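The essence of the fix is an "undo only what this call did" pattern: cxl_port_attach_region() records whether this particular invocation bumped cxl_rr->nr_targets and, on the error path, decrements the counter only in that case. A minimal user-space sketch of that pattern follows; struct region_ref, ep_add() and attach() are illustrative stand-ins, not the actual CXL structures or helpers.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct cxl_region_ref. */
struct region_ref {
	int nr_targets;
	int nr_eps;
};

/* Simulated failure point, standing in for cxl_rr_ep_add(). */
static int ep_add(struct region_ref *rr, bool fail)
{
	if (fail)
		return -1;
	rr->nr_eps++;
	return 0;
}

/*
 * Model of the accounting in cxl_port_attach_region(): bump nr_targets,
 * remember that *this* call bumped it, and undo only that bump if a
 * later step fails.
 */
static int attach(struct region_ref *rr, bool is_new_target, bool fail)
{
	bool nr_targets_inc = false;
	int rc;

	if (is_new_target) {
		rr->nr_targets++;
		nr_targets_inc = true;
	}

	rc = ep_add(rr, fail);
	if (rc) {
		if (nr_targets_inc)
			rr->nr_targets--;	/* undo only our increment */
		return rc;
	}
	return 0;
}

int main(void)
{
	struct region_ref rr = { 0 };

	attach(&rr, true, false);	/* success: nr_targets stays at 1 */
	attach(&rr, true, true);	/* failure: the increment is rolled back */
	printf("nr_targets=%d nr_eps=%d\n", rr.nr_targets, rr.nr_eps);
	return 0;
}

The flag keeps the error path symmetric with the success path even though the increment is conditional, which is exactly what the leaking nr_targets in the original flow was missing.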
Diffstat (limited to 'drivers/cxl')
-rw-r--r--  drivers/cxl/core/region.c | 71
1 file changed, 43 insertions(+), 28 deletions(-)
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index addab74d24bb..f8f3df798aa3 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -648,12 +648,26 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
struct cxl_region *cxlr)
{
- struct cxl_region_ref *cxl_rr;
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_region_ref *cxl_rr, *iter;
+ unsigned long index;
int rc;
+ xa_for_each(&port->regions, index, iter) {
+ struct cxl_region_params *ip = &iter->region->params;
+
+ if (ip->res->start > p->res->start) {
+ dev_dbg(&cxlr->dev,
+ "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
if (!cxl_rr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
cxl_rr->port = port;
cxl_rr->region = cxlr;
cxl_rr->nr_targets = 1;
@@ -665,7 +679,7 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
"%s: failed to track region reference: %d\n",
dev_name(&port->dev), rc);
kfree(cxl_rr);
- return NULL;
+ return ERR_PTR(rc);
}
return cxl_rr;
@@ -740,33 +754,25 @@ static int cxl_port_attach_region(struct cxl_port *port,
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
- struct cxl_region_ref *cxl_rr = NULL, *iter;
- struct cxl_region_params *p = &cxlr->params;
- struct cxl_decoder *cxld = NULL;
+ struct cxl_region_ref *cxl_rr;
+ bool nr_targets_inc = false;
+ struct cxl_decoder *cxld;
unsigned long index;
int rc = -EBUSY;
lockdep_assert_held_write(&cxl_region_rwsem);
- xa_for_each(&port->regions, index, iter) {
- struct cxl_region_params *ip = &iter->region->params;
-
- if (iter->region == cxlr)
- cxl_rr = iter;
- if (ip->res->start > p->res->start) {
- dev_dbg(&cxlr->dev,
- "%s: HPA order violation %s:%pr vs %pr\n",
- dev_name(&port->dev),
- dev_name(&iter->region->dev), ip->res, p->res);
- return -EBUSY;
- }
- }
-
+ cxl_rr = cxl_rr_load(port, cxlr);
if (cxl_rr) {
struct cxl_ep *ep_iter;
int found = 0;
- cxld = cxl_rr->decoder;
+ /*
+ * Walk the existing endpoints that have been attached to
+ * @cxlr at @port and see if they share the same 'next' port
+ * in the downstream direction. I.e. endpoints that share common
+ * upstream switch.
+ */
xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
if (ep_iter == ep)
continue;
@@ -777,22 +783,29 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
/*
- * If this is a new target or if this port is direct connected
- * to this endpoint then add to the target count.
+ * New target port, or @port is an endpoint port that always
+ * accounts its own local decode as a target.
*/
- if (!found || !ep->next)
+ if (!found || !ep->next) {
cxl_rr->nr_targets++;
+ nr_targets_inc = true;
+ }
+
+ /*
+ * The decoder for @cxlr was allocated when the region was first
+ * attached to @port.
+ */
+ cxld = cxl_rr->decoder;
} else {
cxl_rr = alloc_region_ref(port, cxlr);
- if (!cxl_rr) {
+ if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
dev_name(&port->dev));
- return -ENOMEM;
+ return PTR_ERR(cxl_rr);
}
- }
+ nr_targets_inc = true;
- if (!cxld) {
if (port == cxled_to_port(cxled))
cxld = &cxled->cxld;
else
@@ -835,6 +848,8 @@ static int cxl_port_attach_region(struct cxl_port *port,
return 0;
out_erase:
+ if (nr_targets_inc)
+ cxl_rr->nr_targets--;
if (cxl_rr->nr_eps == 0)
free_region_ref(cxl_rr);
return rc;
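Because the HPA ordering check now lives in alloc_region_ref(), that function can fail for more than one reason, so the patch also switches it from returning NULL to the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention (include/linux/err.h) and lets cxl_port_attach_region() propagate the specific errno via PTR_ERR(). Below is a minimal user-space model of that convention; alloc_ref() is an illustrative stand-in for alloc_region_ref(), and the helper definitions are simplified versions of the real ones.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified user-space model of the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)(intptr_t)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)(intptr_t)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct region_ref { int nr_targets; };

/* Two failure reasons, so a bare NULL can no longer distinguish them. */
static struct region_ref *alloc_ref(int hpa_order_ok)
{
	struct region_ref *rr;

	if (!hpa_order_ok)
		return ERR_PTR(-EBUSY);		/* ordering violation */

	rr = calloc(1, sizeof(*rr));
	if (!rr)
		return ERR_PTR(-ENOMEM);	/* allocation failure */
	rr->nr_targets = 1;
	return rr;
}

int main(void)
{
	struct region_ref *rr = alloc_ref(0);

	if (IS_ERR(rr)) {
		printf("alloc_ref failed: %ld\n", PTR_ERR(rr));
		return 1;
	}
	free(rr);
	return 0;
}

Encoding the errno in the pointer itself is what lets the caller's "return PTR_ERR(cxl_rr);" distinguish -EBUSY from -ENOMEM without an extra out-parameter.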