author     Sarah Sharp <sarah.a.sharp@linux.intel.com>   2011-07-22 14:34:34 -0700
committer  Sarah Sharp <sarah.a.sharp@linux.intel.com>   2011-08-09 14:48:45 -0700
commit     d13565c12828ce0cd2a3862bf6260164a0653352 (patch)
tree       07d69f033253186d220fe0cb6da96dd20170d411 /drivers
parent     8a8ff2f9399b23b968901f585ccb5a70a537c5ae (diff)
xhci: Fix memory leak during failed enqueue.
When the isochronous transfer support was introduced, and the xHCI driver switched to using urb->hcpriv to store an "urb_priv" pointer, a couple of memory leaks were introduced into the URB enqueue function in its error handling paths.

xhci_urb_enqueue allocates urb_priv, but it doesn't free it if changing the control endpoint's max packet size fails or the bulk endpoint is in the middle of allocating or deallocating streams.

xhci_urb_enqueue also doesn't free urb_priv if any of the four endpoint types' enqueue functions fail.  Instead, it expects those functions to free urb_priv if an error occurs.  However, the bulk, control, and interrupt enqueue functions do not free urb_priv if the endpoint ring is NULL.  It will, however, get freed if prepare_transfer() fails in those enqueue functions.

Several of the error paths in the isochronous endpoint enqueue function also fail to free it.  xhci_queue_isoc_tx_prepare() doesn't free urb_priv if prepare_ring() indicates there is not enough room for all the isochronous TDs in this URB.  If individual isochronous TDs fail to be queued (perhaps due to an endpoint state change), urb_priv is also leaked.

This argues that the freeing of urb_priv should be done in the function that allocated it, xhci_urb_enqueue.

This patch looks rather ugly, but refactoring the code will have to wait because this patch needs to be backported to stable kernels.

This patch should be backported to kernels as old as 2.6.36.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Cc: Andiry Xu <andiry.xu@amd.com>
Cc: stable@kernel.org
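The fix follows the usual "free it where you allocated it" pattern: the function that owns the allocation funnels every failure through a single cleanup label instead of trusting its helpers to clean up.  As a rough sketch only (the structures and helper below are illustrative stand-ins, not the real xHCI code; only urb->hcpriv and the kernel allocator calls are taken from the source), the resulting shape of the enqueue path looks like this:

#include <linux/slab.h>     /* kzalloc(), kfree() */
#include <linux/errno.h>

/* Illustrative stand-ins only -- not the real xHCI structures. */
struct example_urb {
	void *hcpriv;
};

struct example_priv {
	int num_tds;
};

/* Placeholder for whichever per-endpoint-type enqueue helper applies. */
static int example_queue_tx(struct example_urb *urb)
{
	return 0;               /* pretend the ring had room */
}

/*
 * Sketch of the pattern the patch adopts: the function that allocates
 * the private data is also the one that frees it, and every error path
 * funnels through one cleanup label.
 */
static int example_enqueue(struct example_urb *urb)
{
	struct example_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;
	urb->hcpriv = priv;

	ret = example_queue_tx(urb);
	if (ret)
		goto free_priv;         /* helper failed: clean up here, not there */

	return 0;

free_priv:
	kfree(priv);
	urb->hcpriv = NULL;             /* don't leave a dangling pointer behind */
	return ret;
}

In the actual patch below, the existing dying: path for a non-responsive host sets ret to -ESHUTDOWN and then falls through into the new free_priv: label, so the cleanup and the spin_unlock_irqrestore() happen exactly once on every exit.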
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/usb/host/xhci-ring.c |  5
-rw-r--r--  drivers/usb/host/xhci.c      | 21
2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7113d16e2d3a..9d3f9dd1ad28 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2500,11 +2500,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
if (td_index == 0) {
ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
- if (unlikely(ret)) {
- xhci_urb_free_priv(xhci, urb_priv);
- urb->hcpriv = NULL;
+ if (unlikely(ret))
return ret;
- }
}
td->urb = urb;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1c4432d8fc10..8e84acff1134 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1085,8 +1085,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
if (urb->dev->speed == USB_SPEED_FULL) {
ret = xhci_check_maxpacket(xhci, slot_id,
ep_index, urb);
- if (ret < 0)
+ if (ret < 0) {
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
return ret;
+ }
}
/* We have a spinlock and interrupts disabled, so we must pass
@@ -1097,6 +1100,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1117,6 +1122,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
}
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1124,6 +1131,8 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
spin_lock_irqsave(&xhci->lock, flags);
@@ -1131,18 +1140,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
goto dying;
ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
+ if (ret)
+ goto free_priv;
spin_unlock_irqrestore(&xhci->lock, flags);
}
exit:
return ret;
dying:
- xhci_urb_free_priv(xhci, urb_priv);
- urb->hcpriv = NULL;
xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
+ ret = -ESHUTDOWN;
+free_priv:
+ xhci_urb_free_priv(xhci, urb_priv);
+ urb->hcpriv = NULL;
spin_unlock_irqrestore(&xhci->lock, flags);
- return -ESHUTDOWN;
+ return ret;
}
/* Get the right ring for the given URB.