path: root/net/bluetooth/hci_core.c
author    Luiz Augusto von Dentz <luiz.von.dentz@intel.com>  2024-01-09 13:45:40 -0500
committer Luiz Augusto von Dentz <luiz.von.dentz@intel.com>  2024-03-06 17:22:38 -0500
commit    63298d6e752fc0ec7f5093860af8bc9f047b30c8 (patch)
tree      de9da8f2791140f77dff2c423aac9b781c65fe1a /net/bluetooth/hci_core.c
parent    79c0868ad65a8fc7cdfaa5f2b77a4b70d0b0ea16 (diff)
Bluetooth: hci_core: Cancel request on command timeout
If a command has timed out, call __hci_cmd_sync_cancel to notify the hci_req layer, since the pending request will otherwise inevitably hit its own timeout. This also reworks the code around __hci_cmd_sync_cancel, since it wrongly assumed the timers always need to be cancelled as well; sometimes they have not been started, or have in fact already expired, in which case they don't need to be cancelled again.

Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
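For reference, the notification itself happens inside hci_cmd_sync_cancel_sync(), whose body lives in net/bluetooth/hci_sync.c and is not part of this diff. The sketch below (the _sketch suffix is a hypothetical name for illustration) shows the behaviour the timeout path relies on, written as an assumption rather than a copy of the real helper: the pending synchronous request is completed with the supplied error instead of waiting for its own timeout.

/* Sketch only (assumed behaviour, not taken from this patch): complete a
 * pending synchronous request with the given error so the waiter does not
 * sit out its own timeout. Callers in this patch pass errors of either
 * sign (e.g. ETIMEDOUT, -EHOSTDOWN), so the sign is normalized here.
 */
static void hci_cmd_sync_cancel_sync_sketch(struct hci_dev *hdev, int err)
{
	if (hdev->req_status == HCI_REQ_PEND) {
		/* req_result is unsigned, so store the error as a positive value */
		hdev->req_result = err < 0 ? -err : err;
		hdev->req_status = HCI_REQ_CANCELED;

		/* Wake up anyone blocked in __hci_cmd_sync()/hci_req_sync() */
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

With this in place, a synchronous request that is pending when hci_cmd_timeout() fires is completed with ETIMEDOUT immediately, rather than only after its own wait expires.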
Diffstat (limited to 'net/bluetooth/hci_core.c')
-rw-r--r--  net/bluetooth/hci_core.c  84
1 file changed, 57 insertions(+), 27 deletions(-)
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e5cb618fa6d3..de730d210ccb 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1523,10 +1523,11 @@ static void hci_cmd_timeout(struct work_struct *work)
cmd_timer.work);
if (hdev->sent_cmd) {
- struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
- u16 opcode = __le16_to_cpu(sent->opcode);
+ u16 opcode = hci_skb_opcode(hdev->sent_cmd);
bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
+
+ hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
} else {
bt_dev_err(hdev, "command tx timeout");
}
@@ -2857,6 +2858,23 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
return ret;
}
+/* Cancel ongoing command synchronously:
+ *
+ * - Cancel command timer
+ * - Reset command counter
+ * - Cancel command request
+ */
+static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
+{
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ cancel_delayed_work_sync(&hdev->ncmd_timer);
+ atomic_set(&hdev->cmd_cnt, 1);
+
+ hci_cmd_sync_cancel_sync(hdev, -err);
+}
+
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
@@ -2874,7 +2892,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
return 0;
/* Cancel potentially blocking sync operation before suspend */
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
hci_req_sync_lock(hdev);
ret = hci_suspend_sync(hdev);
@@ -4159,6 +4177,33 @@ static void hci_rx_work(struct work_struct *work)
}
}
+static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "skb %p", skb);
+
+ kfree_skb(hdev->sent_cmd);
+
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+ if (!hdev->sent_cmd) {
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ return;
+ }
+
+ err = hci_send_frame(hdev, skb);
+ if (err < 0) {
+ hci_cmd_sync_cancel_sync(hdev, err);
+ return;
+ }
+
+ if (hci_req_status_pend(hdev))
+ hci_dev_set_flag(hdev, HCI_CMD_PENDING);
+
+ atomic_dec(&hdev->cmd_cnt);
+}
+
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
@@ -4173,30 +4218,15 @@ static void hci_cmd_work(struct work_struct *work)
if (!skb)
return;
- kfree_skb(hdev->sent_cmd);
-
- hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
- if (hdev->sent_cmd) {
- int res;
- if (hci_req_status_pend(hdev))
- hci_dev_set_flag(hdev, HCI_CMD_PENDING);
- atomic_dec(&hdev->cmd_cnt);
+ hci_send_cmd_sync(hdev, skb);
- res = hci_send_frame(hdev, skb);
- if (res < 0)
- __hci_cmd_sync_cancel(hdev, -res);
-
- rcu_read_lock();
- if (test_bit(HCI_RESET, &hdev->flags) ||
- hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
- cancel_delayed_work(&hdev->cmd_timer);
- else
- queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
- HCI_CMD_TIMEOUT);
- rcu_read_unlock();
- } else {
- skb_queue_head(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
- }
+ rcu_read_lock();
+ if (test_bit(HCI_RESET, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+ cancel_delayed_work(&hdev->cmd_timer);
+ else
+ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+ HCI_CMD_TIMEOUT);
+ rcu_read_unlock();
}
}